diff --git "a/6434.jsonl" "b/6434.jsonl" new file mode 100644--- /dev/null +++ "b/6434.jsonl" @@ -0,0 +1,228 @@ +{"seq_id":"78597351","text":"from gensim.models import KeyedVectors\nfrom tqdm import tqdm\nimport numpy as np\n\n\ndef save_embedding(word2vecfilename, term_index, embedfilename):\n embed = {}\n model = KeyedVectors.load_word2vec_format(word2vecfilename)\n\n embedfile = open(embedfilename, 'w')\n for key, value in tqdm(term_index.items()):\n try:\n embed[value] = model.wv[key]\n except: # embedding OOV字符\n embed[value] = np.float32(np.random.uniform(-0.2, 0.2, 300))\n\n embedfile.write(str(value) + '\\t' + \" \".join(map(str, embed[value])))\n embedfile.write('\\n')\n embedfile.close()\n print('[%s]\\n\\tEmbedding size: %d' % (term_index, len(embed)), end='\\n')\n return\n\n\n# Read Embedding File\ndef read_embedding(filename):\n embed = {}\n # for line in open(filename, encoding='utf-8'):\n # line = line.strip().split()\n # embed[line[0]] = list(map(float, line[1:]))\n for line in open(filename):\n line = line.strip().split()\n embed[int(line[0])] = list(map(float, line[1:]))\n print('[%s]\\n\\tEmbedding size: %d' % (filename, len(embed)), end='\\n')\n return embed\n\n\ndef build_matrix(embed_dict, vocab_size, embed_size):\n _PAD_ = vocab_size\n embed_dict[_PAD_] = np.zeros((embed_size,), dtype=np.float32)\n embed = np.float32(np.random.uniform(-0.02, 0.02, [vocab_size, embed_size]))\n matrix = convert_embed_2_numpy(embed_dict, embed=embed)\n return matrix\n\n\n# Convert Embedding Dict 2 numpy array\ndef convert_embed_2_numpy(embed_dict, max_size=0, embed=None):\n feat_size = len(embed_dict[list(embed_dict.keys())[0]])\n if embed is None:\n embed = np.zeros((max_size, feat_size), dtype=np.float32)\n\n if len(embed_dict) > len(embed):\n raise Exception(\"vocab_size %d is larger than embed_size %d, change the vocab_size in the config!\"\n % (len(embed_dict), len(embed)))\n\n # for index, k in enumerate(embed_dict):\n # embed[index] = np.array(embed_dict[k])\n for k in embed_dict:\n embed[k-1] = np.array(embed_dict[k])\n print('Generate numpy embed:', str(embed.shape), end='\\n')\n return embed\n\n","sub_path":"nlp_architect/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"248164866","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nfrom model.config import cfg\nfrom model.test import im_detect\nfrom model.nms_wrapper import nms\n\nfrom utils.timer import Timer\nimport tensorflow as tf\nimport numpy as np\nimport os, cv2\n\nfrom nets.resnet_v1 import resnetv1\n\nimport xml.etree.ElementTree as ET\nfrom xml.etree.ElementTree import ElementTree\nfrom xml.dom import minidom\n\nfrom final.config import CLASSES\n\nIMAGES_DIR = os.path.join('labeling/JPEGImages')\nOUTPUT_DIR = os.path.join('labeling/Annotations')\n\ndef create_xml(image_name, shape):\n height, width, channels = shape\n\n xml_annotation = ET.Element('annotation')\n xml_folder = ET.SubElement(xml_annotation, 'folder')\n xml_folder.text = 'JPEGImages'\n xml_filename = ET.SubElement(xml_annotation, 'filename')\n xml_filename.text = image_name\n xml_path = ET.SubElement(xml_annotation, 'path')\n xml_path.text = os.path.abspath(os.path.join(os.getcwd(), IMAGES_DIR, image_name))\n \n xml_source = ET.SubElement(xml_annotation, 'source')\n xml_database = ET.SubElement(xml_source, 'database')\n 
xml_database.text = 'Unknown'\n\n # Configure Size\n xml_size = ET.SubElement(xml_annotation, 'size')\n xml_width = ET.SubElement(xml_size, 'width')\n xml_width.text = str(width)\n xml_height = ET.SubElement(xml_size, 'height')\n xml_height.text = str(height)\n xml_depth = ET.SubElement(xml_size, 'depth')\n xml_depth.text = str(channels)\n\n xml_segmented = ET.SubElement(xml_annotation, 'segmented')\n xml_segmented.text = '0'\n\n return xml_annotation\n\ndef mark_object(xml_element, class_name, box, shape):\n height, width, channels = shape\n\n xmin = int(box[0]) + 1\n ymin = int(box[1]) + 1\n xmax = int(box[2]) + 1\n ymax = int(box[3]) + 1\n truncated = '1' if xmin == 1 or ymin == 1 or xmax == width or ymax == height else '0'\n difficult = '0'\n\n xml_object = ET.SubElement(xml_element, 'object')\n xml_name = ET.SubElement(xml_object, 'name')\n xml_name.text = class_name\n xml_pose = ET.SubElement(xml_object, 'pose')\n xml_pose.text = 'Unspecified'\n xml_truncated = ET.SubElement(xml_object, 'truncated')\n xml_truncated.text = truncated\n xml_difficult = ET.SubElement(xml_object, 'difficult')\n xml_difficult.text = difficult\n xml_box = ET.SubElement(xml_object, 'bndbox')\n xml_xmin = ET.SubElement(xml_box, 'xmin')\n xml_xmin.text = str(xmin)\n xml_ymin = ET.SubElement(xml_box, 'ymin')\n xml_ymin.text = str(ymin)\n xml_xmax = ET.SubElement(xml_box, 'xmax')\n xml_xmax.text = str(xmax)\n xml_ymax = ET.SubElement(xml_box, 'ymax')\n xml_ymax.text = str(ymax)\n return xml_object\n\n\ndef label(sess, net, image_name):\n \"\"\"Detect object classes in an image using pre-computed object proposals.\"\"\"\n\n # Load the input image\n im_file = os.path.join(IMAGES_DIR, image_name)\n assert os.path.exists(im_file), \"Image does not exist: {}\".format(im_file)\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n\n # Visualize detections for each class\n CONF_THRESH = 0.8\n NMS_THRESH = 0.01\n\n xml = create_xml(image_name, im.shape)\n\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n \n inds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n if len(inds) == 0:\n continue\n\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n \n mark_object(xml, cls, bbox, im.shape)\n\n minidom_xml = minidom.parseString(ET.tostring(xml))\n annotation_filename = os.path.splitext(image_name)[0]+'.xml'\n annotation_path = os.path.join(OUTPUT_DIR, annotation_filename)\n with open(annotation_path, \"w\") as xml_file:\n minidom_xml.writexml(xml_file, addindent='\\t', newl='\\n')\n\nif __name__ == '__main__':\n cfg.TEST.HAS_RPN = True # Use RPN for proposals\n cfg.USE_GPU_NMS =False\n\n # model path\n tfmodel = os.path.join('output', 'res101', 'voc_final_trainval', 'default', 'res101_faster_rcnn_iter_1000.ckpt')\n\n print(tfmodel)\t\t\t\t\t\t\t \n if not os.path.isfile(tfmodel + '.meta'):\n \n raise IOError(('{:s} not found.\\nDid you download the proper networks from '\n 'our server and place them properly?').format(tfmodel + '.meta'))\n\n # set config\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n 
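[Editor's note — not part of the dataset. The record in progress here (seq_id 248164866) builds Pascal-VOC annotation XML with `ElementTree` and pretty-prints it through `minidom`, as in its `create_xml`/`mark_object` helpers. Below is a minimal, stdlib-only sketch of the same pattern with made-up image and box values; `voc_annotation` is a hypothetical helper, not the record's API.]

```python
# Editorial sketch: ElementTree + minidom pretty-print, the same pattern
# the record above uses for Pascal-VOC annotations. All values are toys.
import xml.etree.ElementTree as ET
from xml.dom import minidom

def voc_annotation(filename, size, objects):
    """size = (height, width, depth); objects = [(name, xmin, ymin, xmax, ymax)]."""
    height, width, depth = size
    ann = ET.Element("annotation")
    ET.SubElement(ann, "filename").text = filename
    sz = ET.SubElement(ann, "size")
    for tag, val in (("width", width), ("height", height), ("depth", depth)):
        ET.SubElement(sz, tag).text = str(val)
    for name, xmin, ymin, xmax, ymax in objects:
        obj = ET.SubElement(ann, "object")
        ET.SubElement(obj, "name").text = name
        box = ET.SubElement(obj, "bndbox")
        for tag, val in (("xmin", xmin), ("ymin", ymin),
                         ("xmax", xmax), ("ymax", ymax)):
            ET.SubElement(box, tag).text = str(val)
    # Same pretty-printing trick as the record: round-trip through minidom.
    return minidom.parseString(ET.tostring(ann)).toprettyxml(indent="\t")

print(voc_annotation("demo.jpg", (480, 640, 3), [("dog", 48, 240, 195, 371)]))
```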
tfconfig.gpu_options.allow_growth=True\n\n # init session\n sess = tf.Session(config=tfconfig)\n # load network\n net = resnetv1(num_layers=101)\n net.create_architecture(\"TEST\", len(CLASSES),\n tag='default', anchor_scales=[8, 16, 32])\n saver = tf.train.Saver()\n saver.restore(sess, tfmodel)\n\n print('Loaded network {:s}'.format(tfmodel))\n\n for root, dirs, files in os.walk(IMAGES_DIR): \n for filename in files:\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n print('Labeling for {}/{}'.format(IMAGES_DIR, filename))\n label(sess, net, filename)\n","sub_path":"tools/generate_annotations.py","file_name":"generate_annotations.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"104215303","text":"# ライブラリの読み込み\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport librosa\n\n# サンプリングレート\nSR = 16000\n\n# 音声ファイルの読み込み\nx, _ = librosa.load('exercise/exercise17/woman.wav', sr=SR)\n\n#\n# 短時間フーリエ変換\n#\n\n# フレームサイズ\nsize_frame = 4096\t\t\t# 2のべき乗\n\n# フレームサイズに合わせてブラックマン窓を作成\nwindow = np.blackman(size_frame)\n\n# シフトサイズ\nsize_shift = 16000 / 1000\t# 0.001 秒 (10 msec)\n\n# スペクトログラムを保存するlist\nspectrogram = []\n\n# size_shift分ずらしながらsize_frame分のデータを取得\n# np.arange関数はfor文で辿りたい数値のリストを返す\n# 通常のrange関数と違うのは3つ目の引数で間隔を指定できるところ\n# (初期位置, 終了位置, 1ステップで進める間隔)\nfor i in np.arange(0, len(x)-size_frame, size_shift):\n\t\n\t# 該当フレームのデータを取得\n\tidx = int(i)\t# arangeのインデクスはfloatなのでintに変換\n\tx_frame = x[idx : idx+size_frame]\n\n\t# np.fft.rfftを使用するとFFTの前半部分のみが得られる\n\tfft_spec = np.fft.rfft(x_frame * window)\n\n\t# 複素スペクトログラムを対数振幅スペクトログラムに\n\tfft_log_abs_spec = np.log(np.abs(fft_spec))\n\n\t# 計算した対数振幅スペクトログラムを配列に保存\n\tspectrogram.append(fft_log_abs_spec)\n\n\n#\n# スペクトログラムを画像に表示・保存\n#\n\n# 画像として保存するための設定\nfig = plt.figure()\n\n# スペクトログラムを描画\nplt.xlabel('sample')\t\t\t\t\t# x軸のラベルを設定\nplt.ylabel('frequency [Hz]')\t\t# y軸のラベルを設定\nplt.imshow(\n\tnp.flipud(np.array(spectrogram).T),\t\t# 画像とみなすために,データを転地して上下反転\n\textent=[0, len(x), 0, SR/2],\t\t\t# (横軸の原点の値,横軸の最大値,縦軸の原点の値,縦軸の最大値)。\n\taspect='auto',\n\tinterpolation='nearest'\n)\nplt.ylim([0, 2000]) # 縦軸を拡大する。\nplt.show()\n\n# ブラックマン窓を保存\nfig.savefig('exercise/exercise17/plot-spectrogram-woman.png')\n\n","sub_path":"exercise/exercise17/plot_spectrogram_woman.py","file_name":"plot_spectrogram_woman.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"336488563","text":"import numpy as np\nimport warnings\nfrom scipy.ndimage import zoom\n\n\"\"\"\nPreprocessing tools used by the gtr123_models\nCode adapted from https://github.com/lfz/DSB2017\n\"\"\"\n\n\ndef lum_trans(img):\n \"\"\"\n\n Args:\n img: Input image in Hu units\n\n Returns: Image windowed to [-1200; 600] and scaled to 0-255\n\n \"\"\"\n lungwin = np.array([-1200., 600.])\n newimg = (img - lungwin[0]) / (lungwin[1] - lungwin[0])\n newimg[newimg < 0] = 0\n newimg[newimg > 1] = 1\n return (newimg * 255).astype('uint8')\n\n\ndef resample(imgs, spacing, new_spacing, order=2):\n \"\"\"\n\n Args:\n imgs:\n spacing: Input image voxel size\n new_spacing: Output image voxel size\n order: (Default value = 2)\n\n Returns:\n\n \"\"\"\n if len(imgs.shape) == 3:\n new_shape = np.round(imgs.shape * spacing / new_spacing)\n true_spacing = spacing * imgs.shape / new_shape\n resize_factor = new_shape / imgs.shape\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n imgs = zoom(imgs, resize_factor, mode='nearest', 
order=order)\n\n return imgs, true_spacing\n elif len(imgs.shape) == 4:\n n = imgs.shape[-1]\n newimg = []\n\n for i in range(n):\n slice = imgs[:, :, :, i]\n newslice, true_spacing = resample(slice, spacing, new_spacing)\n newimg.append(newslice)\n\n newimg = np.transpose(np.array(newimg), [1, 2, 3, 0])\n return newimg, true_spacing\n else:\n raise ValueError('wrong shape')\n","sub_path":"prediction/src/preprocess/gtr123_preprocess.py","file_name":"gtr123_preprocess.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"453624366","text":"#!/usr/bin/env python2\n\n# matveev.a.s@yandex.ru\n# 2018 Anton Matveev\n\nimport numpy as np\nfrom scipy.interpolate import CubicSpline\n\nclass Quadrupole():\n def __init__(self, L, G, gamma, file=None, var_name='G'):\n self.L = L\n self.G = G\n self.gamma = gamma\n self.file = file\n self.var_name = var_name\n\n if file is not None:\n data = np.loadtxt(file, dtype='float64')\n self._Gspline = CubicSpline(data[:,0], data[:,1])\n\n def set_var(self, var):\n if self.var_name == 'G':\n self.G = var\n\n def csFunc(self, h, g, axis='x'):\n pc = 0.511e6*np.sqrt(self.gamma**2 - 1)\n k = np.sign(g)*np.sqrt(300.*abs(g)/pc)\n #import pdb; pdb.set_trace()\n if k == 0:\n return np.array([1., h, 0., 1.])\n elif (k>0 and axis=='x') or (k<0 and axis=='y'):\n c = np.cos(h*abs(k))\n s = 1./abs(k)*np.sin(h*abs(k))\n return np.array([c, s, -(k**2)*s, c])\n elif (k<0 and axis=='x') or (k>0 and axis=='y'):\n c = np.cosh(h*abs(k))\n s = 1./abs(k)*np.sinh(h*abs(k))\n return np.array([c, s, (k**2)*s, c])\n\n def M(self):\n if self.file is None:\n #import pdb; pdb.set_trace()\n cx, sx, cpx, spx = self.csFunc(self.L, self.G, axis='x')\n cy, sy, cpy, spy = self.csFunc(self.L, self.G, axis='y')\n\n return np.array([[ cx, sx, 0, 0], \\\n [cpx, spx, 0, 0], \\\n [ 0, 0, cy, sy], \\\n [ 0, 0, cpy, spy]])\n else:\n pass\n\n def twM(self):\n if self.file is None:\n cx, sx, cpx, spx = self.csFunc(self.L, self.G, axis='x') \\\n * np.array([1., 0.01, 100., 1.])\n cy, sy, cpy, spy = self.csFunc(self.L, self.G, axis='y') \\\n * np.array([1., 0.01, 100., 1.])\n\n twMx = np.array([[ cx**2, -2*cx*sx, sx**2], \\\n [ -cx*cpx, cx*spx+cpx*sx, -sx*spx], \\\n [ cpx**2, -2*cpx*spx, spx**2]])\n\n twMy = np.array([[ cy**2, -2*cy*sy, sy**2], \\\n [ -cy*cpy, cy*spy+cpy*sy, -sy*spy], \\\n [ cpy**2, -2*cpy*spy, spy**2]])\n\n return np.block([[ twMx, np.zeros((3,3))], \n [np.zeros((3,3)), twMy]])\n else:\n pass \n\nif __name__ == '__main__':\n pc = 0.511e6*np.sqrt(24.5**2-1)\n quad = Quadrupole(16, 4.90237618e-4/300*pc, 24.5)\n print(quad.M())\n print()\n print(quad.twM())\n","sub_path":"optics/quadrupole.py","file_name":"quadrupole.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"14086881","text":"import threading\nimport time\n\n\nclass CtsThread:\n\n def __init__(self, output):\n self._output = output\n self._cts_magic = [0xef, 0xbe, 0xad, 0xde]\n self._count = 0\n self.cts_state = False\n self._cts_thread = threading.Thread(target=self._check_cts, name=\"CtsThread\")\n self._cts_thread.start()\n\n def set_cts(self, c):\n self.cts_state = c\n\n def _check_cts(self):\n while True:\n byte = self._output.read()\n if byte == self._cts_magic[self._count].to_bytes(1, byteorder='big'):\n self._count += 1\n if self._count >= len(self._cts_magic):\n self._count = 0\n self.cts_state = True\n 
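[Editor's note — not part of the dataset. The surrounding record (seq_id 14086881) scans a serial byte stream for the magic sequence 0xEF 0xBE 0xAD 0xDE (little-endian 0xDEADBEEF) to assert CTS. A small, testable sketch of that matcher follows; `feed` and the sample byte string are illustrative. One hedged observation: the record's `_check_cts` never resets `_count` on a mismatch, so it also accepts the four bytes with other bytes interleaved — the sketch resets instead, which makes the match strictly contiguous.]

```python
# Editorial sketch: the magic-byte matcher from _check_cts above, extracted
# into a pure function so it can be tested without a serial port.
MAGIC = bytes([0xEF, 0xBE, 0xAD, 0xDE])  # little-endian 0xDEADBEEF, as above

def feed(state, byte):
    """Advance the matcher by one byte; return (new_state, full_match)."""
    if byte == MAGIC[state]:
        state += 1
        if state == len(MAGIC):
            return 0, True      # whole magic word seen -> assert CTS
    else:
        # Unlike the record, reset on a mismatch (restart mid-stream if this
        # byte could begin a new match).
        state = 1 if byte == MAGIC[0] else 0
    return state, False

state, seen = 0, False
for b in b"\x00\xef\xbe\xad\xde\x01":   # toy stream with the magic embedded
    state, hit = feed(state, b)
    seen = seen or hit
print(seen)  # True
```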
time.sleep(0.02)\n\n\n","sub_path":"src/output/CtsThread.py","file_name":"CtsThread.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"566397796","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\n\n\nclass MaoyanmoviePipeline:\n def process_item(self, item, spider):\n film_name = item['film_name']\n film_type = item['film_type']\n plan_date = item['plan_date']\n output = f'|{film_name}|\\t|{film_type}|\\t|{plan_date}|\\n\\n'\n with open('./maoyanmovie.csv', 'a+', encoding='utf-8') as article:\n article.write(output)\n\n return item\n","sub_path":"week01/maoyanmovie/maoyanmovie/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"88163387","text":"from .models import Restaurant, Dish, Status, OrderList, People, Order\nfrom .forms import OrderForm\nfrom django.utils import timezone\nfrom spider.nearby_restaunt_manager import RestauntsManager\nfrom .adapter import save_ele_restaurant\ndef get_status(usage):\n try:\n status = Status.objects.get(usage = usage)\n except Status.DoesNotExist:\n status = None\n if status != None:\n return status.status\n else:\n return False\n\ndef set_status(usage, status_flag):\n try:\n status = Status.objects.get(usage = usage)\n except Status.DoesNotExist:\n status = None\n if status != None:\n status.status = status_flag\n status.save()\n return status\n else:\n new_status = Status()\n new_status.usage = usage\n new_status.status = status_flag\n new_status.save()\n return new_status\n\ndef setup_order_list(restaurant):\n order_list = OrderList()\n order_list.timestamp = timezone.now()\n order_list.restaurant = restaurant\n order_list.is_submitted = False\n order_list.save()\n return order_list\n\ndef update_customer_order(order_list, request):\n customer = People.objects.get(pk = request.POST['customer'])\n new_order_list = OrderList.objects.get(pk = request.POST['order_list'])\n if order_list != new_order_list:\n return\n try:\n order = order_list.order_set.get(customer = customer)\n except Order.DoesNotExist:\n order = None\n if order != None:\n #order.dishes.through.objects.all().delete()\n #order.save()\n for pk in request.POST['dishes']:\n order.dishes.add(Dish.objects.get(pk = pk))\n order.save()\n return order\n else:\n new_order = Order()\n new_order.order_list = order_list\n new_order.customer = customer\n new_order.save()\n for pk in request.POST['dishes']:\n new_order.dishes.add(Dish.objects.get(pk = pk))\n new_order.save()\n return new_order\n\ndef get_latest_order_list():\n order_lists = OrderList.objects.order_by('-timestamp')\n if order_lists.count() > 0:\n order_list = order_lists[0]\n return order_list\n return None\n\ndef deduct_expense_from_people(order_list):\n orders = order_list.order_set.all()\n for order in orders:\n for dish in order.dishes.all():\n order.customer.money_left -= dish.price\n order.customer.save()\n\nrestaunt_manager = RestauntsManager('shanghai')\ndef refresh_nearby_restaurants():\n for restaurant in restaunt_manager.restaunts:\n 
save_ele_restaurant(restaurant)","sub_path":"order/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"213673365","text":"import pygame\nimport sys\nfrom pygame.locals import *\n\n# init Pygame\npygame.init()\n\nsize = width, height = 600, 500\nspeed = [-2, 1]\nbg = (255, 255, 255) # RGB\n\n#Surface\nscreen = pygame.display.set_mode(size)\n#set the title\npygame.display.set_caption(\"I am a Rabbit!I am running\")\n\n#load the image\nrabbit = pygame.image.load(\"badguy.png\")\n#get the position-rect of image\nposition = rabbit.get_rect()\n\nl_head = rabbit\nr_head = pygame.transform.flip(rabbit, True, False)\n\n\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n if event.type == KEYDOWN:\n if event.key == K_LEFT:\n rabbit = l_head\n speed = [-1, 0]\n if event.key == K_RIGHT:\n rabbit = r_head\n speed = [1, 0]\n if event.key == K_UP:\n speed = [0, -1]\n if event.key == K_DOWN:\n speed = [0, 1]\n\n #move\n position = position.move(speed)\n\n if position.left < 0 or position.right > width:\n # transform\n rabbit = pygame.transform.flip(rabbit, True, False)\n # move oppsite\n speed[0] = -speed[0]\n\n if position.top < 0 or position.bottom > height:\n speed[1] = -speed[1]\n\n # fill the bg\n screen.fill(bg)\n # update img\n screen.blit(rabbit, position)\n # update screen\n pygame.display.flip()\n # delay\n pygame.time.delay(10)\n","sub_path":"Assignment1/Python_Game/resources/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"359006996","text":"from rest_framework import status\nfrom django.urls import reverse\nfrom rest_framework.test import APITestCase\nfrom api.models import Region\nfrom api.serializers import RegionSerializer\n\n\nclass GetAllRegionsTest(APITestCase):\n \"\"\" Test module for GET all puppies API \"\"\"\n\n def setUp(self):\n Region.objects.create(code=1, name='region 1')\n Region.objects.create(code=2, name='region 2')\n Region.objects.create(code=3, name='region 3')\n Region.objects.create(code=4, name='region 4')\n\n def test_get_all_regions(self):\n url = reverse('regions-list')\n response = self.client.get(url, format='json')\n regions = Region.objects.all()\n serializer = RegionSerializer(regions, many=True)\n # have to use data['results'] instead of data because of the custom region pagination ApiCustomSetPagination\n self.assertEqual(response.data['results'], serializer.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n","sub_path":"proctest/api/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"210292558","text":"from fsl.file_prep import fast_seg,os_path_2_fsl, create_inv_mat, reg_from_mprage_2_chm_inv\nimport glob, os\n\nmain_fol = 'F:\\Hila\\siemens'\nall_subj_fol = glob.glob(f'{main_fol}{os.sep}*{os.sep}')\n\nexperiments = ['D31d18','D45d13','D60d11']\n\nfor fol in all_subj_fol:\n for experiment in experiments:\n exp_fol = os_path_2_fsl(f'{fol}{experiment}{os.sep}')\n subj_mprage = os_path_2_fsl(f'{fol}MPRAGE.nii')\n diff_file_name = f'{exp_fol}diff_corrected_{experiment}.nii'\n\n in_brain = subj_mprage[:-4]\n out_brain = subj_mprage[:-4]+'_brain'\n diff_file_1st = diff_file_name[:-4]+'_1st'\n\n # BET for 
MPRAGE:\n cmd = f'bash -lc \"bet {in_brain} {out_brain} -f 0.45 -g -0.3\"'\n cmd = cmd.replace(os.sep, '/')\n os.system(cmd)\n # save first corrected diff:\n cmd = fr'bash -lc \"fslroi {diff_file_name} {diff_file_1st} 0 1\"'\n cmd = cmd.replace(os.sep, '/')\n os.system(cmd)\n\n ''' Registration from MPRAGE to 1st CHARMED scan using inverse matrix of CHARMED to MPRAGE registration:\n From CHARMED to MPRAGE:'''\n subj_first_charmed = diff_file_name[:-4] + '_1st.nii'\n out_registered = diff_file_name[:-4] + '_1st_reg.nii'\n out_registered_mat = out_registered[:-4] + '.mat'\n options = '-bins 256 -cost normmi -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12'\n cmd = f'bash -lc \"flirt -ref {subj_mprage} -in {subj_first_charmed} -out {out_registered} -omat {out_registered_mat} {options}\"'\n cmd = cmd.replace(os.sep, '/')\n os.system(cmd)\n\n '''Creation of inverse matrix: '''\n inv_mat = create_inv_mat(out_registered_mat)\n\n '''From MPRAGE to CHARMED using the inverse matrix: '''\n out_registered = f'{exp_fol}mprage_reg.nii'\n cmd = f'bash -lc \"flirt -in {out_brain} -ref {subj_first_charmed} -out {out_registered} -applyxfm -init {inv_mat}\"'\n cmd = cmd.replace(os.sep, '/')\n os.system(cmd)\n\n '''FAST segmentation: '''\n fast_seg(out_registered)\n","sub_path":"fsl/FAST_siemens.py","file_name":"FAST_siemens.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"313844575","text":"from __future__ import division\r\nfrom math import *\r\nfrom numpy import *\r\nimport numpy as np\r\nfrom Two_EField_Comps import *\r\n\r\n\r\ndef Stokes(EFieldComps):\r\n #Calculate componets of Efields on each axis\r\n Ecx,Ecy,Ecz=loadtxt('EField_Comps.dat', unpack=True)\r\n outfile_P=open('Polarization.dat','w')\r\n \r\n a=array([sqrt(2),sqrt(2)]) #rotates axes\r\n b=array([sqrt(2),-sqrt(2)])\r\n\r\n EZ=np.zeros([Ecx.size])\r\n EY=np.zeros([Ecx.size])\r\n Ea=np.zeros([Ecx.size])\r\n Eb=np.zeros([Ecx.size])\r\n \r\n for i in range(Ecx.size):\r\n Ec=array([Ecy[i], Ecz[i]])\r\n\r\n EZ[i]=Ecz[i]\r\n EY[i]=Ecy[i]\r\n\r\n Ea[i]=np.dot(Ec,a)\r\n Eb[i]=np.dot(Ec,b)\r\n\r\n #Compute Stokes Parameters\r\n EZ2=EZ**2\r\n EY2=EY**2\r\n Ea2=Ea**2\r\n Eb2=Eb**2\r\n\r\n I=sum(EZ2)/Ecx.size + sum(EY2)/Ecx.size\r\n Q=sum(EZ2)/Ecx.size - sum(EY2)/Ecx.size\r\n U=sum(Ea2)/Ecx.size - sum(Eb2)/Ecx.size\r\n P=np.sqrt(Q**2+U**2)/I\r\n\r\n outfile_P.write(str(P))\r\n outfile_P.close()\r\n\r\n return P\r\n\r\n#print Stokes(EField_Comps(Detect))\r\n\r\n\r\n","sub_path":"Three_Stokes.py","file_name":"Three_Stokes.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"592263797","text":"'''\nMarks all tabs and two or more spaces in each line with separate colors\n\nConfig summary (see README.md for details):\n\n # key binding\n { \"keys\": [\"ctrl+alt+w\"], \"command\": \"hws_toggle_whitespaces\" }\n\n # file settings\n {\n \"highlight_whitespaces_space_highlight_scope_name\": \"invalid\",\n \"highlight_whitespaces_tab_highlight_scope_name\": \"invalid\",\n \"highlight_whitespaces_file_max_size\": 1048576,\n \"highlight_whitespaces_enabled\": true,\n \"highlight_whitespaces_check_spaces\": true,\n \"highlight_whitespaces_check_tabs\": true,\n \"highlight_whitespaces_single_space\": false,\n \"highlight_last_whitespace\": true\n }\n\nForked from https://github.com/SublimeText/TrailingSpaces/ by Jean-Denis Vauguet , Oktay Acikalin \n\n@author: Kemal 
Hadimli \n@license: MIT (http://www.opensource.org/licenses/mit-license.php)\n@since: 2012-10-05\n'''\n\nimport sublime\nimport sublime_plugin\n\nDEFAULT_MAX_FILE_SIZE = 1048576\nDEFAULT_COLOR_SCOPE_NAME = \"invalid\"\nDEFAULT_IS_ENABLED = True\nDEFAULT_CHECK_SPACES = True\nDEFAULT_SINGLE_SPACE = False\nDEFAULT_CHECK_TABS = True\nDEFAULT_LAST_WHITESPACE = False\n\n#Set whether the plugin is on or off\nhws_settings = sublime.load_settings('highlight_whitespaces.sublime-settings')\nhws_enabled = bool(hws_settings.get('highlight_whitespaces_enabled',\n DEFAULT_IS_ENABLED))\n\ndef get_settings():\n s = sublime.load_settings('highlight_whitespaces.sublime-settings')\n return s\n\n# Determine if the view is a find results view\ndef is_find_results(view):\n return view.settings().get('syntax') and \"Find Results\" in view.settings().get('syntax')\n\n# Return an array of regions matching whitespaces.\ndef find_whitespaces_spaces(view):\n hws_settings = get_settings()\n last_whitespace = bool(hws_settings.get('highlight_last_whitespace',DEFAULT_LAST_WHITESPACE))\n single_space = bool(hws_settings.get('highlight_whitespaces_single_space',DEFAULT_SINGLE_SPACE))\n if single_space:\n regex = ' +'\n else:\n regex = ' {2,}|\\t | \\t'\n if last_whitespace:\n regex += '| {1,}$'\n\n return view.find_all(regex)\n\ndef find_whitespaces_tabs(view):\n return view.find_all('\\t+')\n\n\n# Highlight whitespaces\ndef highlight_whitespaces(view):\n hws_settings = get_settings()\n\n max_size = hws_settings.get('highlight_whitespaces_file_max_size',\n DEFAULT_MAX_FILE_SIZE)\n space_scope_name = hws_settings.get('highlight_whitespaces_space_highlight_scope_name',\n DEFAULT_COLOR_SCOPE_NAME)\n tab_scope_name = hws_settings.get('highlight_whitespaces_tab_highlight_scope_name',\n DEFAULT_COLOR_SCOPE_NAME)\n if view.size() <= max_size and not is_find_results(view):\n if hws_settings.get('highlight_whitespaces_check_spaces', DEFAULT_CHECK_SPACES):\n space_regions = find_whitespaces_spaces(view)\n view.add_regions('WhitespacesHighlightListener',\n space_regions, space_scope_name, '',\n sublime.DRAW_EMPTY)\n if hws_settings.get('highlight_whitespaces_check_tabs', DEFAULT_CHECK_TABS):\n tab_regions = find_whitespaces_tabs(view)\n view.add_regions('WhitespacesHighlightListener2',\n tab_regions, tab_scope_name, '',\n sublime.DRAW_EMPTY)\n\n\n# Clear all white spaces\ndef clear_whitespaces_highlight(window):\n for view in window.views():\n view.erase_regions('WhitespacesHighlightListener')\n view.erase_regions('WhitespacesHighlightListener2')\n\n\n# Toggle the event listner on or off\nclass HwsToggleWhitespacesCommand(sublime_plugin.WindowCommand):\n def run(self):\n global hws_enabled\n hws_enabled = False if hws_enabled else True\n\n # If toggling on, go ahead and perform a pass,\n # else clear the highlighting in all views\n if hws_enabled:\n highlight_whitespaces(self.window.active_view())\n else:\n clear_whitespaces_highlight(self.window)\n\n\n# Highlight matching regions.\nclass WhitespacesHighlightListener(sublime_plugin.EventListener):\n def on_modified(self, view):\n if hws_enabled:\n highlight_whitespaces(view)\n\n def on_activated(self, view):\n if hws_enabled:\n highlight_whitespaces(view)\n\n def on_load(self, view):\n if hws_enabled:\n highlight_whitespaces(view)\n\nclass WhitespacesHighlightListener2(sublime_plugin.EventListener):\n def on_modified(self, view):\n if hws_enabled:\n highlight_whitespaces(view)\n\n def on_activated(self, view):\n if hws_enabled:\n highlight_whitespaces(view)\n\n def on_load(self, 
view):\n        if hws_enabled:\n            highlight_whitespaces(view)\n\n","sub_path":"highlight_whitespaces.py","file_name":"highlight_whitespaces.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"643536013","text":"import socket\nimport os\nimport time\n\n\nfrom flask import Flask\n\napplication = Flask(__name__)\n\n@application.route(\"/\")\ndef hello():\n    dir = \"/mnt/\"\n    fn = dir + \"log.txt\"\n\n    if os.path.exists(fn):\n        append_write = 'a'\n    else:\n        append_write = 'w'\n\n    file = open(fn, append_write)\n    file.write(\"Hostname: \" + str(socket.gethostname()) + \" Timestamp: \" + str(time.time()) + \"
\")\n file.close()\n\n file = open(fn, 'r')\n return file.read()\n\n\nif __name__ == \"__main__\":\n application.run()\n","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"85657688","text":"from fun_sbr import *\nimport os\nimport json\nimport requests\nimport sys\nimport time\n \n# cd to Script Directory\n##scriptDirectory = os.path.dirname(os.path.realpath(sys.argv[0]))\n##os.chdir(scriptDirectory)\ndir_local=path_sh ##scriptDirectory+'/../sh/'\ndir_local_mic=path_py #scriptDirectory\n\n### Load server link\n##dir_online_file = open(\"../../credentials/serverPath.txt\", \"r\")\ndir_online = app_url +'pi/'# the server path already a global variable #dir_online_file.read()\n\n###### To send the current status in RP\nstatus_web=-1 #blue led status 0,1\nstatus_local=-1 #green led status 0,1\nstatus_screen=0 #screen status 0,1\nstatus_autoscreen=0 #screen status 0,1\nstatus_chrome=0 #chrome status 0,1\nfile_relay=path_data+'mic_status_relay.txt'\nfile_web=path_data+'mic_status_web.txt'\nfile_screen=path_data+'screen_status.txt'\nfile_autoscreen=path_data+'autoscreen_status.txt'\nfile_chrome=path_data+'chrome_status.txt'\n####\nwhat_now=\"\"\nwhile True:\n try:\n ######\n title= \"\"\n status= \"\"\n status_web=str(get_it(file_web)) # get the blue led status 0,1 from the file status_web.txt\n status_local=str(get_it(file_relay)) # get the relay status 0,1 (as well as green led status) from the file status_relay.txt\n status_screen=str(get_it(file_screen))\n status_autoscreen=str(get_it(file_autoscreen))\n status_chrome=str(get_it(file_chrome))\n try:\n f111 = open(path_data+\"sbr.sbr\")\n x111=f111.read()\n x111=x111.strip()\n except:\n x111=\"111\"\n h=requests.get(dir_online+'make.php?city='+city+'&device_id='+device_id+'&device_type='+device_type+'&'+'action=update&status_chrome='+status_chrome+'&status_autoscreen='+status_autoscreen+'&status_screen='+status_screen+'&status_web='+status_web+'&status_local='+status_local+'&mac='+x111) \t\n if what_now==\"\" and x111!=\"111\":\n if h.text==\"BLOCKED\" or h.text==\"PASSED\":\n what_now=h.text\n #print(what_now)\n os.system(\"sudo echo \"+x111+what_now+\" | base64 > \"+path_data+\"sbr2.sbr\")\n #print(dir_online+'make.php?city='+city+'&device_id='+device_id+'&device_type='+device_type+'&'+'action=update&status_chrome='+status_chrome+'&status_autoscreen='+status_autoscreen+'&status_screen='+status_screen+'&status_web='+status_web+'&status_local='+status_local) \t\n ######\n r = requests.get(dir_online+ city+'/'+device_id+'/internet.json')\n\n if r.status_code != 404:\n x=r.json()\n # print x\n title= x['posts'][0]['title']\n status= x['posts'][0]['status']\n r = requests.get(dir_online+'make.php?city='+city+'&device_id='+device_id+'&device_type='+device_type+'&'+'action=rm')\n ####################\n if(title==\"RELAY\" and device_type!='monitor'):\n if(status==\"ON\"):\n sys.argv = ['0','RELAY_ON']\n else:\n sys.argv = ['0','RELAY_OFF']\n runx(dir_local_mic+'mic_control.py')\n ####################\n if(title==\"WEB\" and device_type!='monitor' ):\n if(status==\"ON\"):\n sys.argv = ['0','WEB_ON']\n else:\n sys.argv = ['0','WEB_OFF']\n runx(dir_local_mic+'mic_control.py')\n ####################\n if(title==\"PI\"):\n if(status==\"REBOOT\"):\n GPIO.cleanup()\n os_system('reboot')\n ####################\n if(title==\"CHROME\" and device_type!='mic'):\n if(status==\"ON\"):\n do_it(1,file_chrome)\n try:\n 
time.sleep(1.0)\n os_system('sh '+dir_local+'Chromium_Open.sh &')\n except:\n pass\n\n \n if(status==\"OFF\"):\n do_it(0,file_chrome)\n try:\n os_system('pkill chromium')\n time.sleep(1.0)\n os_system('sh '+dir_local+'Chromium_Close.sh &')\n except:\n pass\n \n \n ####################\n if(title==\"SCREEN\" and device_type!='mic'):\n if(status==\"ON\"):\n #dummyVariable=1\n #vcgencmd display_power 1\n '''\n os_system('sh '+dir_local+'Chromium_Open.sh &')\n time.sleep(1.0)\n '''\n os_system('sh '+dir_local+'screen_on.sh')\n if(status==\"OFF\"):\n #dummyVariable=0\n #vcgencmd display_power 0\n '''\n os_system('sh '+dir_local+'Chromium_Close.sh &')\n time.sleep(1.0)\n '''\n os_system('sh '+dir_local+'screen_off.sh')\n ####################\n if(title==\"AUTOSCREEN\" and device_type!='mic'):\n if(status==\"ON\"):\n do_it(1,file_autoscreen)\n if(status==\"OFF\"):\n do_it(0,file_autoscreen)\n else:\n #print (\"There is no acion online\")\n time.sleep(1.0)\n ####################\n except:\n pass\n","sub_path":"py/mic_listen_online.py","file_name":"mic_listen_online.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"170506824","text":"from flask import Flask,render_template, request, url_for, redirect, jsonify\nfrom flask_mysqldb import MySQL\n\napp = Flask(__name__)\n\napp.config[\"MYSQL_HOST\"]=\"localhost\"\napp.config[\"MYSQL_DB\"]=\"website_crud\"\napp.config[\"MYSQL_USER\"]=\"root\"\napp.config[\"MYSQL_PASSWORD\"]=\"\"\napp.config[\"MYSQL_CURSORCLASS\"]=\"DictCursor\"\nmysql = MySQL(app)\n\napp.config['JSON_SORT_KEYS'] = False\n\n@app.route(\"/\")\ndef index():\n title=\"Index\"\n return render_template(\"index.html\",title=title)\n \n@app.route(\"/buku\",methods=[\"GET\",\"POST\"])\ndef buku():\n if request.method==\"GET\" :\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM buku\")\n data = cur.fetchall()\n cur.close()\n\n if data :\n return jsonify({'status':'success', 'message':'Data found', 'data': data })\n else:\n return jsonify({'status':'failed', 'message':'Data not found', 'data': [] })\n\n\n elif request.method==\"POST\" :\n penulis=request.form[\"penulis\"]\n judul=request.form[\"judul\"]\n kota=request.form[\"kota\"]\n penerbit=request.form[\"penerbit\"]\n tahun=request.form[\"tahun\"]\n \n data=(penulis,judul,kota,penerbit,tahun)\n query=\"INSERT INTO buku (penulis,judul,kota,penerbit,tahun) VALUES (%s,%s,%s,%s,%s)\"\n cur = mysql.connection.cursor()\n result=cur.execute(query,data)\n mysql.connection.commit()\n cur.close()\n \n if result > 0 :\n return jsonify({'status':'success', 'message':'Data has been created', 'data': [] })\n else:\n return jsonify({'status':'failed', 'message':'Data not created', 'data': [] })\n\n \n else :\n return \"Error method is not allowed\"\n\n\n\n@app.route(\"/buku/\",methods=[\"GET\", \"PUT\", \"DELETE\"])\ndef bukuBy(id):\n if request.method==\"GET\" :\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM buku WHERE id={}\".format(id))\n data = cur.fetchone()\n cur.close()\n\n if data :\n return jsonify({'status':'success', 'message':'Data found', 'data': data })\n else:\n return jsonify({'status':'failed', 'message':'Data not found', 'data': [] })\n\n\n elif request.method==\"PUT\" :\n penulis=request.form[\"penulis\"]\n judul=request.form[\"judul\"]\n kota=request.form[\"kota\"]\n penerbit=request.form[\"penerbit\"]\n tahun=request.form[\"tahun\"]\n \n data_update=(penulis,judul,kota,penerbit,tahun)\n query=\"UPDATE buku SET 
penulis = %s, judul = %s, kota = %s, penerbit = %s, tahun = %s WHERE id = {}\".format(id)\n cur = mysql.connection.cursor()\n result=cur.execute(query,data_update)\n mysql.connection.commit()\n cur.close()\n\n if result > 0 :\n return jsonify({'status':'success', 'message':'Data has been updated', 'data': [] })\n else:\n return jsonify({'status':'failed', 'message':'Data not updated', 'data': [] })\n\n\n elif request.method==\"DELETE\" :\n query=\"DELETE FROM buku WHERE id={}\".format(id)\n cur = mysql.connection.cursor()\n result=cur.execute(query)\n mysql.connection.commit()\n cur.close()\n \n if result > 0:\n return jsonify({'status':'success', 'message':'Data has been deleted', 'data': [] })\n \n else:\n return jsonify({'status':'failed', 'message':'Data not deleted', 'data': [] })\n\n else :\n return \"Error method is not allowed\"\n \nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"223626503","text":"from floodsystem.stationdata import build_station_list\nfrom floodsystem.geo import stations_within_radius\n\ndef run():\n \"\"\"Requirements for Task 1C\"\"\"\n\n # Build list of stations\n stations = build_station_list()\n centre = (52.2053, 0.1218)\n stations_within_rad = stations_within_radius(stations, centre, 10.0)\n print(stations_within_rad)\n\n\nif __name__ == \"__main__\":\n print(\"*** Task 1C: CUED Part IA Flood Warning System ***\")\n\n # Run Task1C\n run()\n","sub_path":"Task1C.py","file_name":"Task1C.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"559161382","text":"#!/usr/bin/python \n# This is server.py file\n# author: zhj@ihep.ac.cn\n# 2019-06-18 created\n\nfrom time import sleep\nimport sys\nimport lib\nfrom lib import rbcp\nfrom lib import i2c\nfrom lib import mux\nfrom lib import sysmon\nfrom lib import ucd90xxx\nfrom lib import gpio\nfrom lib import spi\nfrom lib import klaus6\nfrom lib import klaus6config\nfrom lib import gui\nfrom lib import interface\nfrom lib import sitcp\nfrom lib import EDM\nfrom lib import afg3252c\nfrom ROOT import TFile\nfrom ROOT import TTree\nfrom ROOT import TBranch\nfrom array import array\n\n# command line argument parser\ndef get_parser():\n import argparse\n parser = argparse.ArgumentParser(description='Run KLauS6 DAQ.')\n parser.add_argument(\"--nseq\", type=int, default=1, help='sequences to be processed')\n parser.add_argument(\"--test\", default=False, help=\"test mode using Klaus6_bitflow_test.txt, instead of from output of Klaus6\")\n parser.add_argument(\"--quiet\", default=True, help=\"be quiet\")\n parser.add_argument(\"--output\", default=\"test-output.root\", help=\"specify output filename\")\n return parser\n\nparser = get_parser()\nargs = parser.parse_args()\nprint(args)\n\ndef printf(format, *outs):\n sys.stdout.write(format % outs)\n\noutput = TFile(args.output, \"recreate\")\ntree = TTree(\"dump\", \"dumptree\")\n\nasicid = array('i',[0])\nchannel = array('i',[0])\ngroupID = array('i',[0])\nchannelID = array('i',[0])\ngainsel_evt = array('i',[0])\nADC_10b = array('i',[0])\nADC_6b = array('i',[0])\nADC_PIPE = array('i',[0])\nT_CC = array('i',[0])\nT_MC = array('i',[0])\nT_FC = array('i',[0])\n\n# \"*/i\": i means unsigned int with 32 
bit\ntree.Branch(\"asicid\",asicid,\"asicid/i\")\ntree.Branch(\"channel\",channel,\"channel/i\")\ntree.Branch(\"groupID\",groupID,\"groupID/i\")\ntree.Branch(\"channelID\",channelID,\"channelID/i\")\ntree.Branch(\"gainsel_evt\",gainsel_evt,\"gainsel_evt/i\")\ntree.Branch(\"ADC_10b\",ADC_10b,\"ADC_10b/i\")\ntree.Branch(\"ADC_6b\",ADC_6b,\"ADC_6b/i\")\ntree.Branch(\"ADC_PIPE\",ADC_PIPE,\"ADC_PIPE/i\")\ntree.Branch(\"T_CC\",T_CC,\"T_CC/i\")\ntree.Branch(\"T_MC\",T_MC,\"T_MC/i\")\ntree.Branch(\"T_FC\",T_FC,\"T_FC/i\")\n\n########################################\n# main\n# afg=afg3252c.afg3252c()\n\n# afg.setOutput(on=0)\n# afg.setFrequency(10)\n# afg.setPluse(10000)\n# afg.setVoltage(100)\n# afg.setEdgeLeadin(2.5)\n# afg.setEdgeTrailing(2.5)\n# afg.setOutput(on=1)\n\n# afg.close()\n\nsitcp = sitcp.sitcp()\nwhile (0==sitcp.getLinkStatus()):\n sitcp.resetLink()\n sleep(1)\n print(\"Relink\")\n\nsitcp.open()\nsleep(5)\ni_asic=-1\nfor n in range(args.nseq):\n sleep(2)\n data = sitcp.readEvents()\n length = len(data)\n i = 0\n ready = False\n print(\"**** length is \", length)\n #for j in range(0, length):\n # print('0x%x'%data[j])\n\n while i0 and (i-6)>=0):\n if((data[i]==0x3F) and (data[i+1]<0x08) and (data[i-6]==0x3F) and (data[i-2]==0xFF) and (data[i-1]==0xFF)):\n i_asic = data[i+1] & 0xFF\n ready = True\n\n if(data[i]!=0x3F and ready):\n bytes_i_event = data[i:(i+6)]\n i_event = EDM.EDM(bytes_i_event)\n if not args.quiet:\n if i == 0:\n i_event.printHeader()\n i_event.print()\n \n asicid [0]= i_asic\n channel [0]= i_event.channel\n groupID [0]= i_event.groupID\n channelID [0]= i_event.channelID\n gainsel_evt[0]= i_event.gainsel_evt\n ADC_10b [0]= i_event.ADC_10b\n ADC_6b [0]= i_event.ADC_6b\n ADC_PIPE [0]= i_event.ADC_PIPE\n T_CC [0]= i_event.T_CC\n T_MC [0]= i_event.T_MC\n T_FC [0]= i_event.T_FC\n tree.Fill()\n \n if(ready):\n i += 6\n if(not ready):\n i += 1\n if(i+6>length):\n break\n if(n>args.nseq):\n break\n\noutput.Write()\noutput.Close()\n\nsitcp.close()\n","sub_path":"script/DAQ.py","file_name":"DAQ.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"94023627","text":"import cv2\nimport numpy\nimport socket\nimport struct\nimport threading\nfrom io import BytesIO\nfrom datetime import datetime\nfrom udp_packages import UdpPacketsHandler, UdpPacket\n\nclass Streamer:\n MAX_UDP_PACKAGE_SIZE = 65535\n \n def __init__(self, hostname, port):\n threading.Thread.__init__(self)\n\n self.hostname = hostname\n self.port = port\n self.streaming = True\n self.jpeg = None\n self.face_cascade = cv2.CascadeClassifier(\n 'model/haarcascades/haarcascade_frontalface_default.xml')\n\n def get(self):\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n print('Socket created')\n\n s.bind((self.hostname, self.port))\n print('Socket bind complete')\n\n try:\n udp_handler = UdpPacketsHandler()\n start_time = datetime.now()\n while True:\n data = s.recv(Streamer.MAX_UDP_PACKAGE_SIZE)\n if data == bytes(\"end\", \"ascii\"):\n self.streaming = False\n continue\n\n # Processes the package. 
If the package completes an image, data will not be None.\n image_data = udp_handler.process_packet(UdpPacket.decode(data))\n if image_data:\n # Successfully joins packages and gets an image\n print(\"Took %.3f (ms) for receiving the image %d KB with UDP.\" %\n ((datetime.now() - start_time).total_seconds() * 1000, len(image_data) // 1024))\n \n start_time = datetime.now() \n # Convert the byte array to a 'jpeg' format\n memfile = BytesIO()\n memfile.write(image_data)\n memfile.seek(0)\n image = numpy.load(memfile)[\"frame\"]\n\n # Classfication\n faces = self.face_cascade.detectMultiScale(image, 1.3, 5)\n for (x, y, w, h) in faces:\n image = cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)\n\n ret, jpeg = cv2.imencode('.jpg', image)\n yield jpeg.tobytes()\n\n print(\"Took %.3f (ms) for processsing the image.\" % \n ((datetime.now() - start_time).total_seconds() * 1000))\n\n # Reset the time\n start_time = datetime.now()\n except Exception as e:\n s.close()\n print('Closed')\n","sub_path":"udp_streamer.py","file_name":"udp_streamer.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"411138646","text":"# Graph out G code path\n\nwith open('gcode.txt','r') as f:\n x = f.readlines()\n\nlinecount = 0\ncommand = \"\"\ncodes = ['X','x','Y','y','G','g','M','m',]\nasskeynum = [48,49,50,51,52,53,54,55,56,57]\nfor line in x:\n if any('X' in s for s in line)\\\n or any('x' in s for s in line)\\\n or any('Y' in s for s in line)\\\n or any('y' in s for s in line)\\\n or any('G' in s for s in line)\\\n or any('g' in s for s in line)\\\n or any('M' in s for s in line)\\\n or any('m' in s for s in line):\n for char in line:\n if char in codes:\n command = command + char\n continue\n if ord(char) in asskeynum:\n command = command + char\n continue\n if ord(char) == 46:\n command = command + char\n continue\n if ord(char) == 32:\n continue\n if ord(char) == 10:\n print(command)\n command = \"\"\n break\n\n","sub_path":"utilities/cncgraphics.py","file_name":"cncgraphics.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"128655078","text":"from __future__ import print_function\nfrom datetime import datetime\nimport numpy as np\nimport os\nimport random\nimport time\nimport pickle\nfrom pyniel.python_tools.path_tools import make_dir_if_not_exists\nimport pandas as pd\n\nfrom navrep.models.tcn import reset_graph, default_hps, MDNTCN\nfrom navrep.tools.commonargs import parse_common_args\n\ncommon_args, _ = parse_common_args()\nVARIANT = common_args.environment\nprint(common_args)\n\n\nhps = default_hps()\n# hps.batch_size = 100\n# hps.max_seq_len = 1000\nN_EPOCHS = 10000\nSTART_TIME = datetime.now().strftime(\"%Y_%m_%d__%H_%M_%S\")\n_Z = 32\n\n# hps = hps._replace(learning_rate=0.001)\n\nif VARIANT == \"ian\":\n dataset_folder = os.path.expanduser(\"~/navrep/datasets/M/ian\")\n log_path = os.path.expanduser(\"~/navrep/logs/M/tcn_train_log_{}.csv\".format(START_TIME))\n log_hyperparams_path = os.path.expanduser(\n \"~/navrep/logs/M/tcn_train_log_{}.hyperparams.pckl\".format(START_TIME))\n model_hyperparams_path = os.path.expanduser(\"~/navrep/models/M/tcn.hyperparams.pckl\")\n model_path = os.path.expanduser(\"~/navrep/models/M/tcn.json\")\n vae_model_path = os.path.expanduser(\"~/navrep/models/V/vae.json\")\nif VARIANT == \"toy\":\n dataset_folder = os.path.expanduser(\"~/navrep/datasets/M/toy\")\n log_path = 
os.path.expanduser(\"~/navrep/logs/M/toytcn_train_log_{}.csv\".format(START_TIME))\n log_hyperparams_path = os.path.expanduser(\n \"~/navrep/logs/M/toytcn_train_log_{}.hyperparams.pckl\".format(START_TIME))\n model_hyperparams_path = os.path.expanduser(\"~/navrep/models/M/toytcn.hyperparams.pckl\")\n model_path = os.path.expanduser(\"~/navrep/models/M/toytcn.json\")\n vae_model_path = os.path.expanduser(\"~/navrep/models/V/toyvae.json\")\n\nmake_dir_if_not_exists(os.path.dirname(model_path))\nmake_dir_if_not_exists(os.path.dirname(log_path))\n\n# load preprocessed data\nfiles = []\nfor dirpath, dirnames, filenames in os.walk(dataset_folder):\n for filename in [f for f in filenames if f.endswith(\".npz\")]:\n files.append(os.path.join(dirpath, filename))\nall_data = []\nfor path in files:\n arrays = np.load(path)\n all_data.append(\n [\n arrays[\"mus\"],\n arrays[\"logvars\"],\n arrays[\"actions\"],\n arrays[\"dones\"],\n arrays[\"rewards\"],\n ]\n )\nn_total_frames = np.sum([mu.shape[0] for mu, _, _, _, _ in all_data])\nprint(\"total frames: \", n_total_frames)\n\n\nreset_graph()\nmodel = MDNTCN(hps)\n\nviewer = None\nvalues_logs = None\n\nstart = time.time()\nfor epoch in range(1, N_EPOCHS + 1):\n # print('preparing data for epoch', epoch)\n epoch_start = time.time()\n # flatten all sequences into one\n mu_sequence = np.zeros((n_total_frames, _Z), dtype=np.float32)\n logvar_sequence = np.zeros((n_total_frames, _Z), dtype=np.float32)\n action_sequence = np.zeros((n_total_frames, 3), dtype=np.float32)\n done_sequence = np.zeros((n_total_frames, 1), dtype=np.float32)\n reward_sequence = np.zeros((n_total_frames, 1), dtype=np.float32)\n i = 0\n random.shuffle(all_data)\n for mu, logvar, action, done, reward in all_data:\n L = len(mu)\n mu_sequence[i : i + L, :] = mu.reshape(L, _Z)\n logvar_sequence[i : i + L, :] = logvar.reshape(L, _Z)\n action_sequence[i : i + L, :] = action.reshape(L, 3)\n done_sequence[i : i + L, :] = done.reshape(L, 1)\n reward_sequence[i : i + L, :] = reward.reshape(L, 1)\n i += L\n # sample z from mu and logvar\n z_sequence = mu_sequence + np.exp(logvar_sequence / 2.0) * np.random.randn(\n *(mu_sequence.shape)\n )\n # resize array to be reshapable into sequences and batches\n chunksize = hps.batch_size * hps.max_seq_len # frames per batch (100'000)\n n_chunks = n_total_frames // chunksize\n # reshape into sequences\n z_sequences = np.reshape(\n z_sequence[: n_chunks * chunksize, :], (-1, hps.max_seq_len, _Z)\n )\n action_sequences = np.reshape(\n action_sequence[: n_chunks * chunksize], (-1, hps.max_seq_len, 3)\n )\n done_sequences = np.reshape(\n done_sequence[: n_chunks * chunksize], (-1, hps.max_seq_len)\n )\n reward_sequences = np.reshape(\n reward_sequence[: n_chunks * chunksize], (-1, hps.max_seq_len)\n )\n num_sequences = len(z_sequences)\n # shuffle\n random_idxs = list(range(num_sequences))\n random.shuffle(random_idxs)\n random_idxs = np.reshape(random_idxs, (-1, hps.batch_size))\n # reshape into batches\n z_batches = z_sequences[random_idxs]\n action_batches = action_sequences[random_idxs]\n done_batches = done_sequences[random_idxs]\n reward_batches = reward_sequences[random_idxs]\n num_batches = len(z_batches)\n # result is of size (n_batches, batch_size, seq_len, ...)\n # print('number of batches', num_batches)\n batch_create_time = time.time() - epoch_start\n # print('time taken to create batches', batch_create_time)\n\n for batch_z, batch_action, batch_done, batch_reward in zip(\n z_batches, action_batches, done_batches, reward_batches\n ):\n\n if False: 
# Visually check that the batch is sound\n from navrep.models.vae2d import ConvVAE\n import matplotlib.pyplot as plt\n from navrep.tools.rings import generate_rings\n\n reset_graph()\n vae = ConvVAE(batch_size=1, is_training=False)\n vae.load_json(vae_model_path)\n rings_def = generate_rings(64, 64)\n rings_pred = vae.decode(batch_z[0]) * rings_def[\"rings_to_bool\"]\n plt.ion()\n for i, ring in enumerate(rings_pred):\n rings_def[\"visualize_rings\"](ring, scan=None)\n plt.ylim([0, 10])\n plt.title(str(batch_action[0, i]))\n plt.pause(0.1)\n exit()\n if False:\n from navrep.models.vae2d import ConvVAE\n from navrep.tools.render import render_lidar_batch\n from navrep.tools.rings import generate_rings\n\n reset_graph()\n vae = ConvVAE(batch_size=100, is_training=False)\n vae.load_json(vae_model_path)\n rings_def = generate_rings(64, 64)\n batch_decodings = []\n for i in range(batch_z.shape[1]): # for each sequence step\n rings_pred = vae.decode(batch_z[:, i]) * rings_def[\"rings_to_bool\"]\n predicted_ranges = rings_def[\"rings_to_lidar\"](rings_pred, 1080)\n batch_decodings.append(predicted_ranges)\n for i, predicted_ranges in enumerate(batch_decodings):\n viewer = render_lidar_batch(\n predicted_ranges, 0, 2 * np.pi, viewer=viewer\n )\n import pyglet\n\n filename = \"/tmp/frame{:03}.png\".format(i)\n pyglet.image.get_buffer_manager().get_color_buffer().save(filename)\n print(\"image file writen : \", filename)\n\n step = model.sess.run(model.global_step)\n curr_learning_rate = (hps.learning_rate - hps.min_learning_rate) * (\n hps.decay_rate\n ) ** step + hps.min_learning_rate\n\n feed = {\n model.batch_z: batch_z,\n model.batch_action: batch_action,\n model.batch_restart: batch_done,\n model.lr: curr_learning_rate,\n }\n\n (train_cost, rmse, z_cost, r_cost, train_step, _) = model.sess.run(\n [\n model.cost,\n model.rmse,\n model.z_cost,\n model.r_cost,\n model.global_step,\n model.train_op,\n ],\n feed,\n )\n if step % 20 == 0 and step > 0:\n end = time.time()\n time_taken = end - start\n start = time.time()\n output_log = (\n \"step: %d, lr: %.6f, cost: %.4f, z_cost: %.4f, r_cost: %.4f, train_time_taken: %.4f\"\n % (step, curr_learning_rate, train_cost, z_cost, r_cost, time_taken)\n )\n print(output_log)\n model.save_json(model_path)\n with open(model_hyperparams_path, \"wb\") as f:\n pickle.dump(hps, f)\n values_log = pd.DataFrame(\n [\n [\n step,\n curr_learning_rate,\n train_cost,\n rmse,\n z_cost,\n r_cost,\n time_taken,\n ]\n ],\n columns=[\n \"step\",\n \"lr\",\n \"cost\",\n \"rmse\",\n \"z_cost\",\n \"r_cost\",\n \"train_time_taken\",\n ],\n )\n if values_logs is None:\n values_logs = values_log.copy()\n else:\n values_logs = values_logs.append(values_log, ignore_index=True)\n values_logs.to_csv(log_path)\n with open(log_hyperparams_path, \"wb\") as f:\n pickle.dump(hps, f)\n\nmodel.save_json(model_path)\n","sub_path":"navrep/scripts/train_tcn.py","file_name":"train_tcn.py","file_ext":"py","file_size_in_byte":9100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"356150269","text":"from queue import deque\nfrom typing import List\n\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n\n if not root:\n return list()\n\n ans = list()\n que = deque()\n que.append(root)\n\n while len(que) > 0:\n vals = list()\n length = len(que)\n\n for _ in range(length):\n node 
= que.popleft()\n vals.append(node.val)\n\n if not node.children:\n continue\n\n for item in node.children:\n que.append(item)\n\n ans.append(vals)\n\n return ans\n","sub_path":"week02/429. N叉树的层序遍历.py","file_name":"429. N叉树的层序遍历.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"11795888","text":"import numpy as np\nfrom serious_run import datasets\nfrom utils.plot_utils import *\nfrom utils.dataloaders import *\nimport os\nimport shutil\n\nif __name__ == '__main__':\n save_folder = 'interval_plots'\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n else:\n shutil.rmtree(save_folder)\n os.makedirs(save_folder)\n for idx in [5,6,7]:\n d_str = datasets[idx]\n if d_str=='weibull':\n t_array = np.linspace(0, 2, num=100)\n x_array = [0.0, 0.3, 1.0]\n elif d_str=='checkboard':\n t_array = np.linspace(0, 1, num=100)\n x_array = [0.1, 0.4]\n elif d_str =='normal':\n t_array = np.linspace(80, 120, num=100)\n x_array = [ 0.2, 0.4, 0.6, 0.8, 1.0]\n\n net_types = ['survival_net_basic']\n net_type = net_types[0]\n o = 'S_mean'\n bs = 100\n seed = 1337\n fold_idx = 0\n folder = f'interval_{d_str}_test'\n PATH = f'./{folder}/interval_{d_str}_seed={seed}_fold_idx={fold_idx}_objective={o}_{net_type}/'\n model = load_best_model_interval(PATH=PATH)\n model = model.eval()\n model.direct = 'autograd'\n dl = get_dataloader(d_str,bs,seed,fold_idx)\n dat = pd.DataFrame(np.array(x_array).reshape(-1,1),columns=['x1'])\n plot_survival(dat,t_array.reshape(-1,1),dl,model,f'{save_folder}/{d_str}_{fold_idx}_survival_plot.png')\n\n\n\n\n\n\n","sub_path":"plot_survival_curves_examples_interval.py","file_name":"plot_survival_curves_examples_interval.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"85755786","text":"\nfrom django.contrib import admin\nfrom django.urls import path , include\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('accounts/', include('allauth.urls')),\n path('', include('apps.app_1.urls')),\n path('', include('apps.explore_app.urls')),\n path('', include('apps.practice_app.urls')),\n path('learn/', include('apps.learn_app.urls')),\n path('quiz/', include('apps.quiz_app.urls')),\n]\n\nurlpatterns += staticfiles_urlpatterns()","sub_path":"CodingCubs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"396862010","text":"#!/user/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom flask import request, jsonify, g\n\nfrom dms.objects.user import UserObject\nfrom dms.objects.web_config import WebConfig\nfrom dms.web.base import View\nfrom Tools.RenderTemplate import RenderTemplate\n\nfrom Web import config_url_prefix\n\n\nurl_prefix = config_url_prefix\nrt = RenderTemplate(\"Web_Config\", url_prefix=url_prefix)\n\n__author__ = 'Zhouheng'\n\n\nconfig_man = WebConfig.get_instance()\nuser_man = UserObject()\nconfig_view = View('dms_config_bp', __name__, url_prefix=url_prefix,\n auth_required=True)\n\n\n@config_view.route('', methods=['GET'])\ndef config_page():\n return rt.render('config.html')\n\n@config_view.route('/values', methods=['GET'])\ndef get_config_value():\n if not user_man.is_admin(g.user_role):\n return jsonify({\"status\": False, \"data\": \"无权限\"})\n keys = 
request.args.get('keys', \"\").split(\",\")\n\n configs = config_man.get_keys(keys)\n return jsonify({'status': True, 'data': configs})\n\n\n@config_view.route('/values', methods=['POST'])\ndef post_config_value():\n if not user_man.is_admin(g.user_role):\n return jsonify({\"status\": False, \"data\": \"无权限\"})\n request_data = request.json\n config_man.new_configs(request_data)\n return jsonify({\"status\": True, \"data\": True})\n","sub_path":"dms/web/views/dms_config_view.py","file_name":"dms_config_view.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"512227361","text":"#!/usr/bin/env python\nimport argparse\nimport json\nimport logging\nimport os\nimport shutil\nimport threading\nimport boto.dynamodb2.layer1\nimport datetime\nimport errno\nimport sys\nimport time\nimport re\nfrom boto.dynamodb2.layer1 import DynamoDBConnection\nfrom botocore.exceptions import BotoCoreError\nimport boto3\n\nJSON_INDENT = 2\nAWS_SLEEP_INTERVAL = 10 # seconds\nLOCAL_SLEEP_INTERVAL = 1 # seconds\nBATCH_WRITE_SLEEP_INTERVAL = 0.15 # seconds\nMAX_BATCH_WRITE = 25 # DynamoDB limit\nSCHEMA_FILE = \"schema.json\"\nDATA_DIR = \"data\"\nMAX_RETRY = 6\nLOCAL_REGION = \"local\"\nLOG_LEVEL = \"INFO\"\nDATA_DUMP = \"dump\"\nRESTORE_WRITE_CAPACITY = 25\nTHREAD_START_DELAY = 1 # seconds\nCURRENT_WORKING_DIR = os.getcwd()\nDEFAULT_PREFIX_SEPARATOR = \"-\"\n\n\ndef get_table_name_matches(conn, table_name_wildcard, separator):\n all_tables = []\n last_evaluated_table_name = None\n\n while True:\n table_list = conn.list_tables(exclusive_start_table_name=last_evaluated_table_name)\n all_tables.extend(table_list[\"TableNames\"])\n\n try:\n last_evaluated_table_name = table_list[\"LastEvaluatedTableName\"]\n except KeyError:\n break\n\n matching_tables = []\n for table_name in all_tables:\n if table_name_wildcard == \"*\":\n matching_tables.append(table_name)\n elif separator is None:\n if table_name.startswith(table_name_wildcard.split(\"*\", 1)[0]):\n matching_tables.append(table_name)\n elif separator == '':\n if table_name.startswith(re.sub(r\"([A-Z])\", r\" \\1\", table_name_wildcard.split(\"*\", 1)[0]).split()[0]):\n matching_tables.append(table_name)\n elif table_name.split(separator, 1)[0] == table_name_wildcard.split(\"*\", 1)[0]:\n matching_tables.append(table_name)\n\n return matching_tables\n\n\ndef get_restore_table_matches(table_name_wildcard, separator):\n matching_tables = []\n try:\n dir_list = os.listdir(\"./\" + args.dumpPath)\n except OSError:\n logging.info(\"Cannot find \\\"./%s\\\", Now trying current working directory..\" % args.dumpPath)\n dump_data_path = CURRENT_WORKING_DIR\n try:\n dir_list = os.listdir(dump_data_path)\n except OSError:\n logging.info(\"Cannot find \\\"%s\\\" directory containing dump files!\" % dump_data_path)\n sys.exit(1)\n\n for dir_name in dir_list:\n if table_name_wildcard == \"*\":\n matching_tables.append(dir_name)\n elif separator == '':\n if dir_name.startswith(re.sub(r\"([A-Z])\", r\" \\1\", table_name_wildcard.split(\"*\", 1)[0]).split()[0]):\n matching_tables.append(dir_name)\n elif dir_name.split(separator, 1)[0] == table_name_wildcard.split(\"*\", 1)[0]:\n matching_tables.append(dir_name)\n\n return matching_tables\n\n\ndef change_prefix(source_table_name, source_wildcard, destination_wildcard, separator):\n source_prefix = source_wildcard.split(\"*\", 1)[0]\n destination_prefix = destination_wildcard.split(\"*\", 1)[0]\n if separator == '':\n if re.sub(r\"([A-Z])\", 
r\" \\1\", source_table_name).split()[0] == source_prefix:\n return destination_prefix + re.sub(r\"([A-Z])\", r\" \\1\", source_table_name).split(' ', 1)[1].replace(\" \", \"\")\n if source_table_name.split(separator, 1)[0] == source_prefix:\n return destination_prefix + separator + source_table_name.split(separator, 1)[1]\n\n\ndef delete_table(conn, sleep_interval, table_name):\n if not args.dataOnly:\n while True:\n # delete table if exists\n table_exist = True\n try:\n conn.delete_table(table_name)\n except boto.exception.JSONResponseError as e:\n if e.body[\"__type\"] == \"com.amazonaws.dynamodb.v20120810#ResourceNotFoundException\":\n table_exist = False\n logging.info(table_name + \" table deleted!\")\n break\n elif e.body[\"__type\"] == \"com.amazonaws.dynamodb.v20120810#LimitExceededException\":\n logging.info(\"Limit exceeded, retrying deletion of \" + table_name + \"..\")\n time.sleep(sleep_interval)\n elif e.body[\"__type\"] == \"com.amazon.coral.availability#ThrottlingException\":\n logging.info(\"Control plane limit exceeded, retrying deletion of \" + table_name + \"..\")\n time.sleep(sleep_interval)\n elif e.body[\"__type\"] == \"com.amazonaws.dynamodb.v20120810#ResourceInUseException\":\n logging.info(table_name + \" table is being deleted..\")\n time.sleep(sleep_interval)\n else:\n logging.exception(e)\n sys.exit(1)\n\n # if table exists, wait till deleted\n if table_exist:\n try:\n while True:\n logging.info(\"Waiting for \" + table_name + \" table to be deleted.. [\" +\n conn.describe_table(table_name)[\"Table\"][\"TableStatus\"] + \"]\")\n time.sleep(sleep_interval)\n except boto.exception.JSONResponseError as e:\n if e.body[\"__type\"] == \"com.amazonaws.dynamodb.v20120810#ResourceNotFoundException\":\n logging.info(table_name + \" table deleted.\")\n pass\n else:\n logging.exception(e)\n sys.exit(1)\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef batch_write(conn, sleep_interval, table_name, put_requests):\n request_items = {table_name: put_requests}\n i = 1\n sleep = sleep_interval\n while True:\n response = conn.batch_write_item(request_items)\n unprocessed_items = response[\"UnprocessedItems\"]\n\n if len(unprocessed_items) == 0:\n break\n if len(unprocessed_items) > 0 and i <= MAX_RETRY:\n logging.debug(str(len(unprocessed_items)) + \" unprocessed items, retrying after %s seconds.. [%s/%s]\" % (str(sleep), str(i), str(MAX_RETRY)))\n request_items = unprocessed_items\n time.sleep(sleep)\n sleep += sleep_interval\n i += 1\n else:\n logging.info(\"Max retries reached, failed to processed batch write: \" + json.dumps(unprocessed_items,\n indent=JSON_INDENT))\n logging.info(\"Ignoring and continuing..\")\n break\n\n\ndef wait_for_active_table(conn, table_name, verb):\n while True:\n if conn.describe_table(table_name)[\"Table\"][\"TableStatus\"] != \"ACTIVE\":\n logging.info(\"Waiting for \" + table_name + \" table to be \" + verb + \".. 
[\" +\n conn.describe_table(table_name)[\"Table\"][\"TableStatus\"] + \"]\")\n time.sleep(sleep_interval)\n else:\n logging.info(table_name + \" \" + verb + \".\")\n break\n\n\ndef update_provisioned_throughput(conn, table_name, read_capacity, write_capacity, wait=True):\n logging.info(\n \"Updating \" + table_name + \" table read capacity to: \" + str(read_capacity) + \", write capacity to: \" + str(\n write_capacity))\n while True:\n try:\n conn.update_table(table_name,\n {\"ReadCapacityUnits\": int(read_capacity), \"WriteCapacityUnits\": int(write_capacity)})\n break\n except boto.exception.JSONResponseError as e:\n if e.body[\"__type\"] == \"com.amazonaws.dynamodb.v20120810#LimitExceededException\":\n logging.info(\"Limit exceeded, retrying updating throughput of \" + table_name + \"..\")\n time.sleep(sleep_interval)\n elif e.body[\"__type\"] == \"com.amazon.coral.availability#ThrottlingException\":\n logging.info(\"Control plane limit exceeded, retrying updating throughput of \" + table_name + \"..\")\n time.sleep(sleep_interval)\n\n # wait for provisioned throughput update completion\n if wait:\n wait_for_active_table(conn, table_name, \"updated\")\n\n\ndef do_empty(conn, table_name):\n logging.info(\"Starting Empty for \" + table_name + \"..\")\n\n # get table schema\n logging.info(\"Fetching table schema for \" + table_name)\n table_data = conn.describe_table(table_name)\n\n table_desc = table_data[\"Table\"]\n table_attribute_definitions = table_desc[\"AttributeDefinitions\"]\n table_key_schema = table_desc[\"KeySchema\"]\n original_read_capacity = table_desc[\"ProvisionedThroughput\"][\"ReadCapacityUnits\"]\n original_write_capacity = table_desc[\"ProvisionedThroughput\"][\"WriteCapacityUnits\"]\n table_local_secondary_indexes = table_desc.get(\"LocalSecondaryIndexes\")\n table_global_secondary_indexes = table_desc.get(\"GlobalSecondaryIndexes\")\n\n table_provisioned_throughput = {\"ReadCapacityUnits\": int(original_read_capacity),\n \"WriteCapacityUnits\": int(original_write_capacity)}\n\n logging.info(\"Deleting Table \" + table_name)\n\n delete_table(conn, sleep_interval, table_name)\n\n logging.info(\"Creating Table \" + table_name)\n\n while True:\n try:\n conn.create_table(table_attribute_definitions, table_name, table_key_schema, table_provisioned_throughput,\n table_local_secondary_indexes, table_global_secondary_indexes)\n break\n except boto.exception.JSONResponseError as e:\n if e.body[\"__type\"] == \"com.amazonaws.dynamodb.v20120810#LimitExceededException\":\n logging.info(\"Limit exceeded, retrying creation of \" + table_name + \"..\")\n time.sleep(sleep_interval)\n elif e.body[\"__type\"] == \"com.amazon.coral.availability#ThrottlingException\":\n logging.info(\"Control plane limit exceeded, retrying creation of \" + table_name + \"..\")\n time.sleep(sleep_interval)\n else:\n logging.exception(e)\n sys.exit(1)\n\n # wait for table creation completion\n wait_for_active_table(conn, table_name, \"created\")\n\n logging.info(\"Recreation of \" + table_name + \" completed. 
Time taken: \" + str(\n datetime.datetime.now().replace(microsecond=0) - start_time))\n\n\ndef do_backup(conn, table_name, read_capacity):\n logging.info(\"Starting backup for \" + table_name + \"..\")\n\n # trash data, re-create subdir\n if os.path.exists(args.dumpPath + \"/\" + table_name):\n shutil.rmtree(args.dumpPath + \"/\" + table_name)\n mkdir_p(args.dumpPath + \"/\" + table_name)\n\n # get table schema\n logging.info(\"Dumping table schema for \" + table_name)\n f = open(args.dumpPath + \"/\" + table_name + \"/\" + SCHEMA_FILE, \"w+\")\n table_desc = conn.describe_table(table_name)\n f.write(json.dumps(table_desc, indent=JSON_INDENT))\n f.close()\n\n if not args.schemaOnly:\n original_read_capacity = table_desc[\"Table\"][\"ProvisionedThroughput\"][\"ReadCapacityUnits\"]\n original_write_capacity = table_desc[\"Table\"][\"ProvisionedThroughput\"][\"WriteCapacityUnits\"]\n\n # override table read capacity if specified\n if read_capacity is not None and read_capacity != original_read_capacity:\n update_provisioned_throughput(conn, table_name, read_capacity, original_write_capacity)\n\n # get table data\n logging.info(\"Dumping table items for \" + table_name)\n mkdir_p(args.dumpPath + \"/\" + table_name + \"/\" + DATA_DIR)\n\n i = 1\n last_evaluated_key = None\n\n while True:\n scanned_table = conn.scan(table_name, exclusive_start_key=last_evaluated_key)\n\n f = open(args.dumpPath + \"/\" + table_name + \"/\" + DATA_DIR + \"/\" + str(i).zfill(4) + \".json\", \"w+\")\n f.write(json.dumps(scanned_table, indent=JSON_INDENT))\n f.close()\n\n i += 1\n\n try:\n last_evaluated_key = scanned_table[\"LastEvaluatedKey\"]\n except KeyError:\n break\n\n # revert back to original table read capacity if specified\n if read_capacity is not None and read_capacity != original_read_capacity:\n update_provisioned_throughput(conn, table_name, original_read_capacity, original_write_capacity, False)\n\n logging.info(\"Backup for \" + table_name + \" table completed. 
Time taken: \" + str(\n datetime.datetime.now().replace(microsecond=0) - start_time))\n\n\ndef do_restore(conn, sleep_interval, source_table, destination_table, write_capacity):\n logging.info(\"Starting restore for \" + source_table + \" to \" + destination_table + \"..\")\n\n # create table using schema\n # restore source_table from dump directory if it exists else try current working directory\n if os.path.exists(\"%s/%s\" % (args.dumpPath, source_table)):\n dump_data_path = args.dumpPath\n else:\n logging.info(\"Cannot find \\\"./%s/%s\\\", Now trying current working directory..\" % (args.dumpPath, source_table))\n if os.path.exists(\"%s/%s\" % (CURRENT_WORKING_DIR, source_table)):\n dump_data_path = CURRENT_WORKING_DIR\n else:\n logging.info(\"Cannot find \\\"%s/%s\\\" directory containing dump files!\" % (CURRENT_WORKING_DIR, source_table))\n sys.exit(1)\n table_data = json.load(open(dump_data_path + \"/\" + source_table + \"/\" + SCHEMA_FILE))\n table = table_data[\"Table\"]\n table_attribute_definitions = table[\"AttributeDefinitions\"]\n table_table_name = destination_table\n table_key_schema = table[\"KeySchema\"]\n original_read_capacity = table[\"ProvisionedThroughput\"][\"ReadCapacityUnits\"]\n original_write_capacity = table[\"ProvisionedThroughput\"][\"WriteCapacityUnits\"]\n table_local_secondary_indexes = table.get(\"LocalSecondaryIndexes\")\n table_global_secondary_indexes = table.get(\"GlobalSecondaryIndexes\")\n\n # override table write capacity if specified, else use RESTORE_WRITE_CAPACITY if original write capacity is lower\n if write_capacity is None:\n if original_write_capacity < RESTORE_WRITE_CAPACITY:\n write_capacity = RESTORE_WRITE_CAPACITY\n else:\n write_capacity = original_write_capacity\n\n # override GSI write capacities if specified, else use RESTORE_WRITE_CAPACITY if original write capacity is lower\n original_gsi_write_capacities = []\n if table_global_secondary_indexes is not None:\n for gsi in table_global_secondary_indexes:\n original_gsi_write_capacities.append(gsi[\"ProvisionedThroughput\"][\"WriteCapacityUnits\"])\n\n if gsi[\"ProvisionedThroughput\"][\"WriteCapacityUnits\"] < int(write_capacity):\n gsi[\"ProvisionedThroughput\"][\"WriteCapacityUnits\"] = int(write_capacity)\n\n # temp provisioned throughput for restore\n table_provisioned_throughput = {\"ReadCapacityUnits\": int(original_read_capacity),\n \"WriteCapacityUnits\": int(write_capacity)}\n\n if not args.dataOnly:\n\n logging.info(\"Creating \" + destination_table + \" table with temp write capacity of \" + str(write_capacity))\n\n while True:\n try:\n conn.create_table(table_attribute_definitions, table_table_name, table_key_schema,\n table_provisioned_throughput, table_local_secondary_indexes,\n table_global_secondary_indexes)\n break\n except boto.exception.JSONResponseError as e:\n if e.body[\"__type\"] == \"com.amazonaws.dynamodb.v20120810#LimitExceededException\":\n logging.info(\"Limit exceeded, retrying creation of \" + destination_table + \"..\")\n time.sleep(sleep_interval)\n elif e.body[\"__type\"] == \"com.amazon.coral.availability#ThrottlingException\":\n logging.info(\"Control plane limit exceeded, retrying creation of \" + destination_table + \"..\")\n time.sleep(sleep_interval)\n else:\n logging.exception(e)\n sys.exit(1)\n\n # wait for table creation completion\n wait_for_active_table(conn, destination_table, \"created\")\n else:\n # update provisioned capacity\n if int(write_capacity) > original_write_capacity:\n update_provisioned_throughput(conn, destination_table, 
original_read_capacity, write_capacity,\n False)\n\n if not args.schemaOnly:\n # read data files\n logging.info(\"Restoring data for \" + destination_table + \" table..\")\n data_file_list = os.listdir(dump_data_path + \"/\" + source_table + \"/\" + DATA_DIR + \"/\")\n data_file_list.sort()\n\n for data_file in data_file_list:\n logging.info(\"Processing \" + data_file + \" of \" + destination_table)\n items = []\n item_data = json.load(open(dump_data_path + \"/\" + source_table + \"/\" + DATA_DIR + \"/\" + data_file))\n items.extend(item_data[\"Items\"])\n\n # batch write data\n put_requests = []\n while len(items) > 0:\n put_requests.append({\"PutRequest\": {\"Item\": items.pop(0)}})\n\n # flush every MAX_BATCH_WRITE\n if len(put_requests) == MAX_BATCH_WRITE:\n logging.debug(\"Writing next \" + str(MAX_BATCH_WRITE) + \" items to \" + destination_table + \"..\")\n batch_write(conn, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)\n del put_requests[:]\n\n # flush remainder\n if len(put_requests) > 0:\n batch_write(conn, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)\n\n if not args.skipThroughputUpdate:\n # revert to original table write capacity if it has been modified\n if int(write_capacity) != original_write_capacity:\n update_provisioned_throughput(conn, destination_table, original_read_capacity, original_write_capacity,\n False)\n\n # loop through each GSI to check if it has changed and update if necessary\n if table_global_secondary_indexes is not None:\n gsi_data = []\n for gsi in table_global_secondary_indexes:\n original_gsi_write_capacity = original_gsi_write_capacities.pop(0)\n if original_gsi_write_capacity != gsi[\"ProvisionedThroughput\"][\"WriteCapacityUnits\"]:\n gsi_data.append({\"Update\": {\"IndexName\": gsi[\"IndexName\"],\n \"ProvisionedThroughput\": {\n \"ReadCapacityUnits\": int(\n gsi[\"ProvisionedThroughput\"][\"ReadCapacityUnits\"]),\n \"WriteCapacityUnits\": int(original_gsi_write_capacity)}}})\n\n logging.info(\"Updating \" + destination_table + \" global secondary indexes write capacities as necessary..\")\n while True:\n try:\n conn.update_table(destination_table, global_secondary_index_updates=gsi_data)\n break\n except boto.exception.JSONResponseError as e:\n if e.body[\"__type\"] == \"com.amazonaws.dynamodb.v20120810#LimitExceededException\":\n logging.info(\n \"Limit exceeded, retrying updating throughput of GlobalSecondaryIndexes in \" + destination_table + \"..\")\n time.sleep(sleep_interval)\n elif e.body[\"__type\"] == \"com.amazon.coral.availability#ThrottlingException\":\n logging.info(\n \"Control plane limit exceeded, retrying updating throughput of GlobalSecondaryIndexes in \" + destination_table + \"..\")\n time.sleep(sleep_interval)\n\n # wait for table to become active\n wait_for_active_table(conn, destination_table, \"active\")\n\n logging.info(\"Restore for \" + source_table + \" to \" + destination_table + \" table completed. Time taken: \" + str(\n datetime.datetime.now().replace(microsecond=0) - start_time))\n else:\n logging.info(\"Empty schema of \" + source_table + \" table created. 
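# (editor's note) The restore loop above flushes PutRequests in groups of
# MAX_BATCH_WRITE (25, the DynamoDB batch-write limit). The same chunking as a
# stand-alone helper; the items below are hypothetical:
MAX_BATCH_WRITE = 25

def put_request_batches(items, size=MAX_BATCH_WRITE):
    for start in range(0, len(items), size):
        yield [{"PutRequest": {"Item": item}} for item in items[start:start + size]]

for batch in put_request_batches([{"id": {"N": str(n)}} for n in range(60)]):
    assert len(batch) <= MAX_BATCH_WRITE  # 25, 25, then 10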
Time taken: \" + str(datetime.datetime.now().replace(microsecond=0) - start_time))\n\n\ndef get_credentials(profile, region):\n try:\n session = boto3.Session(profile_name=profile, region_name=region)\n except BotoCoreError:\n return None\n credentials = session.get_credentials()\n return credentials\n\n\n# parse args\nparser = argparse.ArgumentParser(description=\"Simple DynamoDB backup/restore/empty.\")\nparser.add_argument(\"-m\", \"--mode\", help=\"'backup' or 'restore' or 'empty'\")\nparser.add_argument(\"-r\", \"--region\",\n help=\"AWS region to use, e.g. 'us-west-1'. Use '\" + LOCAL_REGION + \"' for local DynamoDB testing\")\nparser.add_argument(\"--host\", help=\"Host of local DynamoDB [required only for local]\")\nparser.add_argument(\"--port\", help=\"Port of local DynamoDB [required only for local]\")\nparser.add_argument(\"--accessKey\", help=\"Access key of local DynamoDB [required only for local]\")\nparser.add_argument(\"--secretKey\", help=\"Secret key of local DynamoDB [required only for local]\")\nparser.add_argument(\"--sessionToken\", help=\"Session token for AWS profile, aka security token [required for temporary AWS sessions unless profile specified]\")\nparser.add_argument(\"-p\", \"--profile\",\n help=\"AWS credentials file profile to use. Allows you to use a profile instead of accessKey, secretKey authentication\")\nparser.add_argument(\"-s\", \"--srcTable\",\n help=\"Source DynamoDB table name to backup or restore from, use 'tablename*' for wildcard prefix selection or '*' for all tables\")\nparser.add_argument(\"-d\", \"--destTable\",\n help=\"Destination DynamoDB table name to backup or restore to, use 'tablename*' for wildcard prefix selection (defaults to use '-' separator) [optional, defaults to source]\")\nparser.add_argument(\"--prefixSeparator\", help=\"Specify a different prefix separator, e.g. '.' [optional]\")\nparser.add_argument(\"--noSeparator\", action='store_true',\n help=\"Overrides the use of a prefix separator for backup wildcard searches [optional]\")\nparser.add_argument(\"--readCapacity\",\n help=\"Change the temp read capacity of the DynamoDB table to backup from [optional]\")\nparser.add_argument(\"--writeCapacity\",\n help=\"Change the temp write capacity of the DynamoDB table to restore to [defaults to \" + str(\n RESTORE_WRITE_CAPACITY) + \", optional]\")\nparser.add_argument(\"--schemaOnly\", action=\"store_true\", default=False,\n help=\"Backup or restore the schema only. Do not backup/restore data. Can be used with both backup and restore modes. Cannot be used with the --dataOnly [optional]\")\nparser.add_argument(\"--dataOnly\", action=\"store_true\", default=False,\n help=\"Restore data only. 
Do not delete/recreate schema [optional for restore]\")\nparser.add_argument(\"--skipThroughputUpdate\", action=\"store_true\", default=False,\n help=\"Skip updating throughput values across tables [optional]\")\nparser.add_argument(\"--dumpPath\", help=\"Directory to place and search for DynamoDB table backups (defaults to use '\" + str(DATA_DUMP) + \"') [optional]\", default=str(DATA_DUMP))\nparser.add_argument(\"--log\", help=\"Logging level - DEBUG|INFO|WARNING|ERROR|CRITICAL [optional]\")\nargs = parser.parse_args()\n\n# set log level\nlog_level = LOG_LEVEL\nif args.log is not None:\n log_level = args.log.upper()\nlogging.basicConfig(level=getattr(logging, log_level))\n\n\n# Check to make sure that --dataOnly and --schemaOnly weren't simultaneously specified\nif args.schemaOnly and args.dataOnly:\n logging.info(\"Options --schemaOnly and --dataOnly are mutually exclusive.\")\n sys.exit(1)\n\n\n# instantiate connection\nif args.region == LOCAL_REGION:\n conn = DynamoDBConnection(aws_access_key_id=args.accessKey, aws_secret_access_key=args.secretKey, host=args.host,\n port=int(args.port), is_secure=False)\n sleep_interval = LOCAL_SLEEP_INTERVAL\nelse:\n if not args.profile:\n conn = boto.dynamodb2.connect_to_region(args.region, aws_access_key_id=args.accessKey,\n aws_secret_access_key=args.secretKey,\n security_token=args.sessionToken)\n sleep_interval = AWS_SLEEP_INTERVAL\n else:\n credentials = get_credentials(profile=args.profile, region=args.region)\n conn = boto.dynamodb2.connect_to_region(args.region, aws_access_key_id=credentials.access_key,\n aws_secret_access_key=credentials.secret_key,\n security_token=credentials.token)\n sleep_interval = AWS_SLEEP_INTERVAL\n\n\n# set prefix separator\nprefix_separator = DEFAULT_PREFIX_SEPARATOR\nif args.prefixSeparator is not None:\n prefix_separator = args.prefixSeparator\nif args.noSeparator is True:\n prefix_separator = None\n\n# do backup/restore\nstart_time = datetime.datetime.now().replace(microsecond=0)\nif args.mode == \"backup\":\n if args.srcTable.find(\"*\") != -1:\n matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)\n logging.info(\"Found \" + str(len(matching_backup_tables)) + \" table(s) in DynamoDB host to backup: \" + \", \".join(\n matching_backup_tables))\n\n threads = []\n for table_name in matching_backup_tables:\n t = threading.Thread(target=do_backup, args=(conn, table_name, args.readCapacity,))\n threads.append(t)\n t.start()\n time.sleep(THREAD_START_DELAY)\n\n for thread in threads:\n thread.join()\n\n logging.info(\"Backup of table(s) \" + args.srcTable + \" completed!\")\n else:\n do_backup(conn, args.srcTable, args.readCapacity)\nelif args.mode == \"restore\":\n if args.destTable is not None:\n dest_table = args.destTable\n else:\n dest_table = args.srcTable\n\n if dest_table.find(\"*\") != -1:\n matching_destination_tables = get_table_name_matches(conn, dest_table, prefix_separator)\n delete_str = \": \" if args.dataOnly else \" to be deleted: \"\n logging.info(\n \"Found \" + str(len(matching_destination_tables)) + \" table(s) in DynamoDB host\" + delete_str + \", \".join(\n matching_destination_tables))\n\n threads = []\n for table_name in matching_destination_tables:\n t = threading.Thread(target=delete_table, args=(conn, sleep_interval, table_name,))\n threads.append(t)\n t.start()\n time.sleep(THREAD_START_DELAY)\n\n for thread in threads:\n thread.join()\n\n matching_restore_tables = get_restore_table_matches(args.srcTable, prefix_separator)\n logging.info(\n \"Found \" + 
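# (editor's note) Example invocations, using only the flags defined by the
# argparse section above; the region and table names are hypothetical:
#
#   python dynamodump.py -m backup  -r us-west-2 -s 'prod-*'
#   python dynamodump.py -m restore -r us-west-2 -s prod-users -d staging-users
#   python dynamodump.py -m empty   -r local --host localhost --port 8000 --accessKey a --secretKey b -s prod-users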
str(len(matching_restore_tables)) + \" table(s) in \" + args.dumpPath + \" to restore: \" + \", \".join(\n matching_restore_tables))\n\n threads = []\n for source_table in matching_restore_tables:\n if args.srcTable == \"*\":\n t = threading.Thread(target=do_restore,\n args=(conn, sleep_interval, source_table, source_table, args.writeCapacity))\n else:\n t = threading.Thread(target=do_restore, args=(conn, sleep_interval, source_table,\n change_prefix(source_table, args.srcTable, dest_table,\n prefix_separator), args.writeCapacity,))\n threads.append(t)\n t.start()\n time.sleep(THREAD_START_DELAY)\n\n for thread in threads:\n thread.join()\n\n logging.info(\"Restore of table(s) \" + args.srcTable + \" to \" + dest_table + \" completed!\")\n else:\n delete_table(conn, sleep_interval, dest_table)\n do_restore(conn, sleep_interval, args.srcTable, dest_table, args.writeCapacity)\nelif args.mode == \"empty\":\n if args.srcTable.find(\"*\") != -1:\n matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)\n logging.info(\"Found \" + str(len(matching_backup_tables)) + \" table(s) in DynamoDB host to empty: \" + \", \".join(\n matching_backup_tables))\n\n threads = []\n for table_name in matching_backup_tables:\n t = threading.Thread(target=do_empty, args=(conn, table_name))\n threads.append(t)\n t.start()\n time.sleep(THREAD_START_DELAY)\n\n for thread in threads:\n thread.join()\n\n logging.info(\"Empty of table(s) \" + args.srcTable + \" completed!\")\n else:\n do_empty(conn, args.srcTable)\n","sub_path":"dynamodump.py","file_name":"dynamodump.py","file_ext":"py","file_size_in_byte":29163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"159974568","text":"import keras.backend as K\nimport numpy as np\nimport tensorflow as tf\n\nx = np.array(range(10))\nprint(x) # [0 1 2 3 4 5 6 7 8 9]\n\n# square\nsquare = K.square(x) # x : Tensor or variable\n # return : a tensor\nprint(square) # tf.Tensor([ 0 1 4 9 16 25 36 49 64 81], shape=(10,), dtype=int32)\n\nx1 = np.array([0, -1, -2, -3, 4, 5, -7, -8, 9, 10])\nabs = K.abs(x1)\nprint(abs) # tf.Tensor([ 0 1 2 3 4 5 7 8 9 10], shape=(10,), dtype=int32)\n\n","sub_path":"keras/keras_Backend/backend06_calculate.py","file_name":"backend06_calculate.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"538949044","text":"from django.urls import path\nfrom mytrans import views\n\nurlpatterns = [\n path('', views.allTrans, name=\"mytrans\"),\n\n # view exchange status \n path('ongoing', views.ongoing),\n path('finished', views.finished),\n path('cancelled', views.cancelled),\n\n path('newitem', views.newitem, name=\"newitem\"),\n path('post', views.post_action, name=\"post\"),\n\n # rate\n path('rate/', views.rate_action, name=\"rate\"),\n \n # my item\n path('', views.item_status, name=\"status\"),\n path('/edit', views.edit_item, name=\"edit\"),\n path('/cancel', views.cancel_item, name=\"cancel_item\"),\n path('/reupload', views.reupload_item, name=\"reupload\"),\n\n # my item <-> other item\n path('/accept/', views.accept_action, name=\"accept\"),\n path('/confirm/', views.confirm_action, name=\"confirm\"),\n path('/decline/', views.decline_action, name=\"decline\"),\n path('/cancel/', views.cancel_action, name=\"cancel\"),\n\n 
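# (editor's note, for the urls.py record above) The angle-bracket path
# converters (e.g. <int:item_id>) appear to have been stripped from this
# record, which is why several patterns show an empty first argument; the
# exact capture names are not recoverable. Assuming integer id captures, the
# named routes can still be reversed instead of hard-coding URLs:
#
#   from django.urls import reverse
#   reverse("mytrans")              # the transaction list view
#   reverse("status", args=[3])     # status page for a hypothetical item 3
#   reverse("accept", args=[3, 7])  # my item 3 <-> other item 7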
\n]\n","sub_path":"swapspace/mytrans/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"266969938","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys,re\nfrom difflib import SequenceMatcher\n\nresults = [line.strip() for line in open('unique_events_complete_cleaned_xtra.txt','r')]\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\ndef removeSimilar(threshold):\n\tfor line in results:\n\t\t\tsplitLine = line.split('\\t')\t\t\t\t\t\t\n\t\t\ttweets = splitLine[4].split('-----')\t \t\t\t\t\t\t\t\t\t\n\t\t\tremovals = []\n\t\t\tfor x in range (0,len(tweets)):\n\t\t\t\tfor y in range(x, len(tweets)):\n\t\t\t\t\tif x != y:\n\t\t\t\t\t\tratio = similar(tweets[x],tweets[y])\n\t\t\t\t\t\tif ratio > threshold :\n\t\t\t\t\t\t\tremovals.append(y)\n\t\t\t\t\t\t\tprint(tweets[x])\n\t\t\t\t\t\t\tprint(tweets[y])\n\t\t\t\t\t\t\tprint('\\n')\t\n\t\t\t#sys.stdout.write(splitLine[0] + '\\t' + splitLine[1] + '\\t' + splitLine[2] + '\\t' + splitLine[3] + '\\t')\n\t\t\tfor x in range (0, len(tweets)):\n\t\t\t\tif x not in removals:\n\t\t\t\t\t#sys.stdout.write(tweets[x])\n\t\t\t\t\tif x != len(tweets) - 1:\n\t\t\t\t\t\t#sys.stdout.write('-----')\n\t\t\t\t\t\ty=1\n\t\t\t\t\telse:\n\t\t\t\t\t\t#sys.stdout.write('\\n')\t\n\t\t\t\t\t\ty=2\t\t\t\n\nremoveSimilar(0.95)\n","sub_path":"removeTooSimilar.py","file_name":"removeTooSimilar.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"448931456","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-intel/egg/ndg/xacml/parsers/etree/actionattributedesignatorreader.py\n# Compiled at: 2011-02-11 08:34:11\n\"\"\"NDG XACML ElementTree based reader for ActionAttributeDesignator type\n\nNERC DataGrid\n\"\"\"\n__author__ = 'P J Kershaw'\n__date__ = '19/03/10'\n__copyright__ = '(C) 2010 Science and Technology Facilities Council'\n__contact__ = 'Philip.Kershaw@stfc.ac.uk'\n__license__ = 'BSD - see LICENSE file in top-level directory'\n__contact__ = 'Philip.Kershaw@stfc.ac.uk'\n__revision__ = '$Id: actionattributedesignatorreader.py 7109 2010-06-28 12:54:57Z pjkersha $'\nfrom ndg.xacml.core.attributedesignator import ActionAttributeDesignator\nfrom ndg.xacml.parsers.etree.attributedesignatorreader import AttributeDesignatorReaderBase\n\nclass ActionAttributeDesignatorReader(AttributeDesignatorReaderBase):\n \"\"\"ElementTree based XACML Action Attribute Designator type parser\n \n @cvar TYPE: XACML class type that this reader will read values into\n @type TYPE: abc.ABCMeta\n \"\"\"\n TYPE = ActionAttributeDesignator","sub_path":"pycfiles/ndg_xacml-0.5.1-py2.7/actionattributedesignatorreader.py","file_name":"actionattributedesignatorreader.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"239131491","text":"import argparse\nimport os\nimport shutil\nimport socket\nimport threading\nimport serial\nimport serial.tools.list_ports\nimport time\nfrom netifaces import interfaces, ifaddresses, AF_INET\nimport queue\nimport logging\nimport sys\n\nNB_GET_TEMP_BEFORE_OK = 2\n\nLOGGING_FORMAT = '%(asctime)s :: %(levelname)s :: %(name)s :: %(lineno)d :: %(funcName)s :: %(message)s'\n\nif __name__ == 
'__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--debug', action='store_true',\n help='Activate Debug mode with verbose execution trace information')\n parser.add_argument('--demo', action='store_true',\n help='Activate Demo mode')\n parser.add_argument('--com_port', default=None,\n help='Serial com port')\n parser.add_argument('--tcp_port', default=4000,\n help='TCP IP server port number')\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.DEBUG, format=LOGGING_FORMAT,)\n log = logging.getLogger(__name__)\n\n if args.debug:\n log.setLevel(logging.DEBUG)\n log.info(\"Starting\")\n else:\n log.setLevel(logging.CRITICAL)\n\n print(\"Machine Local IP:\")\n local_ip = socket.gethostbyname(socket.gethostname())\n print(local_ip)\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((local_ip, args.tcp_port))\n\n try:\n while True:\n input(\"press any key for handler ready:\")\n s.sendall(\"H\\r\".encode())\n temp = 25.0\n target_temp = temp\n cnt = NB_GET_TEMP_BEFORE_OK\n while(True):\n rx = s.recv(1024).decode()\n print(\"RX <- {}\".format(rx))\n if rx==\"GET_TEMP?\":\n cnt +=1\n if cnt>=NB_GET_TEMP_BEFORE_OK:\n real_temp=target_temp\n else:\n real_temp=target_temp-10\n ans = \"CUR_TEMP,{}\\r\".format(real_temp).encode()\n s.sendall(ans)\n print(\"Tx -> {}\".format(ans))\n elif rx==\"R\":\n ans = \"S\\r\".format(temp).encode()\n s.sendall(ans)\n print(\"Tx -> {}\".format(ans))\n elif rx==\"TEST_RESULT,1\":\n ans = \"H\\r\".format(temp).encode()\n s.sendall(ans)\n print(\"Tx -> {}\".format(ans))\n elif rx==\"R\":\n ans = \"S\\r\".format(temp).encode()\n s.sendall(ans)\n print(\"Tx -> {}\".format(ans))\n elif rx.startswith(\"SET_TEMP\"):\n t = rx.split(\",\")\n target_temp = float(t[1])\n print(\"Target temp : {}\".format(target_temp))\n cnt = 0\n ans = \"OK\\r\".format(temp).encode()\n s.sendall(ans)\n print(\"Tx -> {}\".format(ans))\n elif rx==\"EOL\":\n break\n except KeyboardInterrupt:\n print('Interrupted')\n try:\n s.close\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n s.close()","sub_path":"src/exatronemulator.py","file_name":"exatronemulator.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"594128902","text":"import library.speech_text_processors.text_to_speech as text_to_speech\nimport thesaurus\nfrom PyDictionary import PyDictionary\n\n\ndef dict(text):\n dictionary = PyDictionary()\n x = dictionary.meaning(text)\n w = thesaurus.Word(text)\n word_synonyms = w.synonyms()\n word_antonyms = w .antonyms()\n print (' Word : ' + text)\n for i in x:\n print ('\\n ' + i + '\\n ' + '='*len(i))\n for j in range(len(x[i])):\n print (' [' + str(j+1) + '] - ' + x[i][j])\n if word_synonyms:\n print ('\\n Synonyms : ' + \",\".join(word_synonyms[:5]))\n if word_antonyms:\n print (' Antonyms : ' + \",\".join(word_antonyms[:5]))\n print ('\\n')\n for i in x:\n if i == 'Adjective':\n text_to_speech.main(text + ' is an ' + i + ' that means,' + x[i][0])\n else:\n text_to_speech.main(text + ' is a ' + i + ' that means,' + x[i][0])","sub_path":"desktop_jarvis/library/jarvis_api/Dictionary.py","file_name":"Dictionary.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"335493635","text":"#coding: utf-8\n\nimport json\nimport logging\nimport traceback\nimport gevent\n\nimport binascii\nfrom ctypes import *\nfrom sqlalchemy.sql import select, update, delete, 
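# (editor's note, for the exatronemulator.py record above) The emulator is the
# client half of a line-oriented TCP protocol (H, GET_TEMP?/CUR_TEMP,
# SET_TEMP/OK, R/S, TEST_RESULT, EOL). A minimal single-connection counterpart
# server speaking the same protocol; the host, port and 85.0 setpoint are
# hypothetical:
import socket

def run_test_server(host="127.0.0.1", port=4000):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(1)
    conn, _ = srv.accept()
    conn.recv(1024)                   # "H\r": handler ready
    conn.sendall(b"SET_TEMP,85.0\r")  # request a new soak temperature
    conn.recv(1024)                   # "OK\r"
    conn.sendall(b"GET_TEMP?")        # first polls report target - 10, by design
    conn.recv(1024)                   # "CUR_TEMP,75.0\r"
    conn.sendall(b"EOL")              # end of lot: the emulator leaves its loop
    conn.close()
    srv.close()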
insert, and_, subquery, not_, null, func, text,exists\nfrom sqlalchemy import desc\n\nimport random,time\nfrom datetime import datetime\n\nfrom services import GameService\nfrom message.base import *\nfrom message.resultdef import *\n\nfrom db.connect import *\n\nfrom proto.access_pb2 import *\nfrom proto.game_pb2 import *\nfrom proto.constant_pb2 import *\nfrom proto.struct_pb2 import *\n\n\nfrom util.handlerutil import *\n\nfrom config.var import *\n\nfrom room import *\n\nclass RoomService(GameService):\n def setup_route(self):\n self.registe_command(SitTableReq,SitTableResp,self.handle_sit_table)\n self.registe_command(OfflineReq,OfflineResp,self.handle_offline)\n \n def init(self):\n self.redis = self.server.redis\n max_user = self.getConfigOption(\"max_user\",\"100\")\n self.room_manager = RoomManager(self,int(max_user))\n \n @USE_TRANSACTION\n def handle_sit_table(self,session,req,resp,event): \n #logging.info(\"====> Sit Table Now: %d\", req.header.user)\n\n if req.body.table_id < 0:\n uid = req.header.user\n room_id = self.room_manager.get_user_room(uid)\n if room_id < 0:\n room_id = self.room_manager.find_room()\n if room_id < 0:\n resp.header.result = -1\n return \n\n new_req = create_client_message(SitTableReq)\n new_req.header.user = uid\n new_req.header.transaction = req.header.transaction\n new_req.body.table_id = req.body.table_id\n new_req.body.table_type = req.body.table_type\n for tid in req.body.not_tables:\n new_req.body.not_tables.append(tid)\n\n self.forward_proxy_message(event.srcId,room_id,new_req.header.command, \\\n new_req.header.user,new_req.header.transaction,new_req.encode())\n else:\n uid = req.header.user\n new_room_id = self.room_manager.find_room()\n if new_room_id < 0:\n resp.header.result = 0\n return\n\n room_id = self.room_manager.get_user_room(uid)\n if room_id > 0 and new_room_id != room_id:\n new_req = create_client_message(LeaveTableInternalReq)\n new_req.header.user = uid\n new_req.header.transaction = req.header.transaction\n self.forward_proxy_message(event.srcId,room_id,new_req.header.command, \\\n req.header.user,req.header.transaction,new_req.encode())\n\n new_req = create_client_message(SitTableReq)\n new_req.header.user = uid\n new_req.header.transaction = req.header.transaction\n new_req.body.table_id = req.body.table_id\n new_req.body.table_type = req.body.table_type\n for tid in req.body.not_tables:\n new_req.body.not_tables.append(tid)\n new_req.body.not_tables.append(req.body.table_id)\n self.forward_proxy_message(event.srcId,new_room_id,new_req.header.command, \\\n req.header.user,req.header.transaction,new_req.encode())\n\n\n return False\n\n \n @USE_TRANSACTION\n def handle_offline(self,session,req,resp,event):\n uid = req.header.user\n room_id = self.room_manager.get_user_room(uid)\n if room_id > 0:\n new_req = create_client_message(OfflineReq)\n new_req.header.user = uid\n new_req.body.uid = uid\n self.forward_proxy_message(event.srcId,room_id,new_req.header.command, \\\n req.header.user,req.header.transaction,new_req.encode()) \n\n return False\n \n \nif __name__ == \"__main__\":\n pass\n\n","sub_path":"code/goldflower/roomservice.py","file_name":"roomservice.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"570919954","text":"#!/bin/env python3\n\nimport requests\nimport json\n\nclass Zabbix:\n __ZABBIX_HOST=\"10.1.27.57\"\n __API_URL=\"http://%s/zabbix/api_jsonrpc.php\"%__ZABBIX_HOST\n __API_USER=\"api_user_01\"\n 
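# (editor's note) Every method in this class posts the same JSON-RPC 2.0
# envelope to api_jsonrpc.php; only "method", "params" and "auth" vary ("auth"
# is None before user.login succeeds). The shared pattern, written once:
#
#   payload = {"jsonrpc": "2.0", "method": method, "params": params,
#              "auth": auth, "id": 1}
#   requests.post(API_URL, data=json.dumps(payload),
#                 headers={"Content-Type": "application/json-rpc"}, timeout=5)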
__API_PASSWORD=\"api_user_01\"\n __auth=None\n\n def __del__(self):\n self.logout()\n\n def logout(self):\n if(self.__auth==None):\n return 0\n data = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"user.login\",\n \"params\": {\n },\n \"auth\":self.__auth,\n \"id\": 1\n }\n try:\n req = requests.post(self.__API_URL, data=json.dumps(data), headers={\"Content-Type\": \"application/json-rpc\"},\n timeout=5)\n self.__auth==None\n return 0\n except Exception as exc:\n return 1\n return 1\n\n def login(self,api_user=__API_USER,api_password=__API_PASSWORD):\n if self.__auth!=None:\n return 0\n data = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"user.login\",\n \"params\": {\n \"user\": api_user,\n \"password\": api_password\n },\n \"id\": 1\n }\n try:\n req = requests.post(self.__API_URL, data=json.dumps(data), headers={\"Content-Type\": \"application/json-rpc\"},\n timeout=5)\n self.__auth = req.json()[\"result\"]\n return self.__auth\n except Exception as exc:\n print(exc.args)\n return 1\n\n def get_host_ip(self):\n ips=WEB_URL.split(\":\")\n try:\n http=ips[1].split(\"//\")[1]\n return http.split(\"/\")[0]\n except:\n return 1\n return 1\n\n def get_host_id(self,host_ip):\n if(self.__auth==None):\n return 1\n data={\n \"jsonrpc\": \"2.0\",\n \"method\": \"host.get\",\n \"params\": {\n \"output\": [\"hostid\",\"host\"],\n \"selectInterfaces\": [\"ip\",\"dns\"]\n },\n \"auth\": self.__auth,\n \"id\": 1\n }\n try:\n req=requests.get(url=self.__API_URL,data=json.dumps(data),headers={\"Content-Type\": \"application/json-rpc\"},timeout=5)\n req_jso=req.json()\n for i in req_jso[\"result\"]:\n if(i[\"interfaces\"][0][\"ip\"]==host_ip):\n return i[\"hostid\"],i[\"host\"]\n elif(i[\"interfaces\"][0][\"ip\"]==\"\"):\n if(i[\"interfaces\"][0][\"dns\"]==host_ip):\n return i[\"hostid\"],i[\"host\"]\n except:\n pass\n return 1,1\n\n def create_web_scenario(self,name,host_id,url):\n data={\n \"jsonrpc\": \"2.0\",\n \"method\": \"httptest.create\",\n \"params\": {\n \"name\": name,\n \"hostid\": host_id,\n \"steps\": [\n {\n \"name\": name,\n \"url\": url,\n \"status_codes\": 200,\n \"no\": 1\n }\n ]\n },\n \"auth\": self.__auth,\n \"id\": 1\n }\n try:\n req=requests.get(url=self.__API_URL,data=json.dumps(data),headers={\"Content-Type\": \"application/json-rpc\"},timeout=5)\n req_jso=req.json()\n if \"result\" in req_jso.keys():\n return 0,req_jso[\"result\"][\"httptestids\"][0]\n msg=req_jso[\"error\"][\"data\"]\n words=msg.split(\" \")\n if(words[-1]==\"exists.\" and words[-2]==\"already\"):\n return 1,\"已存在\"\n except:\n pass\n return 1,\"错误\"\n\n def create_fail_trigger(self,name,host,url,priority):\n data={\n \"jsonrpc\": \"2.0\",\n \"method\": \"trigger.create\",\n \"params\": [\n {\n \"priority\": priority,\n \"description\": \"%s-错误\"%name,\n \"comments\": \"从监控机访问%s连续两次出现错误出现错误\"%url,\n \"expression\": \"{%s:web.test.fail[%s].min(#2)}>0\"%(host,name)\n }\n ],\n \"auth\": self.__auth,\n \"id\": 1\n }\n try:\n req = requests.get(url=self.__API_URL, data=json.dumps(data),\n headers={\"Content-Type\": \"application/json-rpc\"}, timeout=5)\n req_jso = req.json()\n if \"result\" in req_jso.keys():\n return 0\n return 1\n except:\n pass\n return 1\n\n\n def create_time_trigger(self, name, host, url, priority):\n data = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"trigger.create\",\n \"params\": [\n {\n \"priority\": priority,\n \"description\": \"%s-响应慢\" % name,\n \"comments\": \"从监控机访问%s连续两次访问响应时间超过3秒\" % url,\n \"expression\": \"{%s:web.test.time[%s,%s,resp].min(#2)}>3\" % (host, name,name)\n },\n ],\n \"auth\": 
self.__auth,\n \"id\": 1\n }\n try:\n req = requests.get(url=self.__API_URL, data=json.dumps(data),\n headers={\"Content-Type\": \"application/json-rpc\"}, timeout=5)\n req_jso = req.json()\n if \"result\" in req_jso.keys():\n return 0\n return 1\n except:\n pass\n return 1\n\nif __name__==\"__main__\":\n\n monitor_type = input(\"主机是否采用VCenter监控:(Y/n)\")\n if monitor_type == \"n\" or monitor_type == \"N\":\n monitor_type = False\n else:\n monitor_type = True\n\n NAME=input(\"请输入web名称:\")\n WEB_URL=input(\"请输入WEB URL:\")\n\n z=Zabbix()\n if z.login()==1:\n print(\"登录失败\")\n exit(1)\n\n if not monitor_type:\n host_ip=z.get_host_ip()\n if(host_ip==1):\n is_input=input(\"提取主机IP失败请手动输入:(Y/n)\")\n host_ip=input(\"请输入IP:\")\n print(host_ip)\n\n host_id,host=z.get_host_id(host_ip)\n if(host_id==1):\n print(\"获取主机ID失败!\")\n exit(1)\n ret,msg=z.create_web_scenario(NAME,host_id,WEB_URL)\n print(host)\n if(ret==1):\n print(\"添加web场景失败,错误消息:%s\"%msg)\n exit(1)\n if(z.create_fail_trigger(NAME,host,WEB_URL,\"4\")!=0):\n print(\"创建错误触发器失败!\")\n exit(1)\n if(z.create_time_trigger(NAME,host,WEB_URL,\"3\")!=0):\n print(\"创建响应慢触发器失败!\")\n exit(1)\n print(\"监控项、错误触发器、响应慢触发器创建成功!\")","sub_path":"script/add_web03.py","file_name":"add_web03.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"362828507","text":"\"\"\"\nDirty categories: machine learning with non normalized strings\n==============================================================\n\nIncluding strings that represent categories often calls for much data\npreparation. In particular categories may appear with many morphological\nvariants, when they have been manually input or assembled from diverse\nsources.\n\nHere we look at a dataset on wages [#]_ where the column 'Employee\nPosition Title' contains dirty categories. On such a column, standard\ncategorical encodings leads to very high dimensions and can lose\ninformation on which categories are similar.\n\nWe investigate various encodings of this dirty column for the machine\nlearning workflow, predicting the 'Current Annual Salary' with gradient\nboosted trees. First we manually assemble a complex encoder for the full\ndataframe, after which we show a much simpler way, albeit with less fine\ncontrol.\n\n\n.. [#] https://www.openml.org/d/42125\n\n\n .. |TV| replace::\n :class:`~dirty_cat.TableVectorizer`\n\n .. |Pipeline| replace::\n :class:`~sklearn.pipeline.Pipeline`\n\n .. |OneHotEncoder| replace::\n :class:`~sklearn.preprocessing.OneHotEncoder`\n\n .. |ColumnTransformer| replace::\n :class:`~sklearn.compose.ColumnTransformer`\n\n .. |RandomForestRegressor| replace::\n :class:`~sklearn.ensemble.RandomForestRegressor`\n\n .. |Gap| replace::\n :class:`~dirty_cat.GapEncoder`\n\n .. |MinHash| replace::\n :class:`~dirty_cat.MinHashEncoder`\n\n .. |HGBR| replace::\n :class:`~sklearn.ensemble.HistGradientBoostingRegressor`\n\n .. |SE| replace::\n :class:`~dirty_cat.SimilarityEncoder`\n\n .. 
|permutation importances| replace::\n :func:`~sklearn.inspection.permutation_importance`\n\"\"\"\n\n###############################################################################\n# The data\n# --------\n#\n# We first retrieve the dataset:\nfrom dirty_cat.datasets import fetch_employee_salaries\n\nemployee_salaries = fetch_employee_salaries()\n\n###############################################################################\n# X, the input data (descriptions of employees):\nX = employee_salaries.X\nX\n\n###############################################################################\n# and y, our target column (the annual salary):\ny = employee_salaries.y\ny.name\n\n###############################################################################\n# Now, let's carry out some basic preprocessing:\nimport pandas as pd\n\nX[\"date_first_hired\"] = pd.to_datetime(X[\"date_first_hired\"])\nX[\"year_first_hired\"] = X[\"date_first_hired\"].apply(lambda x: x.year)\n# Get a mask of the rows with missing values in 'gender'\nmask = X.isna()[\"gender\"]\n# And remove them\nX.dropna(subset=[\"gender\"], inplace=True)\ny = y[~mask]\n\n###############################################################################\n# Assembling a machine-learning pipeline that encodes the data\n# ------------------------------------------------------------\n#\n# To build a learning pipeline, we need to assemble encoders for each\n# column, and apply a supervised learning model on top.\n\n###############################################################################\n# The categorical encoders\n# ........................\n#\n# An encoder is needed to turn a categorical column into a numerical\n# representation:\nfrom sklearn.preprocessing import OneHotEncoder\n\none_hot = OneHotEncoder(handle_unknown=\"ignore\", sparse=False)\n\n###############################################################################\n# We assemble these to apply them to the relevant columns.\n# The |ColumnTransformer| is created by specifying a set of transformers\n# alongside with the column names on which each must be applied:\n\nfrom sklearn.compose import make_column_transformer\n\nencoder = make_column_transformer(\n (one_hot, [\"gender\", \"department_name\", \"assignment_category\"]),\n (\"passthrough\", [\"year_first_hired\"]),\n # Last but not least, our dirty column\n (one_hot, [\"employee_position_title\"]),\n remainder=\"drop\",\n)\n\n###############################################################################\n# Pipelining an encoder with a learner\n# ....................................\n#\n# We will use a |HGBR|,\n# which is a good predictor for data with heterogeneous columns\n# (we need to require the experimental feature for scikit-learn versions\n# earlier than 1.0):\nfrom sklearn.experimental import enable_hist_gradient_boosting\n\n# We can now import the |HGBR| from ensemble\nfrom sklearn.ensemble import HistGradientBoostingRegressor\n\n# We then create a pipeline chaining our encoders to a learner\nfrom sklearn.pipeline import make_pipeline\n\npipeline = make_pipeline(encoder, HistGradientBoostingRegressor())\n\n###############################################################################\n# The pipeline can be readily applied to the dataframe for prediction:\npipeline.fit(X, y)\n\n###############################################################################\n# Dirty-category encoding\n# -----------------------\n#\n# The |OneHotEncoder| is actually not well suited to the 'Employee\n# Position Title' column, as this 
column contains 400 different entries:\nimport numpy as np\n\nnp.unique(X[\"employee_position_title\"])\n\n###############################################################################\n# .. _example_minhash_encoder:\n#\n# We will now experiment with encoders specially made for handling\n# dirty columns:\n\nfrom dirty_cat import (\n SimilarityEncoder,\n TargetEncoder,\n MinHashEncoder,\n GapEncoder,\n)\n\nencoders = {\n \"one-hot\": one_hot,\n \"similarity\": SimilarityEncoder(),\n \"target\": TargetEncoder(handle_unknown=\"ignore\"),\n \"minhash\": MinHashEncoder(n_components=100),\n \"gap\": GapEncoder(n_components=100),\n}\n\n###############################################################################\n# We now loop over the different encoding methods,\n# instantiate a new |Pipeline| each time, fit it\n# and store the returned cross-validation score:\n\nfrom sklearn.model_selection import cross_val_score\n\nall_scores = dict()\n\nfor name, method in encoders.items():\n encoder = make_column_transformer(\n (one_hot, [\"gender\", \"department_name\", \"assignment_category\"]),\n (\"passthrough\", [\"year_first_hired\"]),\n # Last but not least, our dirty column\n (method, [\"employee_position_title\"]),\n remainder=\"drop\",\n )\n\n pipeline = make_pipeline(encoder, HistGradientBoostingRegressor())\n scores = cross_val_score(pipeline, X, y)\n print(f\"{name} encoding\")\n print(f\"r2 score: mean: {np.mean(scores):.3f}; std: {np.std(scores):.3f}\\n\")\n all_scores[name] = scores\n\n###############################################################################\n# Plotting the results\n# ....................\n#\n# Finally, we plot the scores on a boxplot:\n\nimport seaborn\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(4, 3))\nax = seaborn.boxplot(data=pd.DataFrame(all_scores), orient=\"h\")\nplt.ylabel(\"Encoding\", size=20)\nplt.xlabel(\"Prediction accuracy \", size=20)\nplt.yticks(size=20)\nplt.tight_layout()\n\n###############################################################################\n# The clear trend is that encoders grasping similarities between categories\n# (|SE|, |MinHash|, and |Gap|) perform better than those discarding it.\n#\n# |SE| is the best performer, but it is less scalable on big\n# data than the |MinHash| and |Gap|. The most scalable encoder is\n# the |MinHash|. On the other hand, the |Gap| has the benefit of\n# providing interpretable features\n# (see :ref:`sphx_glr_auto_examples_02_investigating_dirty_categories.py`)\n#\n# |\n#\n\n###############################################################################\n# .. _example_table_vectorizer:\n#\n# A simpler way: automatic vectorization\n# --------------------------------------\n#\n# The code to assemble a column transformer is a bit tedious. 
We will\n# now explore a simpler, automated, way of encoding the data.\n#\n# Let's start again from the raw data:\nemployee_salaries = fetch_employee_salaries()\nX = employee_salaries.X\ny = employee_salaries.y\n\n###############################################################################\n# We'll drop the 'date_first_hired' column as it's redundant with\n# 'year_first_hired'.\nX = X.drop([\"date_first_hired\"], axis=1)\n\n###############################################################################\n# We still have a complex and heterogeneous dataframe:\nX\n\n###############################################################################\n# The |TV| can turn this dataframe into a form suited for\n# machine learning.\n\n###############################################################################\n# Using the TableVectorizer in a supervised-learning pipeline\n# -----------------------------------------------------------\n#\n# Assembling the |TV| in a |Pipeline| with a powerful learner,\n# such as gradient boosted trees, gives **a machine-learning method that\n# can be readily applied to the dataframe**.\n#\n# The |TV| requires at least dirty_cat 0.2.0.\n#\n\nfrom dirty_cat import TableVectorizer\n\npipeline = make_pipeline(\n TableVectorizer(auto_cast=True), HistGradientBoostingRegressor()\n)\n\n###############################################################################\n# Let's perform a cross-validation to see how well this model predicts:\n\nfrom sklearn.model_selection import cross_val_score\n\nscores = cross_val_score(pipeline, X, y, scoring=\"r2\")\n\nprint(f\"scores={scores}\")\nprint(f\"mean={np.mean(scores)}\")\nprint(f\"std={np.std(scores)}\")\n\n###############################################################################\n# The prediction performed here is pretty much as good as above\n# but the code here is much simpler as it does not involve specifying\n# columns manually.\n\n###############################################################################\n# Analyzing the features created\n# ------------------------------\n#\n# Let us perform the same workflow, but without the |Pipeline|, so we can\n# analyze the TableVectorizer's mechanisms along the way.\ntable_vec = TableVectorizer(auto_cast=True)\n\n# %%\n# We split the data between train and test, and transform them:\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.15, random_state=42\n)\n\nX_train_enc = table_vec.fit_transform(X_train, y_train)\nX_test_enc = table_vec.transform(X_test)\n\n###############################################################################\n# The encoded data, X_train_enc and X_test_enc, are numerical arrays:\nX_train_enc\n\n###############################################################################\n# They have more columns than the original dataframe, but not much more:\nX_train.shape, X_train_enc.shape\n\n###############################################################################\n# Inspecting the features created\n# ...............................\n#\n# The |TV| assigns a transformer for each column. 
We can inspect this\n# choice:\nfrom pprint import pprint\n\npprint(table_vec.transformers_)\n\n###############################################################################\n# This is what is being passed to the |ColumnTransformer| under the hood.\n# If you're familiar with how the latter works, it should be very intuitive.\n# We can notice it classified the columns 'gender' and 'assignment_category'\n# as low cardinality string variables.\n# A |OneHotEncoder| will be applied to these columns.\n#\n# The vectorizer actually makes the difference between string variables\n# (data type ``object`` and ``string``) and categorical variables\n# (data type ``category``).\n#\n# Next, we can have a look at the encoded feature names.\n#\n# Before encoding:\nX.columns.to_list()\n\n###############################################################################\n# After encoding (we only plot the first 8 feature names):\nfeature_names = table_vec.get_feature_names_out()\nfeature_names[:8]\n\n###############################################################################\n# As we can see, it gave us interpretable columns.\n# This is because we used the |Gap| on the column 'division',\n# which was classified as a high cardinality string variable.\n# (default values, see |TV|'s docstring).\n#\n# In total, we have a reasonable number of encoded columns:\nlen(feature_names)\n\n\n###############################################################################\n# Feature importances in the statistical model\n# --------------------------------------------\n#\n# In this section, we will train a regressor, and plot the feature importances.\n#\n# .. topic:: Note:\n#\n# To minimize computation time, we use the feature importances computed by the\n# |RandomForestRegressor|, but you should prefer |permutation importances|\n# instead (which are less subject to biases).\n#\n# First, let's train the |RandomForestRegressor|:\n\nfrom sklearn.ensemble import RandomForestRegressor\n\nregressor = RandomForestRegressor()\nregressor.fit(X_train_enc, y_train)\n\n###############################################################################\n# Retrieving the feature importances:\n\nimportances = regressor.feature_importances_\nstd = np.std([tree.feature_importances_ for tree in regressor.estimators_], axis=0)\nindices = np.argsort(importances)\n# Sort from least to most\nindices = list(reversed(indices))\n\n###############################################################################\n# Plotting the results:\n\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(12, 9))\nplt.title(\"Feature importances\")\nn = 20\nn_indices = indices[:n]\nlabels = np.array(feature_names)[n_indices]\nplt.barh(range(n), importances[n_indices], color=\"b\", yerr=std[n_indices])\nplt.yticks(range(n), labels, size=15)\nplt.tight_layout(pad=1)\nplt.show()\n\n###############################################################################\n# We can deduce from this data that the three factors that define the\n# most the salary are: being hired for a long time, being a manager, and\n# having a permanent, full-time job :)\n#\n#\n# .. 
topic:: The |TV| automates preprocessing\n#\n# As this notebook demonstrates, many preprocessing steps can be\n# automated by the |TV|, and the resulting pipeline can still be\n# inspected, even with non-normalized entries.\n#\n","sub_path":"examples/01_dirty_categories.py","file_name":"01_dirty_categories.py","file_ext":"py","file_size_in_byte":14117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"236212414","text":"\"\"\"\nTest 2. Open the ProQuest website from the results, search for ‘QA’ in the top nav, and take a screenshot.\n\nGoes to proquest.com, takes screenshot and saves it as screenshot.png\n\"\"\"\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndriver = webdriver.Firefox()\ndriver.implicitly_wait(10)\n\ntry:\n\tdriver.get(\"http://www.proquest.com\")\n\t\n\t# Select select searchbar named searchKeyword\n\telem = driver.find_element_by_name(\"searchKeyword\")\n\telem.clear()\n\telem.send_keys(\"QA\")\n\telem.send_keys(Keys.RETURN)\n\n\t# Wait until the title updates to contain search.\n\t# This makes sure the page has loaded and we don't screenshot the searchbar\n\tWebDriverWait(driver, 10).until(EC.title_contains(\"Search\"))\n\tdriver.save_screenshot(\"screenshot.png\")\n\n\nfinally:\n driver.close()\n","sub_path":"test_proquest.py","file_name":"test_proquest.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"39069576","text":"# File: awsguardduty_consts.py\n# Copyright (c) 2019-2020 Splunk Inc.\n#\n# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)\n\nAWSGUARDDUTY_MAX_PER_PAGE_LIMIT = 50\nAWSGUARDDUTY_POLL_NOW_DAYS = 30\nAWSGUARDDUTY_INVALID_LIMIT = 'Please provide non-zero positive integer in {param_name}'\nAWSGUARDDUTY_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\nAWSGUARDDUTY_SEVERITY_MAP = {\n 'Low': '2',\n 'Medium': '5',\n 'High': '8'\n }\nAWSGUARDDUTY_SEVERITY_REVERSE_MAP = {\n 2: 'Low',\n 5: 'Medium',\n 8: 'High'\n }\nAWSGUARDDUTY_JSON_REGION = \"region\"\nAWSGUARDDUTY_REGION_DICT = {\n \"US East (N. Virginia)\": \"us-east-1\",\n \"US East (Ohio)\": \"us-east-2\",\n \"US West (N. 
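# (editor's note, for the awsguardduty_consts.py record above) A sketch of
# bucketing a raw GuardDuty severity (a float) into the Low/Medium/High labels
# used by the maps above; the 4.0 and 7.0 cut-offs follow AWS's documented
# severity bands and are an assumption, not taken from this file:
def severity_label(value):
    if value < 4.0:
        return "Low"
    if value < 7.0:
        return "Medium"
    return "High"

assert severity_label(2.0) == "Low" and severity_label(8.0) == "High"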
California)\": \"us-west-1\",\n \"US West (Oregon)\": \"us-west-2\",\n \"Asia Pacific (Mumbai)\": \"ap-south-1\",\n \"Asia Pacific (Seoul)\": \"ap-northeast-2\",\n \"Asia Pacific (Singapore)\": \"ap-southeast-1\",\n \"Asia Pacific (Sydney)\": \"ap-southeast-2\",\n \"Asia Pacific (Tokyo)\": \"ap-northeast-1\",\n \"Canada (Central)\": \"ca-central-1\",\n \"EU (Frankfurt)\": \"eu-central-1\",\n \"EU (Ireland)\": \"eu-west-1\",\n \"EU (London)\": \"eu-west-2\",\n \"EU (Paris)\": \"\teu-west-3\",\n \"South America (Sao Paulo)\": \"sa-east-1\"\n }\n","sub_path":"Apps/phawsguardduty/awsguardduty_consts.py","file_name":"awsguardduty_consts.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"137850229","text":"import threading\nimport time\n\n\ndef nondaemon():\n print('Starting {} with a sleep of 2 secs'.format(threading.current_thread().name))\n time.sleep(5)\n\n\ndef daemon():\n print('Starting {} with a sleep of 15 secs'.format(threading.current_thread().name))\n time.sleep(15)\n\n\nif __name__ == \"__main__\":\n t = threading.Thread(name='Non-daemon', target=nondaemon)\n t.start()\n d = threading.Thread(name='Daemon', target=daemon, daemon=True)\n d.start()\n\n # Join() blocks until the thread has exited\n # t.join()\n\n print('{}'.format(threading.main_thread()))\n\n # d.join() - it will block\n print(\"t.isAlive() {}\".format(t.isAlive()))\n print(\"d.isAlive() {}\".format(d.isAlive()))\n","sub_path":"python3/multi-threading/daemonVsnonDaemonThread.py","file_name":"daemonVsnonDaemonThread.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"364290097","text":"from django.http import HttpResponse, HttpResponseNotFound, HttpResponseRedirect\n#from models import activate_account, get_activity_by_hash, get_user_by_hash, get_user_by_id\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.core.mail import send_mail, EmailMessage, EmailMultiAlternatives\nfrom models import Match, Landing\nimport hashlib\nimport base64\n# import random\n\ndef index_landing(request):\n return render_to_response(\n 'landing.html', {},\n context_instance=RequestContext(request)\n )\ndef landing_privacy(request):\n return render_to_response(\n 'privacy.html', {},\n context_instance=RequestContext(request)\n )\ndef landing_terms(request):\n return render_to_response(\n 'terms.html', {},\n context_instance=RequestContext(request)\n )\ndef landing_contact(request):\n return render_to_response(\n 'contact.html', {},\n context_instance=RequestContext(request)\n )\n\ndef index_fb(request):\n return render_to_response(\n 'fbcanvas.html', {},\n context_instance=RequestContext(request)\n )\n\ndef index_feedback(request):\n return render_to_response(\n 'feedback.html', {},\n context_instance=RequestContext(request)\n )\n \ndef index_otago_comp(request):\n return render_to_response(\n 'misc/otago-comp.html', {},\n context_instance=RequestContext(request)\n )\n\n# def get_fb_profile_pic_link(fb_userid):\n# return 'http://graph.facebook.com/%s/picture' % fb_userid\n\n# def get_fb_profile_link(fb_userid):\n# return '//www.facebook.com/' + fb_userid\n\n# def _get_others(length):\n# others = None\n# if length == 8:\n# others = 'and 1 other'\n# if length >= 9:\n# others = 'and %s others' % str(length - 7)\n# return others\n\n# def _get_keens_others(keens, first=0):\n# keeners = [{'pic_link' : 
get_fb_profile_pic_link(keen['user_id']), 'profile_link' : get_fb_profile_link(keen['fb_username'])} for keen in keens]\n# keens = [keeners[first]]\n# del keeners[first]\n# random.shuffle(keeners)\n# keens.extend(keeners)\n# return keens[:7], _get_others(len(keens)) \n\n# def _get_activity_html(text, keyword):\n# return text.replace('#'+keyword, '#'+keyword+'')\n\n# def activity_invite(request, first_name, hash):\n# activity = get_activity_by_hash(hash)\n# if not activity:\n# return HttpResponseNotFound()\n# user = get_user_by_id(activity['suggester_id'])\n# keeners, others = _get_keens_others(activity['keens'])\n# vars_ = {\n# 'STATIC_DIR' : '/static/invite_activate',\n# 'name':user['first_name'][:9],\n# 'message' : _get_activity_html(activity['text'], activity['keyword']),\n# 'interests' : user['interests'],\n# 'keeners' : keeners,\n# 'others' : others,\n# 'behalf' : None\n# }\n# return render_to_response('invite_activate/activity.html', vars_)\n\n# def activity_behalf_invite(request, first_name, hash, keen_index):\n# activity = get_activity_by_hash(hash)\n# if not activity:\n# return HttpResponseNotFound()\n# behalf = get_user_by_id(activity['suggester_id'])\n# behalf = behalf['first_name']\n# keens = activity['keens']\n# keen_index = int(keen_index)\n# keener_id = keens[keen_index].get('user_id')\n# keener = get_user_by_id(keener_id)\n# keeners, others = _get_keens_others(activity['keens'], keen_index)\n# vars_ = {\n# 'STATIC_DIR' : '/static/invite_activate',\n# 'name': keener['first_name'][:9],\n# 'message' : _get_activity_html(activity['text'], activity['keyword']),\n# 'keeners' : keeners,\n# 'others' : others,\n# 'behalf' : behalf\n# }\n# return render_to_response('invite_activate/activity.html', vars_)\n\n# def user_invite(request, first_name, hash):\n# user = get_user_by_hash(hash)\n# if not user:\n# return HttpResponseNotFound()\n# return render_to_response('invite_activate/user.html', {'STATIC_DIR' : '/static/invite_activate', 'name':user['first_name'], 'interests' : user['interests']})\n\n# def activation(request, hash):\n# user = activate_account(hash)\n# if not user:\n# return HttpResponseNotFound()\n# return render_to_response('invite_activate/activation.html', {'STATIC_DIR' : '/static/invite_activate', 'name':user['first_name']})\n\n\n# methods for returning demo mobclient to webclient\ndef webclient(request):\n return render_to_response('webclient/index.html')\ndef webclient_static(request, path='', rpath=''):\n if path:\n abspath = open('webclient/static/webclient/' + path,'r')\n mimetype = 'application/x-javascript'\n elif rpath:\n abspath = open('webclient/static/webclient/resources/' + rpath,'r')\n if rpath.endswith('css'):\n mimetype = 'text/css'\n elif rpath.endswith('png'):\n mimetype = 'image/png'\n elif rpath.endswith('jpg'):\n mimetype = 'image/jpeg'\n \n response = HttpResponse(content=abspath.read())\n response['Content-Type'] = mimetype\n return response\n \ndef w(request):\n return render_to_response('w/index.html')\ndef w_static(request, path='', rpath=''):\n if path:\n #abspath = open('D:/Projects/keenpin/webclient2/webclient/templates/w/' + path,'r')\n abspath = open('/srv/www/keen.to/current/webclient/templates/w/' + path,'r')\n mimetype = 'application/x-javascript'\n elif rpath:\n #abspath = open('D:/Projects/keenpin/webclient2/webclient/templates/w/resources/' + rpath,'r')\n abspath = open('/srv/www/keen.to/current/webclient/templates/w/resources/' + rpath,'r')\n if rpath.endswith('css'):\n mimetype = 'text/css'\n elif rpath.endswith('png'):\n mimetype 
= 'image/png'\n elif rpath.endswith('jpg'):\n mimetype = 'image/jpeg'\n elif rpath.endswith('ttf'):\n mimetype = 'application/octet-stream'\n\n response = HttpResponse(content=abspath.read())\n response['Content-Type'] = mimetype\n return response\n\ndef get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[-1].strip()\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n\noptions = [\n 'Coffee @ Midnight Espresso',\n 'Soda @ Six Barrel Soda',\n 'Coffee @ Fidels',\n 'Lunch @ Dixon St Deli',\n 'Lunch @ Viva Mexico',\n 'Dinner @ Sweet Mothers Kitchen',\n 'Dinner @ Laundry',\n 'Dinner @ Monsoon Poon',\n 'Drinks @ Crumpet',\n 'Drinks @ Cigars, Gin and Rum Merchant & Co',\n 'Drinks @ Hawthorn Lounge',\n 'DTF'\n ]\n\ndef match_landing(request):\n \"\"\"\n Returns the matching landing page\n \"\"\"\n landings = list(Landing.objects.all())\n if len(landings) == 0:\n landing = Landing()\n else:\n landing = landings[0]\n landing.views.append(get_client_ip(request))\n landing.save()\n \n return render_to_response('match_landing.html', {'choices' : options}, context_instance=RequestContext(request))\n\ndef match_post_email(request, hash_):\n \n email = request.POST.get('email')\n match = Match.objects.get(hash = hash_)\n match.email_1 = email\n match.save()\n return render_to_response('match_post.html', {'link' : 'http://keen.to/match/' + hash_, 'hash' : hash_, 'showform' : False}, context_instance=RequestContext(request))\n\ndef match_post(request):\n \"\"\"\n Form posted to this url, creates the match and returns a link to send the next one to\n \"\"\"\n if request.method != 'POST':\n return HttpResponseRedirect(\"http://keen.to/match\")\n chosen = request.POST.getlist('chosen[]')\n email = request.POST.get('email')\n suggested = []\n for c in chosen:\n if c not in options:\n suggested.append(c)\n new_match = Match(email_1 = email, chose_1 = chosen, views=[get_client_ip(request)], suggested = suggested)\n new_match.save()\n id_ = new_match.id\n hasher = hashlib.sha1(str(id_))\n hash_ = base64.urlsafe_b64encode(hasher.digest())[:5]\n new_match.hash = hash_\n new_match.save()\n #Email them the number?\n return render_to_response('match_post.html', {'link' : 'http://keen.to/match/' + hash_, 'hash' : hash_, 'showform' : True}, context_instance=RequestContext(request))\n \ndef send_email(email_address, link):\n subject = 'We know what you guys should do!'\n body = 'Hell yeah, your tinder babe has replied! 
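The `webclient_static`/`w_static` views above pick a mimetype by hand, hit an `UnboundLocalError` for any extension they don't list, and never close the opened file. A sketch of the same idea using the standard-library `mimetypes` module and Django's `FileResponse`, which closes the file for you; the static root below is taken from the record, the 404 handling is an assumption:

import mimetypes
import os
from django.http import FileResponse, Http404

STATIC_ROOT = 'webclient/static/webclient'

def webclient_static(request, path=''):
    abspath = os.path.join(STATIC_ROOT, path)
    # real code should also reject '..' segments to avoid path traversal
    if not os.path.isfile(abspath):
        raise Http404(path)
    mimetype, _ = mimetypes.guess_type(abspath)  # returns (type, encoding); None if unknown
    return FileResponse(open(abspath, 'rb'),
                        content_type=mimetype or 'application/octet-stream')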
Follow the link to see what you guys should do %s' % link\n msg = EmailMessage(subject=subject, body=body,from_email='no-reply@keen.to', to=[email_address])\n msg.send()\n\ndef match_reply(request, hash_):\n \n match = Match.objects.get(hash = hash_)\n new_options = match.suggested + options\n matches = []\n if request.method == 'POST':\n chosen = request.POST.getlist('chosen[]')\n email = request.POST.get('email')\n match.chose_2 = chosen\n match.email_2 = email\n match.submit_2 = True\n try:\n send_email(match.email_1, 'http://keen.to/match/' + hash_)\n except:\n pass\n \n matches = list(set(match.chose_1) & set(match.chose_2))\n \n match.views.append(get_client_ip(request))\n match.save()\n \n return render_to_response('match_reply.html', {'choices' : new_options, 'matches' : matches, 'showform' : not match.submit_2}, context_instance=RequestContext(request))\n \n\n# def index_clubs(request):\n# return render_to_response('landing/index_clubs.html', {'STATIC_DIR' : '/static/landing/'}, context_instance=RequestContext(request))\n\n# def clubs(request):\n# return render_to_response('clubs/index.html', {'STATIC_DIR' : '/static/clubs/'}, context_instance=RequestContext(request))\n\n","sub_path":"resources/code/webclient/webclient/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"577576006","text":"import npyscreen\nfrom reprodutor import Reprodutor\nimport threading\n\n\nclass App(npyscreen.NPSAppManaged):\n\t'''\n\tEsta é a classe que vai gerenciar toda nossa aplicação\n\t'''\n\n\tdef onStart(self):\n\t\t'''\n\t\tEsta função é chamada quando o app começa a rodar\n\t\t'''\n\t\t# Registra um formulario\n\t\tself.registerForm('MAIN', Screen1())\n\n\nclass Screen1(npyscreen.Form):\n\t'''\n\tEste é um formulario que sera a tela base do nosso app\n\t'''\n\n\tpausado = True\n\n\tdef create(self):\n\t\t'''\n\t\tEsta função é chamada quando o formulario é criado\n\t\tela servirar para construir todos os widgets filhos\n\t\t'''\n\t\t\n\t\t# Variavel que vai armazenar uma Thread\n\t\tself.t = False\n\n\t\tself.rep = Reprodutor()\n\t\tself.rep.buscar_musicas()\n\t\tself.rep.metodo_customizado = self.atualizar_progresso\n\n\t\t# Adiciona dois widgets para mensagens\n\t\tself.add_widget(npyscreen.TitleText, name='Mensagem: ', value='Boas vindas', editable=False)\n\t\tself.add_widget(npyscreen.TitleText, name='Sobre: ', value='Este é um simples reprodutor de musica feito em python', editable=False)\n\t\t\n\t\t# Guarda as musicas encontradas pelo reprodutor\n\t\tself.selecao = []\n\n\t\tfor key, value in self.rep.buscador.data['archives'].items():\n\t\t\t\n\t\t\tfor musica in value:\n\n\t\t\t\tself.selecao.append(musica)\n\t\t# Este é um widget que é tipo uma caixa em quê o usuario vai selecionar a musica desejada\n\t\tself.musica = self.add_widget(npyscreen.TitleMultiLine, name='Musicas: ', values=self.selecao, max_height=20)\n\t\t# Um botão usado para reproduzir a musica selecionada\n\t\tself.confirmar = self.add_widget(npyscreen.ButtonPress, name='Reproduzir', relx=50)\n\t\t# Armazena a função que vai ser chamada quando o usuario pressionar o botão\n\t\tself.confirmar.whenPressed = self.reproduzir\n\t\t# Botão para dar pause/play na musica\n\t\tself.pausa_play = self.add_widget(npyscreen.ButtonPress, name='Pausar', relx=70, rely=-16)\n\t\t# Faz um bind para a função pausar_reproduzir\n\t\t# Este botão pausa e continua a reprodução da musica 
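`match_post` above builds its short link with `hashlib.sha1(str(id_))`, which only runs on Python 2; Python 3's hashlib rejects `str` input with a TypeError. A sketch of the same 5-character urlsafe scheme with the bytes round-trip made explicit:

import base64
import hashlib

def short_hash(record_id):
    # hashlib needs bytes on Python 3, so encode the stringified id first
    digest = hashlib.sha1(str(record_id).encode('utf-8')).digest()
    # urlsafe_b64encode returns bytes; decode before slicing to get a str token
    return base64.urlsafe_b64encode(digest).decode('ascii')[:5]

print(short_hash(42))  # deterministic 5-char token for a given id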
atual\n\t\tself.pausa_play.whenPressed = self.pausar_reproduzir\n\t\t# Cria um widget do tipo slider\n\t\tself.progresso = self.add_widget(npyscreen.Slider, label=False, out_of=100, lowest=0, width=80, relx=30, rely=27)\n\t\t# Função para alterar o tempo da musica.\n\t\tself.progresso.when_value_edited = self.alterar_tempo\n\n\tdef reproduzir(self, continuar=False):\n\t\t''''\n\t\tO parametro continuar diz se é para reproduzir a mesma musica do local onde parou\n\t\t'''\n\n\t\t# Troca o estado atual da musica\n\t\tif not continuar:\n\n\t\t\tself.pausado = False\n\t\t\n\t\t# Pega o valor que o usuario selecionou\n\t\tself.musica_selecionada = self.selecao[self.musica.value]\n\t\t# Passa o arquivo e o formato do arquivo para o reprodutor\n\t\tself.rep.selecionar_arquivo(arquivo=self.musica_selecionada, formato=self.musica_selecionada.split('.')[len(self.musica_selecionada.split('.')) - 1])\n\t\t# Diz pro reprodutor para a execução de qualquer musica no momento\n\t\tself.rep.reproduzindo = False\n\t\t\n\t\t# Verifica se ha alguma Thread ativa\n\t\tif self.t:\n\t\t\t# Espera o termino da Thread\n\t\t\tself.t.join()\n\t\t# Inicia uma nova Thread para reproduzir a musica\n\t\tself.t = threading.Thread(target=self.rep.reproduzir, args=(continuar,))\n\t\tself.t.start()\n\n\tdef atualizar_progresso(self, progresso):\n\t\t'''\n\t\t\tEsta função atualiza o tempo da barra conforme a musica passa.\n\t\t'''\n\n\t\tself.progresso.value = progresso\n\t\tself.progresso.display()\n\n\tdef alterar_tempo(self):\n\n\t\ttry:\n\t\t\t# Pega o valor do widget slide que controla o tempo da musica e transforma ele em um valor compativel com o tempo da musica\n\t\t\tnovo_progresso = (self.progresso.value / 100) * len(self.rep.arquivo)\n\t\t\t# Para a reprodução\n\t\t\tself.rep.reproduzindo = False\n\t\t\t# Espera a thread terminar\n\t\t\tself.t.join()\n\t\t\t# Salva o novo progresso\n\t\t\tself.rep.progresso = int(novo_progresso)\n\t\t\t# Muda o botão de pausa/play para play\n\t\t\tself.pausa_play.name = 'Play'\n\t\t\t# Atualiza o widget\n\t\t\tself.pausa_play.update()\n\t\t\t# Diz que o estado do reprodutor é pausado\n\t\t\tself.pausado = True\n\n\t\texcept:\n\n\t\t\tpass\n\n\tdef pausar_reproduzir(self):\n\t\t\n\t\t# Verifica se a musica esta pausada\n\t\tif self.pausado:\n\t\t\t\n\t\t\t# Chama a função para voltar a reproduzir\n\t\t\tself.reproduzir(True)\n\t\t\t# Altera o estado da musica\n\t\t\tself.pausado = False\n\t\t\t# Alterar o name do botao de pausa/play\n\t\t\tself.pausa_play.name = 'Pausar'\n\t\t\t# Atualiza o botão\n\t\t\tself.pausa_play.update()\n\n\t\telse:\n\t\t\t\n\t\t\t# Para a reprodução\n\t\t\tself.rep.reproduzindo = False\n\t\t\t# Espera a Thread terminar\n\t\t\tself.t.join()\n\t\t\t# Altera o estado da musica\n\t\t\tself.pausado = True\n\t\t\t# Altera o nome do botão pausa/play\n\t\t\tself.pausa_play.name = 'Play'\n\t\t\t# Atualiza o botão\n\t\t\tself.pausa_play.update()\n\n\tdef afterEditing(self):\n\t\t\n\t\t# Usado para fecha o reprodutor quando fecha o App\n\t\tself.rep.reproduzindo = False\n\t\tself.parentApp.setNextForm(None)\n\n\nif __name__ == '__main__':\n\n\tapp = App()\n\tapp.run()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"398225967","text":"import os\nimport re\nfrom joblib import Parallel, delayed\nimport logging\nimport math\nfrom collections import defaultdict\n\n\ndef ensure_dir(directory):\n\n try:\n if not 
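The npyscreen player in main.py above stops playback by flipping a shared `reproduzindo` boolean and then joining the worker thread; `threading.Event` expresses the same handshake more explicitly. A minimal sketch (class and method names are hypothetical):

import threading
import time

class Player:
    def __init__(self):
        self._stop = threading.Event()
        self._thread = None

    def play(self):
        self._stop.clear()
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        while not self._stop.is_set():  # worker polls the event each iteration
            time.sleep(0.1)             # stand-in for "play one audio chunk"

    def stop(self):
        self._stop.set()                # signal first, then wait for the worker
        if self._thread is not None:
            self._thread.join()

player = Player()
player.play()
player.stop()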
os.path.exists(directory):\n os.makedirs(directory)\n except OSError as e:\n # Raising any errors except concurrent access\n if e.errno != 17:\n raise\n\n\ndef remove_abs(path):\n\n if os.path.isabs(path):\n return path.lstrip(\"/\")\n else:\n return path\n\n\ndef get_other_extension(filename, target_extension):\n\n basename, extension = os.path.splitext(filename)\n\n return \"{0}.{1}\".format(basename, target_extension)\n\n\ndef _get_token_count(file_path, lowercase=None, replace_digits=None):\n\n token_count = defaultdict(int)\n\n with open(file_path, \"r\", encoding=\"UTF-8\") as input_file:\n for line in input_file:\n if re.match(\"^$\", line):\n continue\n\n line_str = line.rstrip(\"\\n\")\n\n if lowercase:\n line_str = line_str.lower()\n\n if replace_digits:\n line_str = re.sub(\"\\d\", \"0\", line_str)\n\n tokens = line_str.split(\" \")\n\n for token in tokens:\n token_count[token] += 1\n\n return token_count\n\n\ndef regroup_files(input_path, output_path, nb_output_files=100, n_jobs=1):\n\n logging.info(\"Fetching number of lines\")\n nb_files, nb_lines = _fetch_number_lines(input_path, n_jobs=n_jobs)\n logging.info(\"* nb. lines: {:,}\".format(nb_lines))\n\n logging.info(\"Creating chunks\")\n packets = _chunk_list(nb_lines, nb_output_files)\n\n line_counter = 0\n file_counter = 0\n\n progress_counter = 0\n display_every_n = math.ceil(nb_files * 0.05)\n\n logging.info(\"Processing files\")\n\n target_file = os.path.join(os.path.abspath(output_path), \"{:05d}.txt\".format(\n file_counter + 1\n ))\n\n current_output_file = open(target_file, \"w\", encoding=\"UTF-8\")\n\n for root, dirs, files in os.walk(os.path.abspath(input_path)):\n for filename in files:\n progress_counter += 1\n\n with open(os.path.join(root, filename), \"r\", encoding=\"UTF-8\") as input_file:\n for line in input_file:\n if re.match(\"^$\", line):\n continue\n\n if line_counter >= packets[file_counter]:\n line_counter = 0\n file_counter += 1\n\n target_file = os.path.join(os.path.abspath(output_path), \"{:05d}.txt\".format(\n file_counter + 1\n ))\n\n current_output_file.close()\n current_output_file = open(target_file, \"w\", encoding=\"UTF-8\")\n\n current_output_file.write(line)\n\n line_counter += 1\n\n cur_percentage = (float(progress_counter) / nb_files) * 100\n if progress_counter % display_every_n == 0 or cur_percentage >= 100:\n logging.info(\"* ({:5.2f}%), processed={}\".format(\n round(cur_percentage, 2),\n progress_counter\n ))\n\n\ndef _fetch_number_lines(input_path, n_jobs=1):\n\n processing_list = list()\n\n for root, dirs, files in os.walk(os.path.abspath(input_path)):\n for filename in files:\n processing_list.append(os.path.join(root, filename))\n\n results = Parallel(n_jobs=n_jobs)(delayed(_fetch_number_lines_one_file)(input_file)\n for input_file in processing_list)\n\n return len(processing_list), sum(results)\n\n\ndef _fetch_number_lines_one_file(input_file):\n\n nb_lines = 0\n\n with open(input_file, \"r\", encoding=\"UTF-8\") as input_file:\n for line in input_file:\n if re.match(\"^$\", line):\n continue\n\n nb_lines += 1\n\n return nb_lines\n\n\ndef _chunk_list(nb_lines, nb_parts):\n \"\"\"\n Divide a list into chunk of equal size\n :param the_list: list to chunk\n :param nb_parts: number of chunks\n :return: list of lists\n \"\"\"\n\n division = nb_lines / nb_parts\n temp_list = range(nb_lines)\n\n return [len(temp_list[round(division * i):round(division * (i + 1))]) for i in 
range(nb_parts)]\n","sub_path":"embed/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"615018880","text":"### Reverse polish notation calculator ###\n### Syntax:\n### ,\n### Example 1:\n### 5,3+\n### evaluates to 8\n### Example 2:\n### 1,2.5*3,4*+\n### evaluates to 14.5\nimport sys\n\n## Get File Input ##\nif len(sys.argv) != 2:\n raise Exception(\"incorrect number of arguments\")\n\nwith open(sys.argv[1]) as f:\n input_str = f.read()\n\n## Remove Invalid Characters ##\ndef is_numeric_char(char):\n return char.isdigit() or (char == \".\")\n\noperators = \"+-*/\"\nexpression = \"\"\nfor c in input_str:\n if is_numeric_char(c) or (c == \",\") or (c in operators):\n expression += c\nprint(\"Expression:\\n\" + expression + \"\\n\")\n\n## Perform Operations ##\nstack = []\ncurr_num = \"\"\nfor c in expression:\n print(\"Character: \" + c)\n if is_numeric_char(c):\n print(\"NUMERIC\")\n curr_num += c\n elif c == \",\":\n print(\"PUSH NUMBER\")\n print(\"curr_num = \" + curr_num)\n stack.append(float(curr_num))\n curr_num = \"\"\n elif c in operators:\n if curr_num != \"\":\n stack.append(float(curr_num))\n curr_num = \"\"\n print(\"POP WITH OPERATOR \" + c)\n first = stack.pop()\n second = stack.pop()\n if c == \"+\":\n stack.append(first + second)\n elif c == \"-\":\n stack.append(first - second)\n elif c == \"*\":\n stack.append(first * second)\n elif c == \"/\":\n stack.append(first / second)\n else:\n raise Exception(\"wat\")\n print(\"Stack: \" + str(stack) + \"\\n\")\n\n## Show Final Answer ##\nprint(\"Answer:\\n\" + str(stack.pop()))","sub_path":"rpn_calc.py","file_name":"rpn_calc.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"333742351","text":"def triangle (num):\n if num % 2 == 0:\n num += 1\n\n space = num // 2 + 1\n diamond_core = (num / 2) - 0.5\n j = num // 2 + 1\n\n for i in range (0, num):\n if i < diamond_core:\n i += 1\n space -= 1\n print ((space*' ') + (i*'*') + ((i-1)*'*'))\n elif i == diamond_core:\n i += 1\n space -= 1\n print ((space*' ') + (i*'*') + ((i-1)*'*'))\n else:\n space += 1\n j -= 1\n print ((space*' ') + (j*'*') + ((j-1)*'*'))\n\ndiamond = 10\ntriangle(diamond)\n","sub_path":"week-03/day-3/weekend/tibi.py","file_name":"tibi.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"584640450","text":"\nimport torch\nimport torch.nn as nn\n\n\nclass ConfL1Loss(nn.Module):\n def __init__(self, args):\n super(ConfL1Loss, self).__init__()\n\n self.args = args\n self.t_valid = 0.0001\n\n def forward(self, pred, gt, cout):\n gt = torch.clamp(gt, min=0, max=self.args.max_depth)\n pred = torch.clamp(pred, min=0, max=self.args.max_depth)\n cout = torch.clamp(cout, min=0, max=self.args.max_depth)\n\n mask = (gt > self.t_valid).type_as(pred).detach()\n\n d = (1/cout * torch.abs(pred - gt) - torch.log(cout)) * mask\n\n d = torch.sum(d, dim=[1, 2, 3])\n num_valid = torch.sum(mask, dim=[1, 2, 3])\n\n loss = d / (num_valid + 1e-8)\n\n loss = loss.sum()\n\n return loss\n","sub_path":"src/loss/submodule/confl1loss.py","file_name":"confl1loss.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"194136921","text":"# -*- coding: utf-8 -*-\n\nimport pdb # Python 
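One caveat on the ConfL1Loss record above: `cout` is clamped to a minimum of 0 and then used in `1/cout` and `torch.log(cout)`, so a zero confidence yields inf/NaN losses. A sketch that clamps to a small epsilon instead; the epsilon value and the 4-D NCHW tensor shape are assumptions:

import torch

def conf_l1(pred, gt, cout, max_depth=10.0, eps=1e-3):
    gt = torch.clamp(gt, 0, max_depth)
    pred = torch.clamp(pred, 0, max_depth)
    cout = torch.clamp(cout, eps, max_depth)  # eps keeps 1/cout and log(cout) finite
    mask = (gt > 1e-4).type_as(pred)
    d = (torch.abs(pred - gt) / cout - torch.log(cout)) * mask
    num_valid = torch.sum(mask, dim=[1, 2, 3])
    loss = torch.sum(d, dim=[1, 2, 3]) / (num_valid + 1e-8)
    return loss.sum()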
Debugger\nimport sys # system related stuff\nimport os # os related stuff\nimport pprint # print stuff\nimport random # for random user selection\nimport gevent # syncrhonous loop api\nfrom gevent import monkey # for monkey patching\nmonkey.patch_all() # make sure to monkey patch before importing praw\n# But at least it makes a suggestion - try doing the monkey patching before you import praw/anything else (would be a stupid workaround, but it might work)\n# https://github.com/kennethreitz/requests/issues/3752\n\nimport time # for sleepin'\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport praw # for reddit API\nimport xmltodict # xml to json | fast as well :)\nimport json # for parsing json\nimport datetime # time of the week\nimport urllib.request # for more HTTP requests\nimport argparse # for parsing arguments\n\nparser = argparse.ArgumentParser(description='MALbot automatically updates a Reddit post to feature a daily overview of MyAnimeList.net')\nparser.add_argument('--once', action='store_true', help='Only execute once.')\narguments = parser.parse_args()\n\nd = datetime.datetime.now() # set current date as d to check weekday\n\ndef scheduled_job(): # will call at the bottom\n str(datetime.datetime.now()) # output datetime so we can know.\n #if d.isoweekday() == 3: # 1 is monday | 7 is sunday ()\n\n reddit = praw.Reddit(\n user_agent='MyAnimeList Daily Bot v0.1',\n client_id='kBddA1U8dPkUtA',\n client_secret='SVFGuKd6hgpz2_X9UodRzjgpYvs',\n username=os.environ['REDDIT_USERNAME'],\n password=os.environ['REDDIT_PASSWORD'])\n \n subreddit = reddit.subreddit('MyAnimeList') # testing subreddit\n\n post = reddit.submission(id='6d5wyk') # A post on /r/malbottesting\n\n # Top Anime\n request = urllib.request.Request(\"https://www.matomari.tk/api/0.4/methods/anime.top.php\", headers={'User-Agent': \"Magic Browser\"})\n connection = urllib.request.urlopen(request);\n anime_top_response_raw = connection.read()\n anime_top_response = json.loads(anime_top_response_raw.decode('utf-8'))\n\n # First random\n request = urllib.request.Request(\"https://www.matomari.tk/api/0.4/methods/anime.random.php\", headers={'User-Agent': 'Magic Browser'})\n connection = urllib.request.urlopen(request)\n anime_random1_response_raw = connection.read();\n print(anime_random1_response_raw)\n anime_random1_response = json.loads(anime_random1_response_raw.decode('utf-8'))\n anime_random1_id = anime_random1_response[\"id\"]\n\n # Second random\n request = urllib.request.Request(\"https://www.matomari.tk/api/0.4/methods/anime.random.php?not=\" + str(anime_random1_id), headers={'User-Agent': 'Magic Browser'})\n connection = urllib.request.urlopen(request)\n anime_random2_response_raw = connection.read();\n print(anime_random2_response_raw)\n anime_random2_response = json.loads(anime_random2_response_raw.decode('utf-8'))\n anime_random2_id = anime_random2_response[\"id\"]\n\n # Third random\n request = urllib.request.Request(\"https://www.matomari.tk/api/0.4/methods/anime.random.php?not=\" + str(anime_random1_id) + \",\" + str(anime_random2_id), headers={'User-Agent': 'Magic Browser'})\n connection = urllib.request.urlopen(request)\n anime_random3_response_raw = connection.read();\n print(anime_random3_response_raw)\n anime_random3_response = json.loads(anime_random3_response_raw.decode('utf-8'))\n anime_random3_id = anime_random3_response[\"id\"]\n\n # Fourth random\n request = urllib.request.Request(\"https://www.matomari.tk/api/0.4/methods/anime.random.php?not=\" + str(anime_random1_id) + \",\" + 
str(anime_random2_id) + \",\" + str(anime_random3_id), headers={'User-Agent': 'Magic Browser'})\n connection = urllib.request.urlopen(request)\n anime_random4_response_raw = connection.read();\n print(anime_random4_response_raw)\n anime_random4_response = json.loads(anime_random4_response_raw.decode('utf-8'))\n anime_random4_id = anime_random4_response[\"id\"]\n\n # Fifth random\n print(\"https://www.matomari.tk/api/0.4/methods/anime.random.php?not=\" + str(anime_random1_id) + \",\" + str(anime_random2_id) + \",\" + str(anime_random3_id) + \",\" + str(anime_random4_id))\n request = urllib.request.Request(\"https://www.matomari.tk/api/0.4/methods/anime.random.php?not=\" + str(anime_random1_id) + \",\" + str(anime_random2_id) + \",\" + str(anime_random3_id) + \",\" + str(anime_random4_id), headers={'User-Agent': 'Magic Browser'})\n connection = urllib.request.urlopen(request)\n anime_random5_response_raw = connection.read();\n print(anime_random5_response_raw)\n anime_random5_response = json.loads(anime_random5_response_raw.decode('utf-8'))\n\n\n print(\"Grabbing random person...\")\n # Grab random person with flair\n flairs = []\n for flair in subreddit.flair():\n flairs.append(flair)\n\n chosen_flair = random.choice(flairs);\n chosen_mal_username = chosen_flair[\"flair_text\"].split(\"/\")[-1]\n chosen_mal_username = chosen_mal_username.split(\"?\")[0].split(\"#\")[0]\n\n print(\"Chosen username:\")\n print(chosen_mal_username)\n\n def checkProfileExists(username):\n try:\n print(\"Checking if profile exists...\")\n request = urllib.request.Request(\"https://www.matomari.tk/api/0.4/methods/user.info.USERNAME.php?username=\" + username, headers={'User-Agent': 'Magic Browser'})\n connection = urllib.request.urlopen(request) # No need to store response, it's just for testing connection\n except urllib.error.HTTPError as e:\n if e.code == 404: # Doesn't exist\n print(\"Username doesn't exist... 
trying new one:\")\n chosen_flair = random.choice(flairs) # New random\n chosen_mal_username = chosen_flair[\"flair_text\"].split(\"/\")[-1]\n chosen_mal_username = chosen_mal_username.split(\"?\")[0].split(\"#\")[-1]\n checkProfileExists(chosen_mal_username)\n else:\n print(\"user.info.USERNAME returned an error othan than 404!\")\n return;\n checkProfileExists(chosen_mal_username)\n\n endpoints = [\n \"https://www.matomari.tk/api/0.3/user/info/\" + chosen_mal_username + \".json\",\n \"https://www.matomari.tk/api/0.3/general/malappinfo.php?u=\" + chosen_mal_username + \"&type=anime&status=all\"\n ]\n\n user_response = {}\n\n def getInfoFromUsername(endpoint):\n print('Starting download from ' + endpoint)\n request = urllib.request.Request(endpoint, headers={'User-Agent': 'Magic Browser'})\n connection = urllib.request.urlopen(request)\n if(\"malappinfo\" in endpoint):\n user_list_response_raw = connection.read()\n user_response[\"user_list_response\"] = xmltodict.parse(user_list_response_raw)\n else:\n user_profile_response_raw = connection.read()\n user_response[\"user_profile_response\"] = json.loads(user_profile_response_raw.decode('utf-8'))\n \n jobs = [gevent.spawn(getInfoFromUsername, endpoint) for endpoint in endpoints]\n\n gevent.joinall(jobs)\n\n\n\n user_favourites = {}\n \n # for anime in user_profile_response[\"favourites\"][\"anime\"]:\n def favouriteToArray(id):\n print('Starting to download anime info: ' + id)\n request = urllib.request.Request(\"https://www.matomari.tk/api/0.4/methods/anime.info.ID.php?id=\" + id, headers={'User-Agent': 'Magic Browser'})\n connection = urllib.request.urlopen(request)\n user_favourite_response_raw = connection.read()\n user_favourites[id] = json.loads(user_favourite_response_raw.decode('utf8'));\n print(\"Beginning loop through user list to find anime with id: \" + id)\n for user_animeinfo in user_response[\"user_list_response\"][\"myanimelist\"][\"anime\"]:\n if(user_animeinfo[\"series_animedb_id\"] == id):\n user_favourites[id][\"user_score\"] = user_animeinfo[\"my_score\"]\n\n\n jobs = [gevent.spawn(favouriteToArray, animeid) for animeid in user_response[\"user_profile_response\"][\"favourites\"][\"anime\"]]\n\n gevent.joinall(jobs) # call all gvents\n\n favourite_arr = [] # Will fill up with markdown\n for key, favourite in user_favourites.items():\n favourite_arr.append(\n favourite[\"type\"] + \"\"\" | \"\"\" + str('{0:.2f}'.format(favourite[\"score\"])) + \"\"\" | \"\"\" + str('{0:.2f}'.format(int(favourite[\"user_score\"]))) + \"\"\" | [\"\"\" + favourite[\"title\"] + \"\"\"](\"\"\" + favourite[\"url\"] + \"\"\") \\n\"\"\"\n )\n\n favourite_str = \"*None* | *None* | *None*\"\n if(len(favourite_arr) != 0):\n favourite_str = ''.join(favourite_arr)\n\n\n\n\n # submit(title, selftext=None, url=None resubmit=True, send_replies=True)\n # edit(body)\n post.edit(\"\"\"#Here is today's overview of MyAnimeList!\n^^Generated ^^automatically ^^by ^^MAL-bot ^^at ^^\"\"\" + d.strftime('%Y-%m-%d') + \"\"\"\n\n---\n## Top 5 ranking anime\nRank | MAL Score | Title\n:--:|:--:|:--\n\"\"\" +\nstr(anime_top_response[\"items\"][0][\"rank\"]) + \"\"\" | \"\"\" + str('{0:.2f}'.format(anime_top_response[\"items\"][0][\"score\"])) + \"\"\" | [\"\"\" + anime_top_response[\"items\"][0][\"title\"] + \"\"\"](\"\"\" + anime_top_response[\"items\"][0][\"url\"] + \"\"\") \\n\"\"\" +\nstr(anime_top_response[\"items\"][1][\"rank\"]) + \"\"\" | \"\"\" + str('{0:.2f}'.format(anime_top_response[\"items\"][1][\"score\"])) + \"\"\" | [\"\"\" + 
anime_top_response[\"items\"][1][\"title\"] + \"\"\"](\"\"\" + anime_top_response[\"items\"][1][\"url\"] + \"\"\") \\n\"\"\" +\nstr(anime_top_response[\"items\"][2][\"rank\"]) + \"\"\" | \"\"\" + str('{0:.2f}'.format(anime_top_response[\"items\"][2][\"score\"])) + \"\"\" | [\"\"\" + anime_top_response[\"items\"][2][\"title\"] + \"\"\"](\"\"\" + anime_top_response[\"items\"][2][\"url\"] + \"\"\") \\n\"\"\" +\nstr(anime_top_response[\"items\"][3][\"rank\"]) + \"\"\" | \"\"\" + str('{0:.2f}'.format(anime_top_response[\"items\"][3][\"score\"])) + \"\"\" | [\"\"\" + anime_top_response[\"items\"][3][\"title\"] + \"\"\"](\"\"\" + anime_top_response[\"items\"][3][\"url\"] + \"\"\") \\n\"\"\" +\nstr(anime_top_response[\"items\"][4][\"rank\"]) + \"\"\" | \"\"\" + str('{0:.2f}'.format(anime_top_response[\"items\"][4][\"score\"])) + \"\"\" | [\"\"\" + anime_top_response[\"items\"][4][\"title\"] + \"\"\"](\"\"\" + anime_top_response[\"items\"][4][\"url\"] + \"\"\") \\n\"\"\" + \"\"\"\n\n---\n## 5 Random anime\nType | MAL Score | Title\n:--|:--:|:--\n\"\"\" +\nanime_random1_response[\"type\"] + \"\"\" | \"\"\" + str('{0:.2f}'.format(anime_random1_response[\"score\"])) + \"\"\" | [\"\"\" + anime_random1_response[\"title\"] + \"\"\"](\"\"\" + anime_random1_response[\"url\"] + \"\"\") \\n\"\"\" +\nanime_random2_response[\"type\"] + \"\"\" | \"\"\" + str('{0:.2f}'.format(anime_random2_response[\"score\"])) + \"\"\" | [\"\"\" + anime_random2_response[\"title\"] + \"\"\"](\"\"\" + anime_random2_response[\"url\"] + \"\"\") \\n\"\"\" +\nanime_random3_response[\"type\"] + \"\"\" | \"\"\" + str('{0:.2f}'.format(anime_random3_response[\"score\"])) + \"\"\" | [\"\"\" + anime_random3_response[\"title\"] + \"\"\"](\"\"\" + anime_random3_response[\"url\"] + \"\"\") \\n\"\"\" +\nanime_random4_response[\"type\"] + \"\"\" | \"\"\" + str('{0:.2f}'.format(anime_random4_response[\"score\"])) + \"\"\" | [\"\"\" + anime_random4_response[\"title\"] + \"\"\"](\"\"\" + anime_random4_response[\"url\"] + \"\"\") \\n\"\"\" +\nanime_random5_response[\"type\"] + \"\"\" | \"\"\" + str('{0:.2f}'.format(anime_random5_response[\"score\"])) + \"\"\" | [\"\"\" + anime_random5_response[\"title\"] + \"\"\"](\"\"\" + anime_random5_response[\"url\"] + \"\"\") \\n\"\"\" + \"\"\"\n\n---\nToday's random user is... \"\"\" + chosen_mal_username + \"\"\"!\n## [\"\"\" + chosen_mal_username + \"\"\"](https://myanimelist.net/profile/\"\"\" + chosen_mal_username + \"\"\")'s favourite anime\nType | MAL Score | \"\"\" + chosen_mal_username + \"\"\"'s Score | Title\n:--|:--:|:--:|:--\n\"\"\" + favourite_str)\n print(\"Done.\")\n\n# def end\n\n\nif arguments.once == True:\n scheduled_job()\nelse:\n scheduler = BlockingScheduler()\n\n scheduler.add_job(scheduled_job, 'interval', hours=24)\n scheduler.start()","sub_path":"MALbot/mybot.py","file_name":"mybot.py","file_ext":"py","file_size_in_byte":11545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"442070084","text":"# © 2020 Danimar Ribeiro, Trustcode\n# Part of Trustcode. 
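mybot.py above issues the anime.random request five times, threading the growing `not=` exclusion list through five copy-pasted blocks; the same flow collapses into a loop. Sketch, assuming the matomari endpoint accepts a comma-separated `not=` parameter exactly as the record uses it:

import json
import urllib.request

def fetch_random_anime(count=5):
    picked = []
    for _ in range(count):
        url = "https://www.matomari.tk/api/0.4/methods/anime.random.php"
        if picked:
            # exclude every id already drawn, as the record does by hand
            url += "?not=" + ",".join(str(a["id"]) for a in picked)
        req = urllib.request.Request(url, headers={"User-Agent": "Magic Browser"})
        with urllib.request.urlopen(req) as conn:
            picked.append(json.loads(conn.read().decode("utf-8")))
    return picked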
See LICENSE file for full copyright and licensing details.\n\nimport os\n\nfrom lxml import objectify\nfrom mock import patch\n\nfrom odoo.tests.common import TransactionCase\n\nroot = os.path.dirname(__file__)\nxmls = os.path.join(root, 'xmls')\n\n\nclass TestDeliveryCorreios(TransactionCase):\n\n def setUp(self):\n super(TestDeliveryCorreios, self).setUp()\n correio = {\n 'name': 'Correio',\n 'correio_login': 'sigep',\n 'correio_password': 'n5f9t8',\n 'cod_administrativo': '08082650',\n 'num_contrato': '9912208555',\n 'cartao_postagem': '0057018901',\n 'delivery_type': 'correios',\n 'mao_propria': 'N',\n 'valor_declarado': False,\n 'aviso_recebimento': 'N',\n 'ambiente': 1,\n }\n self.delivery = self.env['delivery.carrier'].create(correio)\n self.servico = self.env['delivery.correios.service'].create({\n 'ano_assinatura': '2016',\n 'name': 'Serviço 1',\n 'code': '40215',\n 'identifier': 'foo bar baz',\n 'delivery_id': self.delivery.id,\n })\n self.delivery.write({\n 'service_id': self.servico.id,\n })\n partner = {\n 'name': 'Parceiro 1',\n 'company_type': 'person',\n 'cnpj_cpf': '515.741.801-93',\n 'zip': '27336-400',\n }\n self.partner = self.env['res.partner'].create(partner)\n self.company = self.env['res.company'].create({\n 'l10n_br_legal_name': 'Nome Legal',\n 'name': 'Company 1',\n 'cnpj_cpf': '1234567890123234',\n })\n product_uom = {\n 'name': 'UOM',\n 'category_id': self.env['product.uom.categ'].create(\n {'name': 'Unity'}).id,\n 'uom_type': 'reference',\n 'active': True,\n 'rounding': 0.00100,\n }\n self.product_uom = self.env['product.uom'].create(product_uom)\n produto = {\n 'name': 'Produto 1',\n 'weight': 10,\n 'comprimento': 20,\n 'altura': 20,\n 'largura': 20,\n 'list_price': 20,\n 'uom_id': self.product_uom.id,\n 'uom_po_id': self.product_uom.id,\n }\n self.produto = self.env['product.product'].create(produto)\n sale_order = {\n 'partner_id': self.partner.id,\n }\n self.sale_order = self.env['sale.order'].create(sale_order)\n sale_order_line = {\n 'product_id': self.produto.id,\n 'product_uom_qty': 2,\n 'product_uom': self.product_uom.id,\n 'order_id': self.sale_order.id,\n }\n self.sale_order_line =\\\n self.env['sale.order.line'].create(sale_order_line)\n self.sale_order.write({\n 'order_line': [(4, self.sale_order_line.id, 0)],\n })\n\n @patch('odoo.addons.delivery_correios.models.delivery.\\\ncheck_for_correio_error')\n @patch('odoo.addons.delivery_correios.models.delivery.\\\ncalcular_preco_prazo')\n def test_correios_get_shipping_price_from_so(self, preco, erro):\n calcular_preco_prazo = os.path.join(xmls, 'calcular_preco_prazo.xml')\n with open(calcular_preco_prazo, 'r') as correio_return_xml:\n preco.return_value = objectify.fromstring(\n correio_return_xml.read())\n erro.return_value = None\n entrega = self.env['delivery.carrier'].create({\n 'name': 'Metodo 1',\n 'delivery_type': 'correios',\n 'margin': 0,\n 'integration_level': 'rate_and_ship',\n 'correio_login': 'sigep',\n 'correio_password': 'n5f9t8',\n 'cod_administrativo': '08082650',\n 'num_contrato': '9912208555',\n 'cartao_postagem': '0057018901',\n 'ambiente': 1,\n })\n servico = self.env['delivery.correios.service'].create({\n 'ano_assinatura': '2016',\n 'name': 'Serviço 1',\n 'code': '40215',\n 'identifier': 'foo bar baz',\n 'delivery_id': entrega.id,\n })\n entrega.write({\n 'service_id': servico.id,\n })\n self.sale_order.write({\n 'carrier_id': entrega.id\n })\n preco = entrega.correios_get_shipping_price_from_so(\n self.sale_order)\n self.assertEqual(preco[0], 42.00)\n\n 
@patch('odoo.addons.delivery_correios.models.delivery.\\\ncheck_for_correio_error')\n @patch('odoo.addons.delivery_correios.models.delivery.busca_cliente')\n def test_action_get_correio_services(self, services, erro):\n # mock servicos\n busca_cliente = os.path.join(xmls, 'busca_cliente.xml')\n with open(busca_cliente, 'r') as correio_return_xml:\n services.return_value = objectify.fromstring(\n correio_return_xml.read())\n erro.return_value = None\n self.delivery.action_get_correio_services()\n servicos = self.env['delivery.correios.service'].search(\n [('code', '=', '40096')])\n self.assertTrue(len(servicos) == 1,\n \"Número de serviços: %d \" % len(servicos))\n\n @patch('odoo.addons.delivery_correios.models.delivery.\\\ncheck_for_correio_error')\n @patch('odoo.addons.delivery_correios.models.delivery.\\\ncalcular_preco_prazo')\n @patch('odoo.addons.delivery_correios.models.delivery.get_eventos')\n def test_correios_get_tracking_link(self, eventos, preco, erro):\n get_eventos = os.path.join(xmls, 'get_eventos.xml')\n calcular_preco_prazo = os.path.join(xmls, 'calcular_preco_prazo.xml')\n with open(get_eventos, 'r') as correios_eventos:\n eventos.return_value = objectify.fromstring(\n correios_eventos.read())\n with open(calcular_preco_prazo, 'r') as correio_return_xml:\n preco.return_value = objectify.fromstring(\n correio_return_xml.read())\n erro.return_value = None\n move_line = [(0, 0, {\n 'name': 'Move 1',\n 'product_id': self.produto.id,\n 'product_uom_qty': 1.0,\n 'product_uom': self.product_uom.id,\n 'state': 'draft',\n })]\n pack_operation = [(0, 0, {\n 'qty_done': 0,\n 'location_id': 1,\n 'location_dest_id': 1,\n 'product_id': self.produto.id,\n })]\n picking = self.env['stock.picking'].create({\n 'name': 'Picking 1',\n 'partner_id': self.partner.id,\n 'move_lines': move_line,\n 'location_id': 1,\n 'location_dest_id': 1,\n 'picking_type_id': 1,\n 'pack_operation_product_ids': pack_operation,\n })\n entrega = self.env['delivery.carrier'].create({\n 'name': 'Metodo 1',\n 'delivery_type': 'correios',\n 'margin': 0,\n 'integration_level': 'rate_and_ship',\n 'correio_login': 'sigep',\n 'correio_password': 'n5f9t8',\n 'cod_administrativo': '08082650',\n 'num_contrato': '9912208555',\n 'cartao_postagem': '0057018901',\n 'ambiente': 1,\n })\n servico = self.env['delivery.correios.service'].create({\n 'ano_assinatura': '2016',\n 'name': 'Serviço 1',\n 'code': '40215',\n 'identifier': 'foo bar baz',\n 'delivery_id': entrega.id,\n })\n entrega.write({\n 'service_id': servico.id,\n })\n self.sale_order.write({\n 'carrier_id': entrega.id\n })\n tracks_link = entrega.correios_get_tracking_link(picking)\n evento = self.env['delivery.correios.postagem.eventos'].search([])\n self.assertEqual(1, len(evento))\n self.assertEqual(evento.etiqueta, u'JF598971235BR')\n self.assertEqual(evento.data, u'2014-03-18')\n self.assertEqual(evento.status, u'23')\n self.assertEqual(evento.local_origem,\n u'CTCE MACEIO - 57060971, MACEIO/AL')\n self.assertFalse(evento.local_destino)\n self.assertEqual(\n tracks_link,\n ['/web#min=1&limit=80&view_type=list&model=delivery.correios.\\\npostagem.plp&action=396'])\n\n @patch('odoo.addons.delivery_correios.models.delivery.\\\ncheck_for_correio_error')\n @patch('odoo.addons.delivery_correios.models.delivery.\\\ncalcular_preco_prazo')\n @patch('odoo.addons.delivery_correios.models.delivery.\\\nsolicita_etiquetas_com_dv')\n def test_correios_send_shipping(self, etiquetas, preco, erro):\n calcular_preco_prazo = os.path.join(xmls, 'calcular_preco_prazo.xml')\n with 
open(calcular_preco_prazo, 'r') as correio_return_xml:\n preco.return_value = objectify.fromstring(\n correio_return_xml.read())\n etiquetas.return_value = ['DL760237272BR']\n erro.return_value = None\n move_line = [(0, 0, {\n 'name': 'Move 1',\n 'product_id': self.produto.id,\n 'product_uom_qty': 1.0,\n 'product_uom': self.product_uom.id,\n 'state': 'draft',\n })]\n pack_operation = [(0, 0, {\n 'qty_done': 0,\n 'product_qty': 1,\n 'location_id': 1,\n 'location_dest_id': 1,\n 'product_id': self.produto.id,\n })]\n picking = self.env['stock.picking'].create({\n 'name': 'Picking 1',\n 'partner_id': self.partner.id,\n 'move_lines': move_line,\n 'location_id': 1,\n 'location_dest_id': 1,\n 'picking_type_id': 1,\n 'pack_operation_product_ids': pack_operation,\n })\n entrega = self.env['delivery.carrier'].create({\n 'name': 'Metodo 1',\n 'delivery_type': 'correios',\n 'margin': 0,\n 'integration_level': 'rate_and_ship',\n 'correio_login': 'sigep',\n 'correio_password': 'n5f9t8',\n 'cod_administrativo': '08082650',\n 'num_contrato': '9912208555',\n 'cartao_postagem': '0057018901',\n 'ambiente': 1,\n })\n servico = self.env['delivery.correios.service'].create({\n 'ano_assinatura': '2016',\n 'name': 'Serviço 1',\n 'code': '40215',\n 'identifier': 'foo bar baz',\n 'delivery_id': entrega.id,\n })\n entrega.write({\n 'service_id': servico.id,\n })\n self.sale_order.write({\n 'carrier_id': entrega.id\n })\n send = entrega.correios_send_shipping(picking)\n self.assertTrue(len(send) == 1)\n exact_price = send[0]['exact_price']\n track_ref = send[0]['tracking_number']\n self.assertEqual(track_ref, 'DL760237272BR')\n self.assertEqual(exact_price, 42)\n postagem = self.env['delivery.correios.postagem.objeto'].search([])\n self.assertEqual(1, len(postagem))\n self.assertEqual(postagem.name, 'DL760237272BR')\n plp = self.env['delivery.correios.postagem.plp'].search([])\n self.assertEqual(1, len(plp))\n self.assertEqual(plp.state, 'draft')\n self.assertEqual(plp.total_value, 42)\n","sub_path":"delivery_correios/tests/test_delivery.py","file_name":"test_delivery.py","file_ext":"py","file_size_in_byte":11260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"560736115","text":"from flask import Flask, render_template, redirect, request, flash, url_for\nfrom forms import EnterDict\nimport os\nfrom helper_func import *\n\n\napp = Flask(__name__)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\nSECRET_KEY = os.urandom(32)\napp.config['SECRET_KEY'] = SECRET_KEY\n\n# route for homepage\n\n\n@app.route('/', methods=('GET', 'POST'))\ndef index():\n form = EnterDict()\n if form.validate_on_submit():\n # flash('You have been logged in!', 'success')\n return redirect(url_for('search', movie_name=form.body.data, page=1))\n daily_quote, author = get_quote()\n return render_template('index.html', title='Movie finder', form=form, daily_quote=daily_quote, author=author)\n\n# route for searching movie\n\n\n@app.route('/search//')\ndef search(movie_name, page):\n flash('Here is your results', 'success')\n datas = get_movie(movie_name, page)\n return render_template('search_results.html', title=f'Searched for {movie_name}',\n datas=datas, page=[page, page - 1, page + 1], movie_name=movie_name)\n\n# route for showing specific movie\n\n\n@app.route('/show/')\ndef show(movie_id):\n flash('Here is your results', 'success')\n data = get_movie(movie_id)\n return render_template('search_movie.html', data=data)\n\n\nif __name__ == '__main__':\n 
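Each test in test_delivery.py above re-opens an XML fixture and pipes it through `objectify.fromstring`; a tiny helper would keep that in one place. Sketch (the fixture directory and names come from the record; reading in binary mode is an assumption that sidesteps lxml's refusal to parse str input carrying an encoding declaration):

import os
from lxml import objectify

xmls = os.path.join(os.path.dirname(__file__), 'xmls')

def load_fixture(name):
    # returns the objectified XML used to stub out the correios SOAP calls
    with open(os.path.join(xmls, name), 'rb') as fixture:
        return objectify.fromstring(fixture.read())

# usage inside a test: preco.return_value = load_fixture('calcular_preco_prazo.xml')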
app.run(debug=True)\n","sub_path":"Flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"323312229","text":"'''\nCC:\n\n1) https://www.geeksforgeeks.org/rotate-doubly-linked-list-n-nodes/\n'''\n#solution\nclass Node: \n def __init__(self, next = None, \n prev = None, data = None): \n self.next = next \n self.prev = prev \n self.data = data \n \ndef push(head, new_data): \n \n new_node = Node(data = new_data) \n \n new_node.next = head \n new_node.prev = None\n \n if head is not None: \n head.prev = new_node \n \n head = new_node \n return head \n \ndef printList(head): \n \n node = head \n \n print(\"Given linked list\") \n while(node is not None): \n print(node.data, end = \" \"), \n last = node \n node = node.next\n \ndef rotate(start, N): \n if N == 0 : \n return\n\n current = start \n count = 1\n while count < N and current != None : \n current = current.next\n count += 1\n\n if current == None : \n return\n NthNode = current \n \n while current.next != None : \n current = current.next \n current.next = start \n start.prev = current \n start = NthNode.next\n start.prev = None\n NthNode.next = None\n \n return start\nif __name__ == \"__main__\": \n head = None\n \n head = push(head, 1) \n head = push(head, 2) \n head = push(head, 3) \n head = push(head, 4) \n head = push(head, 5) \n \n printList(head) \n print(\"\\n\") \n \n N = 2\n head = rotate(head, 3) \n \n printList(head) \n\ninput() \n'''\n2) https://www.geeksforgeeks.org/insertion-sort-doubly-linked-list/\n'''\n#solution\nclass Node: \n \n def __init__(self, data): \n self.data = data \n self.prev = None\n self.next = None\n\ndef getNode(data): \n newNode = Node(0) \n newNode.data = data \n newNode.prev = newNode.next = None\n return newNode \n\ndef sortedInsert(head_ref, newNode): \n \n current = None\n if (head_ref == None): \n head_ref = newNode \n elif ((head_ref).data >= newNode.data) : \n newNode.next = head_ref \n newNode.next.prev = newNode \n head_ref = newNode \n else : \n current = head_ref \n \n while (current.next != None and\n current.next.data < newNode.data): \n current = current.next\n newNode.next = current.next\n \n if (current.next != None): \n newNode.next.prev = newNode \n current.next = newNode \n newNode.prev = current \n return head_ref; \n \n\ndef insertionSort( head_ref): \n sorted = None\n current = head_ref \n while (current != None) : \n next = current.next\n current.prev = current.next = None\n sorted = sortedInsert(sorted, current) \n current = next\n head_ref = sorted\n return head_ref \n \n\ndef printList(head): \n while (head != None) : \n print( head.data, end = \" \") \n head = head.next\n\ndef push(head_ref, new_data): \n new_node = Node(0) \n new_node.data = new_data \n new_node.next = (head_ref) \n new_node.prev = None\n \n if ((head_ref) != None): \n (head_ref).prev = new_node \n (head_ref) = new_node \n return head_ref \n \nif __name__ == \"__main__\": \n \n head = None\n head = push(head, 5) \n head = push(head, 1) \n head = push(head, 5) \n head = push(head, 14) \n head = push(head, 10) \n head = push(head, 8) \n \n print( \"Doubly Linked List Before Sorting\") \n printList(head) \n \n head = insertionSort(head) \n \n print(\"\\n\\nDoubly Linked List After Sorting\") \n printList(head) \n 
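The Flask app.py record above derives SECRET_KEY from `os.urandom(32)` at import time, so every restart (and every worker in a multi-process server) gets a different key, invalidating existing sessions and flashed messages. A common alternative reads the key from the environment; the variable name is an assumption:

import os
from flask import Flask

app = Flask(__name__)
# stable across restarts and workers; the random fallback is for local dev only
app.config['SECRET_KEY'] = os.environ.get('FLASK_SECRET_KEY') or os.urandom(32)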
\ninput()","sub_path":"coding-challenges/week05/day-5.py","file_name":"day-5.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"17952968","text":"from .structs import Currency, Scope, Claim, ClaimStatus, Balance\nfrom .errors import MissingScope, BadRequest, NotFound\nfrom .client import VirtualCryptoClientBase, VIRTUALCRYPTO_TOKEN_ENDPOINT, VIRTUALCRYPTO_API\nfrom typing import Optional, List\nimport datetime\nimport aiohttp\nimport asyncio\n\n\nclass AsyncVirtualCryptoClient(VirtualCryptoClientBase):\n def __init__(self, client_id: str, client_secret: str, scopes: List[Scope], loop=asyncio.get_event_loop()):\n super().__init__(client_id, client_secret, scopes)\n self.loop = loop\n self.session = aiohttp.ClientSession(loop=self.loop)\n self.wait_ready = asyncio.Event(loop=self.loop)\n\n async def wait_for_ready(self):\n await self.wait_ready.wait()\n\n async def start(self):\n await self.set_token()\n self.wait_ready.set()\n\n async def close(self):\n await self.session.close()\n\n async def set_token(self):\n body = {\n 'scope': ' '.join(map(lambda x: x.value, self.scopes)),\n 'grant_type': 'client_credentials'\n }\n async with self.session.post(\n VIRTUALCRYPTO_TOKEN_ENDPOINT,\n data=body,\n auth=aiohttp.BasicAuth(self.client_id, self.client_secret)) as response:\n data = await response.json()\n\n self.token = data['access_token']\n self.expires_in = data['expires_in']\n self.token_type = data['token_type']\n self.when_set_token = datetime.datetime.utcnow()\n\n async def get_headers(self):\n if (datetime.datetime.utcnow() - self.when_set_token).seconds >= self.expires_in:\n await self.set_token()\n return {\n \"Authorization\": \"Bearer \" + self.token\n }\n\n async def get(self, path, params) -> aiohttp.ClientResponse:\n headers = await self.get_headers()\n\n return await self.session.get(VIRTUALCRYPTO_API + path, params=params, headers=headers)\n\n async def post(self, path, data) -> aiohttp.ClientResponse:\n headers = await self.get_headers()\n return await self.session.post(VIRTUALCRYPTO_API + path, data=data, headers=headers)\n\n async def patch(self, path, data) -> aiohttp.ClientResponse:\n headers = await self.get_headers()\n return await self.session.patch(VIRTUALCRYPTO_API + path, data=data, headers=headers)\n\n async def get_currency_by_unit(self, unit: str) -> Optional[Currency]:\n response = await self.get(\"/currencies\", {\"unit\": unit})\n\n return Currency.by_json(await response.json())\n\n async def get_currency_by_guild(self, guild_id: int) -> Optional[Currency]:\n response = await self.get(\"/currencies\", {\"guild\": str(guild_id)})\n\n return Currency.by_json(await response.json())\n\n async def get_currency_by_name(self, name: str) -> Optional[Currency]:\n response = await self.get(\"/currencies\", {\"name\": name})\n return Currency.by_json(await response.json())\n\n async def get_currency(self, currency_id: int):\n response = await self.get(\"/currencies/\" + str(currency_id), {})\n\n return Currency.by_json(await response.json())\n\n async def create_user_transaction(self, unit: str, receiver_discord_id: int, amount: int) -> None:\n if Scope.Pay not in self.scopes:\n raise MissingScope(\"vc.pay\")\n\n response = await self.post(\n \"/users/@me/transactions\",\n {\n \"unit\": unit,\n \"receiver_discord_id\": str(receiver_discord_id),\n \"amount\": str(amount)\n }\n )\n if response.status == 400:\n raise BadRequest((await response.json())[\"error_info\"])\n\n pay = 
create_user_transaction\n\n async def get_claims(self):\n if Scope.Claim not in self.scopes:\n raise MissingScope(\"vc.claim\")\n\n response = await self.get(\n \"/users/@me/claims\",\n {}\n )\n return list(map(Claim.by_json, await response.json()))\n\n async def get_claim(self, claim_id: int):\n response = await self.get(\"/users/@me/claims/\" + str(claim_id), {})\n return Claim.by_json(await response.json())\n\n async def update_claim(self, claim_id: int, status: ClaimStatus):\n if status == ClaimStatus.Pending:\n raise ValueError(\"can't update to pending\")\n\n response = await self.patch(\n \"/users/@me/claims/\" + str(claim_id),\n {\"status\": status.value}\n )\n\n if response.status == 404:\n raise NotFound((await response.json())[\"error_description\"])\n elif response.status == 400:\n raise BadRequest((await response.json())[\"error_info\"])\n\n return response\n\n async def get_balances(self):\n response = await self.get(\n \"/users/@me/balances\",\n {}\n )\n return list(map(Balance.by_json, await response.json()))\n","sub_path":"virtualcrypto/async_client.py","file_name":"async_client.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"482505844","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 4 23:12:01 2018\n\n@author: srinivasarao\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfilename = 'https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'\nheaders = [\"symboling\",\"normalized-losses\",\"make\",\"fuel-type\",\"aspiration\", \"num-of-doors\",\"body-style\",\n \"drive-wheels\",\"engine-location\",\"wheel-base\", \"length\",\"width\",\"height\",\"curb-weight\",\"engine-type\",\n \"num-of-cylinders\", \"engine-size\",\"fuel-system\",\"bore\",\"stroke\",\"compression-ratio\",\"horsepower\",\n \"peak-rpm\",\"city-mpg\",\"highway-mpg\",\"price\"]\n\ndf= pd.read_csv(filename, names= headers)\n\nprint (df.head())\n\ndf.replace(\"?\",np.nan,inplace= True)\n\nprint (df.head())\n\nmissing_data= df.isnull()\nprint (missing_data.head())\n\nfor column in missing_data.columns.values.tolist():\n print (column)\n print (missing_data[column].value_counts())\n print (\" \")\n\navg_1= df[\"normalized-losses\"].astype(\"float\").mean(axis=0)\ndf[\"normalized-losses\"].replace(np.nan,avg_1,inplace= True)\n\nprint (df[\"noramlized-losses\"].ix[0])","sub_path":"dataanalysis.py","file_name":"dataanalysis.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"609208520","text":"import random\n\nword_list=(\"canteen\",\"scent\",\"community\",\"autopilot\",\"foothold\",\"spring\",\"lunatic\",\"unpredictability\",\"vowel\",\"turnaround\")\nword=word_list[random.randint(0,9)]\n\ncopy_word=word\n\nobs_word=\"\"\nfor char in word:\n obs_word+='*'\n\nend_word=\"\"\nfor char in word:\n end_word+='#'\n\ntries=10\n\nprint(\"The game has begun, you have {} tries\\n\".format(tries))\nprint(obs_word)\n\nguess=\"\"\nfound=0\nwon=0\n\nwhile tries>0:\n if won==1:\n break\n won=1\n guess=input(\"Guess a letter: \")\n found=0\n for c in word:\n if c==guess:\n found=1\n pos=word.find(c)\n word=word[:pos]+'#'+word[pos+1:]\n obs_word=obs_word[:pos]+c+obs_word[pos+1:]\n if found==1:\n print(\"\\nCorrect!\\n\")\n print(obs_word)\n found=0\n else:\n print(\"\\nWrong\\n\")\n tries-=1\n print(\"You have {} tries remaining\".format(tries))\n for c in word:\n if c!='#':\n won=0\n break\n\nif 
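dataanalysis.py above ends by printing `df["noramlized-losses"].ix[0]`, which both misspells the column and uses the `.ix` indexer that pandas removed in 1.0, so the script raises before printing. An equivalent with current pandas (assuming pandas >= 1.0):

import numpy as np
import pandas as pd

df = pd.DataFrame({"normalized-losses": ["?", "164"]})
df.replace("?", np.nan, inplace=True)
avg = df["normalized-losses"].astype("float").mean(axis=0)
df["normalized-losses"].replace(np.nan, avg, inplace=True)
print(df["normalized-losses"].iloc[0])  # .iloc replaces the removed .ix; spelling fixed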
won==0:\n print(\"The word is {}.\".format(copy_word))\n print(\"========= Game Over =========\")\nelse:\n print(\"========= You've Won =========\")\n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"446770033","text":"import argparse,subprocess,os,hashlib,requests,sys,time\ndef gyazoMD5Exists(md5_hash):return requests.get(\"http://gyazo.com/%s\"%(md5_hash)).status_code==200\ndef printExists(h):print(\"%s FOUND\"%h if gyazoMD5Exists(h) else \"%s NOT FOUND\"%h)\ndef getMD5(path,buffer_size=1024*1024*32):\n\twith open(path,'rb')as inp:\n\t\tdata=inp.read(buffer_size)\n\t\tmday5=hashlib.md5()\n\t\twhile len(data)>0:\n\t\t\tmday5.update(data)\n\t\t\tdata=inp.read(buffer_size)\n\t\treturn mday5.hexdigest()\ndef validHex(s):\n\ttry:int(s,16);return True\n\texcept:return False\ndef err(code,message=None):\n\tif message:print(message)\n\tsys.exit(code)\ndef uploadFile(path):\n\tglobal DEFAULT_GYAZO_EXEC\n\treturn subprocess.Popen((DEFAULT_GYAZO_EXEC,os.path.abspath(path)))\ndef check4FinishedProcesses():\n\tglobal processes\n\tplen=len(processes)\n\ttoremove=[]\n\tfor i in range(plen):\n\t\tif processes[i].poll()!=None:toremove.append(i)\n\ttoremove.reverse()\n\tfor i in range(len(toremove)):\n\t\tdel processes[toremove[i]]\ndef w8forprocesses(max_count=20,timer=5):\n\tglobal processes\n\tcheck4FinishedProcesses()\n\t#keelChromes()\n\twhile len(processes)>max_count:\n\t\tprint(\"W8ing for upload processes to reach %s (currently have %s)\"%(max_count,len(processes)))\n\t\tcheck4FinishedProcesses()\n\t\ttime.sleep(timer)\n\"\"\"\nPOWERSHELL_SCREPT=\"keelchromes.ps1\"\nPOWERSHELL_EXE=\"powershell\"\ndef keelChromes():\n\tglobal POWERSHELL_EXE\n\tglobal POWERSHELL_SCREPT\n\tsubprocess.run((POWERSHELL_EXE,\"-File\",os.path.abspath(POWERSHELL_SCREPT)))\n\"\"\"\nDEFAULT_GYAZO_EXEC=\"C:\\\\Program Files (x86)\\\\Gyazo\\\\Gyazowin.exe\"\nDEFAULT_IGNORE_CASE=True\nDEFAULT_EXTENSIONS=[\".png\"]\nparser=argparse.ArgumentParser()\nparser.add_argument(\"input\",help=\"input directory\")\nparser.add_argument(\"-ext\",\"--extensions\",type=str,help=\"comma seperated list of extensions to search for\")\nparser.add_argument(\"-ign\",\"--ignore\",type=int,help=\"determines whether to ignore case (0 to not ignore case, 1 to ignore case)\",choices=[0,1],nargs=\"?\",const=(1 if DEFAULT_IGNORE_CASE else 0))\nargs=parser.parse_args()\nextensions=DEFAULT_EXTENSIONS\nignore_case=args.ignore==1\nif args.extensions:\n\tsplit=args.extensions.split(\",\")\n\tif len(split)>0:extensions=split[:]\n\telse:err(1,\"Supplied extensions result in empty\")\nif args.ignore:extensions=[ext.lower() for ext in extensions]\nprint(\"Beginning search for these extensions %s\"%extensions)\nprint(\"I SHALL IGNORE THE CASE\" if ignore_case else \"NAH NO CASE IGNORING GOING ON HERE\")\nprocesses=[]\nif os.path.exists(args.input):\n\tif os.path.isdir(args.input):\n\t\tfor root,dirs,files in os.walk(args.input):\n\t\t\tprint(\"Checking %s for files\"%root)\n\t\t\tfor f in files:\n\t\t\t\tfsplit=os.path.splitext(f)\n\t\t\t\tif len(fsplit)>1:\n\t\t\t\t\tif (ignore_case and (fsplit[1].lower() in extensions)) or (not ignore_case and (fsplit[1] in extensions)):\n\t\t\t\t\t\tfullpath=os.path.join(root,f)\n\t\t\t\t\t\tmday5=getMD5(fullpath)\n\t\t\t\t\t\texists=gyazoMD5Exists(mday5)\n\t\t\t\t\t\tif not 
exists:\n\t\t\t\t\t\t\tw8forprocesses()\n\t\t\t\t\t\t\tprocesses.append(uploadFile(fullpath))\n\t\t\t\t\t\t\tprint(\"\\tUPLOADING %s (%s)\"%(fullpath,mday5))\n\t\t\t\t\t\telse:print(\"\\tOK %s (%s)\"%(fullpath,mday5))\n\telse:print(\"Supplied path is not a directory\")\nelse:print(\"Supplied path does not exist\")\nw8forprocesses(max_count=0)\nprint(\"Uploads complete\")","sub_path":"Image Data Storage/img_upload.py","file_name":"img_upload.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"190345043","text":"'''\n\nIntegration test for testing thick/thick data volume operations on mini.\n\n@author: zhaohao.chen\n'''\n\nimport apibinding.inventory as inventory\nimport zstackwoodpecker.test_util as test_util\nimport zstackwoodpecker.test_state as test_state\nimport zstackwoodpecker.test_lib as test_lib\nimport zstackwoodpecker.operations.resource_operations as res_ops\nimport zstackwoodpecker.zstack_test.zstack_test_volume as test_volume_header\nimport zstackwoodpecker.operations.volume_operations as vol_ops\nimport zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header\nimport zstackwoodpecker.operations.vm_operations as vm_ops\nimport zstackwoodpecker.header.volume as volume_header\nimport time\nimport os\nimport random\nimport threading\n\nPROVISION = [\"volumeProvisioningStrategy::ThinProvisioning\",\"volumeProvisioningStrategy::ThickProvisioning\"]\nround_num = 10\nvolume = None\nvm = None\nvolumes = []\nts_attach = []\nts = []\ntest_obj_dict = test_state.TestStateDict()\n#state\nCREATED = volume_header.CREATED\nDETACHED = volume_header.DETACHED\nATTACHED = volume_header.ATTACHED\nDELETED = volume_header.DELETED\nEXPUNGED = volume_header.EXPUNGED\n\n\ndef vol_random_ops(vol, vm, vm_uuid):\n i = 0\n while vol.state != EXPUNGED:\n test_util.test_logger(\"op round%s: volume state %s\" % (i,vol.state))\n i += 1\n if vol.state == CREATED or vol.state == DETACHED:\n op = random.choice([vol.delete, vol.attach])\n if op == vol.attach:\n op(vm)\n else:\n op()\n continue\n elif vol.state == ATTACHED:\n op = random.choice([vol.delete, vol.detach])\n if op == vol.detach:\n op(vm_uuid)\n else:\n op()\n continue\n elif vol.state == DELETED:\n op = random.choice([vol.expunge, vol.recover])\n op()\n continue \n return \n\ndef test():\n global vm\n global test_obj_dict\n VM_CPU= 2\n VM_MEM = 2147483648\n #1.create vm\n vm_creation_option = test_util.VmOption()\n image_name = os.environ.get('imageName_s')\n image_uuid = test_lib.lib_get_image_by_name(image_name).uuid\n l3_name = os.environ.get('l3VlanNetworkName1')\n l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid\n vm_creation_option.set_l3_uuids([l3_net_uuid])\n vm_creation_option.set_image_uuid(image_uuid)\n vm_creation_option.set_name('Mini_vm_datavolume_test')\n vm_creation_option.set_cpu_num(VM_CPU)\n vm_creation_option.set_memory_size(VM_MEM)\n vm = test_vm_header.ZstackTestVm()\n vm.set_creation_option(vm_creation_option)\n vm.create()\n vm.check()\n test_obj_dict.add_vm(vm)\n vm_uuid = vm.get_vm().uuid\n\n #2.data volume operations test\n volume_creation_option = test_util.VolumeOption()\n ps_uuid = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0].uuid\n volume_creation_option.set_primary_storage_uuid(ps_uuid)\n #create thin/thick data volume with random disksize and random provision type\n for i in range(round_num):\n volume_name = \"volume_%s\" % i\n volume_creation_option.set_name(volume_name)\n max_size = 
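getMD5 in img_upload.py above hashes files in 32 MiB chunks with a hand-rolled read loop; `iter()` with a sentinel is the idiomatic spelling of the same loop. Minimal sketch:

import hashlib

def md5_of(path, chunk_size=1024 * 1024):
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        # iter(callable, sentinel) keeps calling read() until it returns b''
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()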
(res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0].availableCapacity - 1073741824)/(20 * 512)\n disk_size = random.randint(2048, max_size) * 512\n volume_creation_option.set_diskSize(disk_size)\n volume_creation_option.set_system_tags([random.choice(PROVISION)])\n volume = test_volume_header.ZstackTestVolume()\n volume.set_volume(vol_ops.create_volume_from_diskSize(volume_creation_option))\n volume.check()\n test_obj_dict.add_volume(volume)\n volume.state = CREATED\n volumes.append(volume)\n for vol in volumes:\n t = threading.Thread(target=vol_random_ops,args=(vol, vm, vm_uuid))\n ts.append(t)\n test_util.test_logger(\"thread added\")\n t.start()\n test_util.test_logger(\"thread started\")\n for t in ts:\n t.join()\n test_util.test_pass(\"Mini Data Volume Operations Test Success\")\n\ndef error_cleanup():\n global test_obj_dict\n test_lib.lib_error_cleanup(test_obj_dict)\n \ndef env_recover():\n global test_obj_dict\n test_lib.lib_error_cleanup(test_obj_dict)\n","sub_path":"integrationtest/vm/mini/volume/test_data_volume_operations.py","file_name":"test_data_volume_operations.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"370194418","text":"from django import forms\nfrom django.core.exceptions import ValidationError as DjangoValidationError\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import ngettext_lazy\n\nfrom hcap_accounts.models import RegionManager\nfrom hcap_geo.models import Region\n\n\nclass ManagerCitiesAuthorizationRequestForm(forms.Form):\n state = forms.ModelChoiceField(\n Region.objects.filter(kind=Region.KIND_STATE),\n label=_(\"State\"),\n empty_label=None,\n disabled=True,\n required=False,\n )\n\n state_hidden = forms.CharField(widget=forms.HiddenInput())\n\n cities = forms.ModelMultipleChoiceField(\n Region.objects.none(), label=_(\"Cities\"), widget=forms.CheckboxSelectMultiple\n )\n\n managers = []\n\n def __init__(self, *args, user, state_id, **kwargs):\n super().__init__(*args, **kwargs)\n self.user = user\n self.fields[\"state\"].queryset = Region.objects.filter(id=state_id)\n self.fields[\"state_hidden\"].initial = state_id\n self.fields[\"cities\"].queryset = Region.objects.filter(\n kind=Region.KIND_CITY, parents__id=state_id\n )\n\n def clean(self):\n cleaned_data = super().clean()\n\n managers = []\n invalid_cities = []\n\n for city in cleaned_data[\"cities\"]:\n try:\n manager = RegionManager(user=self.user, region=city, is_authorized=False)\n manager.full_clean()\n managers.append(manager)\n except DjangoValidationError as exception:\n error = exception.error_dict.get(\"__all__\", [None])[0]\n if error is not None and error.code == \"unique_together\":\n invalid_cities.append(city.name)\n else:\n raise exception\n\n length = len(invalid_cities)\n if length != 0:\n message = ngettext_lazy(\n \"You already have authorization request for the following city: %(cities)s.\",\n \"You already have authorization request for the following cities: %(cities)s.\",\n length,\n )\n self.add_error(\"cities\", message % {\"cities\": \", \".join(invalid_cities)})\n\n self.managers = managers\n\n return cleaned_data\n\n def save(self):\n if len(self.managers) != 0:\n 
RegionManager.objects.bulk_create(self.managers)\n","sub_path":"hcap/forms/manager_cities_authorization_request_form.py","file_name":"manager_cities_authorization_request_form.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"351189264","text":"import psycopg2\nimport nltk\nimport pickle\nfrom collections import Counter\nimport os\nfrom nltk.tokenize import RegexpTokenizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.sparse.linalg import svds\nfrom sklearn.preprocessing import normalize\nimport numpy as np\n\ndocuments = []\n\nhostname = '35.236.208.84'\nusername = 'postgres'\npassword = 'dnmSWIMS!'\ndatabase = 'postgres'\n\ndef connect():\n connection = None\n try:\n connection = psycopg2.connect(host=hostname, user=username, password=password, dbname=database)\n cursor = connection.cursor()\n # Print PostgreSQL Connection properties\n print ( connection.get_dsn_parameters(),\"\\n\")\n # Print PostgreSQL version\n cursor.execute(\"SELECT version();\")\n record = cursor.fetchone()\n print(\"You are connected to - \", record,\"\\n\")\n\n postgreSQL_select_Query = \"select * from production_tbl\"\n cursor.execute(postgreSQL_select_Query)\n document_records = cursor.fetchall()\n\n print(len(document_records))\n for row in document_records:\n documents.append(row[1]) #append content\n #print(\"doc_id = \", row[0], )\n #print(\"content = \", row[4], \"\\n\")\n print(\"Data read successfully in PostgreSQL \")\n except (Exception, psycopg2.Error) as error :\n print (\"Error while connecting to PostgreSQL\", error)\n finally:\n #closing database connection.\n if(connection != None):\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")\n\ndef svd():\n connect()\n vectorizer = TfidfVectorizer(stop_words = 'english', max_df = .7, min_df=75)\n matrix = vectorizer.fit_transform(documents).transpose()\n print(\"matrix shape =\", matrix.shape)\n\n u, s, v_trans = svds(matrix, k=10)\n #print(\"u shape =\", u.shape)\n word_to_index = vectorizer.vocabulary_\n index_to_word = {i:t for t,i in word_to_index.items()}\n u = normalize(u, axis = 1)\n\n with open('word_to_index.pickle', 'wb') as handle:\n pickle.dump(word_to_index, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n with open('index_to_word.pickle', 'wb') as handle:\n pickle.dump(index_to_word, handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n with open('u_matrix.pickle', 'wb') as handle:\n pickle.dump(u, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nsvd()","sub_path":"svd.py","file_name":"svd.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"200698019","text":"import pygame\nimport random\n\npygame.init()\npygame.font.init()\n\n\nclass Card(object):\n \"\"\" The Card Class \"\"\"\n\n def __init__(self, left, top, width, height,\n back_color, front_color, solved_color,\n display,\n font_color, text_font, value=None):\n self._rect = pygame.Rect(left, top, width, height)\n self._display = display\n self._back_color = back_color # color of card when face down\n self._front_color = front_color # color of card when face up\n self._solved_color = solved_color # color of card after it is matched\n self._font_color = font_color\n self._text_font = text_font\n self._value = value # the number we are trying to match\n self._unsolved = True # is set to false once matched\n self._hidden = True # card is face down to 
start\n self._times_seen = 0 # number of times player viewed card\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n self._value = value\n\n @property\n def times_seen(self):\n return self._times_seen\n\n def solved(self):\n self._unsolved = False\n pygame.draw.rect(self._display, self._solved_color, self._rect)\n\n def is_unsolved(self):\n return self._unsolved\n\n def is_clicked(self, pos):\n x_pos, y_pos = pos\n return self._rect.collidepoint(x_pos, y_pos) # did player click on this card?\n\n def is_hidden(self):\n return self._hidden\n\n def show_card(self):\n self._hidden = False\n self._times_seen += 1\n pygame.draw.rect(self._display, self._front_color, self._rect)\n text_surface = self._text_font.render(self._value, True, self._font_color)\n self._display.blit(text_surface, (self._rect.center[0] - (text_surface.get_width() / 2),\n self._rect.center[1] - (text_surface.get_height() / 2)))\n\n def hide_card(self):\n self._hidden = True\n pygame.draw.rect(self._display, self._back_color, self._rect)\n\n\ndef get_matching_card(card_list, card_to_match):\n \"\"\" This function returns the card that matches the one passed in \"\"\"\n the_matching_card = None\n for test_card in card_list:\n if test_card.value == card_to_match.value and test_card != card_to_match:\n the_matching_card = test_card\n break\n return the_matching_card\n\n\ndef cards_remaining(card_list):\n \"\"\" this function returns the number of cards that have not been matched yet \"\"\"\n num_remaining = 0\n for c in card_list:\n if c.is_unsolved():\n num_remaining += 1\n return num_remaining\n\n\nif __name__ == \"__main__\":\n\n display_width = 600\n display_height = 600\n\n card_font = pygame.font.SysFont('Comic Sans MS', 48)\n front_col = pygame.Color('white')\n solved_col = pygame.Color('#636363')\n back_col = pygame.Color('#293a32')\n font_col = pygame.Color('black')\n\n score_font = pygame.font.SysFont('Comic Sans MS', 24)\n score_txt_col = pygame.Color('#d4c38f')\n score_y_margin = 50\n score_x_margin = 20\n\n player_closed_app = False\n new_game = False\n\n cards = []\n\n game_display = pygame.display.set_mode((display_width, display_height))\n pygame.display.set_caption('Matching Game')\n game_display.fill(pygame.Color('#b5c9a6'))\n\n score_rect = pygame.draw.rect(game_display, pygame.Color('black'), pygame.Rect(0, 0, display_width, score_y_margin))\n\n surf_8x8_txt = score_font.render(\"8 x 8\", True, score_txt_col)\n left_pos = (game_display.get_width() - score_x_margin - surf_8x8_txt.get_width())\n surf_8x8_rect = game_display.blit(surf_8x8_txt, (left_pos, (score_y_margin - surf_8x8_txt.get_height()) / 2))\n\n surf_6x6_txt = score_font.render(\"6 x 6\", True, score_txt_col)\n left_pos = left_pos - surf_6x6_txt.get_width() - score_x_margin\n surf_6x6_rect = game_display.blit(surf_6x6_txt, (left_pos, (score_y_margin - surf_6x6_txt.get_height()) / 2))\n\n surf_4x4_txt = score_font.render(\"4 x 4\", True, score_txt_col)\n left_pos = left_pos - surf_4x4_txt.get_width() - score_x_margin\n surf_4x4_rect = game_display.blit(surf_4x4_txt, (left_pos, (score_y_margin - surf_4x4_txt.get_height()) / 2))\n\n surf_sel_txt = score_font.render(\"Select Game:\", True, score_txt_col)\n left_pos = left_pos - surf_sel_txt.get_width() - score_x_margin\n game_display.blit(surf_sel_txt, (left_pos, (score_y_margin - surf_sel_txt.get_height()) / 2))\n \n num_cols = 0\n num_rows = 0\n pick_1 = None # variable to hold first card selected by player\n score = 0\n max_score = 0 # maximum score 
a player can get\n while not player_closed_app:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n player_closed_app = True\n if new_game:\n pygame.draw.rect(game_display, pygame.Color('#b5c9a6'),\n pygame.Rect(0, score_y_margin, display_width, display_height - score_y_margin))\n total_pairs = (num_cols * num_rows) // 2\n max_score = total_pairs - 1 # player gets no credit for last two cards remaining\n pairs = list(range(1, total_pairs + 1)) + list(range(1, total_pairs + 1)) # create numbered pairs\n\n # calculate the width and height of the cards and the space between them\n card_horz_width = int((display_width * 0.8) / num_cols)\n space_horz_width = int((display_width * 0.2) / (num_cols + 1))\n card_vert_height = int(((display_height - score_y_margin) * 0.8) / num_rows)\n space_vert_height = int(((display_height - score_y_margin) * 0.2) / (num_rows + 1))\n\n # create cards and randomly assign the numbered pairs\n random.random()\n del cards[:]\n for row in range(1, num_rows + 1):\n for col in range(1, num_cols + 1):\n rnd_item = random.choice(pairs)\n pairs.remove(rnd_item)\n\n new_card_x = ((col - 1) * card_horz_width) + (col * space_horz_width)\n new_card_y = ((row - 1) * card_vert_height) + (row * space_vert_height) + score_y_margin\n crd = Card(new_card_x, new_card_y, card_horz_width, card_vert_height,\n back_col, front_col, solved_col, game_display, font_col, card_font, str(rnd_item))\n\n cards.append(crd)\n crd.hide_card()\n\n score = 0\n new_game = False\n if pygame.mouse.get_pressed()[0]:\n if surf_4x4_rect.collidepoint(pygame.mouse.get_pos()): # start new game 4 x 4\n new_game = True\n num_cols = 4\n num_rows = 4\n pygame.time.wait(200) # wait 200ms to avoid multiple new game mouse click events\n if surf_6x6_rect.collidepoint(pygame.mouse.get_pos()): # start new game 6 x 6\n new_game = True\n num_cols = 6\n num_rows = 6\n pygame.time.wait(200)\n if surf_8x8_rect.collidepoint(pygame.mouse.get_pos()): # start new game 8 x 8\n new_game = True\n num_cols = 8\n num_rows = 8\n pygame.time.wait(200)\n for crd in cards:\n if crd.is_clicked(pygame.mouse.get_pos()) and crd.is_hidden() and crd.is_unsolved():\n crd.show_card()\n pygame.display.flip()\n if pick_1 is None:\n pick_1 = crd # player picked first card\n else: # player picked second card.\n if pick_1.value == crd.value: # it is a match!\n pick_1.solved()\n crd.solved()\n if crd.times_seen > 1 and cards_remaining(cards) > 0:\n score += 1 # if you have seen the matching card at least once before, you get a point\n elif crd.times_seen == 1 and cards_remaining(cards) > 0:\n max_score -= 1 # no points for luck, we just reduce the max possible score\n pygame.time.wait(500) # show matching values for 500ms\n else: # it did not match\n pick_1.hide_card()\n crd.hide_card()\n matching_card = get_matching_card(cards, pick_1)\n if matching_card.times_seen > 0:\n score -= 1 # player has seen the matching card before! 1 point penalty!\n if crd.times_seen > 1:\n score -= 1 # player should have known this card was not a match! 
1 point penalty!\n                        pygame.time.wait(1500) # show card values for 1.5sec\n                    pick_1 = None # get ready for next pair of selections by player\n                break\n        # update score\n        surf_wrong = score_font.render("Score = " + str(score) + " out of " + str(max_score), True, score_txt_col)\n        pygame.draw.rect(game_display, pygame.Color('black'),\n                         pygame.Rect(score_x_margin, 0, surf_wrong.get_width() + 100, score_y_margin))\n        game_display.blit(surf_wrong, (score_x_margin, (score_y_margin - surf_wrong.get_height()) / 2))\n        pygame.display.flip()\n\n    # player exited the application\n    pygame.quit()\n    quit()\n","sub_path":"matchingGame.py","file_name":"matchingGame.py","file_ext":"py","file_size_in_byte":9562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"569485096","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nWINDOWS process daemon\nAuthor: 韦俊杰\nLast edited: 2018-07-02\n\nAutomatically restarts the program when its process is not in the system process list\n\n\"\"\"\n\nimport wmi\nimport os\nimport sys\nimport time\nfrom configparser import ConfigParser\n\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\n\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nimport UI_main\n\nimport threading\n\nLOG_FORMAT = '%(asctime)s - %(module)s.%(funcName)s.%(lineno)d - %(levelname)s - %(message)s'\nformatter = logging.Formatter(LOG_FORMAT)\n\nlogger = logging.getLogger('mylogger')\nlogger.setLevel(logging.DEBUG)\n\nfh = logging.FileHandler(os.path.join(os.getcwd(), 'log.txt'))\nfh.setLevel(logging.DEBUG)\nfh = TimedRotatingFileHandler(filename='log.txt',when='midnight',interval=1,backupCount=7)\n#logging.handlers.suffix = \"%Y-%m-%d\"\n\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\n\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\n\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\nCONFIGFILE = './config.ini'\nconfig = ConfigParser()\nconfig.read(CONFIGFILE)\n\nProgramPath = config.get('MonitorProgramPath', 'ProgramPath')\nProcessName = config.get('MonitorProcessName', 'ProcessName')\nScanTime = int(config.get('MonitorScanTime', 'ScanTime')) * 1000\n\ncount = 0\n\nclass mainshow(QtWidgets.QWidget, UI_main.Ui_Form):\n    def __init__(self):\n        super(mainshow,self).__init__()\n        self.setupUi(self)\n\n        # UI style\n        QApplication.setStyle('Fusion')\n        self.setWindowFlags(Qt.MSWindowsFixedSizeDialogHint\\\n            |Qt.WindowMinimizeButtonHint|Qt.WindowCloseButtonHint)\n\n        self.pname.setText(str(ProcessName))\n        self.textBrowser.setText(str(ProgramPath))\n        self.lcdNumber.setStyleSheet(\"background-color: yellow;color:red\")\n        self.lcdNumber.display(count)\n\n        # loopt = threading.Thread(target=self.loop, daemon=True) # thread that refreshes the data automatically\n        # loopt.start()\n        # self.main()\n\n        #self.pushButton_2.clicked.connect(self.loop)\n        self.main()\n        self.timer = QTimer(self)\n        self.timer.timeout.connect(self.main)\n        self.timer.start(ScanTime)\n\n    def main(self):\n        ProList = [] # if ProList lived outside main(), the list contents would never be cleared\n        global count\n        c = wmi.WMI()\n        for process in c.Win32_Process():\n            ProList.append(str(process.Name))\n\n        if ProcessName in ProList:\n            self.STU.setStyleSheet(\"background-color: none;color:green\")\n            self.STU.setText(str(\"Process \" + ProcessName + \" is running\"))\n            # if os.path.isdir(\"c:\\\\MonitorWin32Process\"):\n            #     pass\n            # else:\n            #     os.makedirs(\"c:\\\\MonitorWin32Process\")\n\n        else:\n            self.STU.setStyleSheet(\"background-color: yellow;color:red\")\n            self.STU.setText(str(\"Process \" + ProcessName + \" has stopped! Restarting...\"))\n            logger.warning(\"Process \" + ProcessName + \" has stopped\")\n            count = count + 1\n            
self.lcdNumber.display(count)\n os.startfile(ProgramPath)\n\n # def loop(self):\n # #self.pushButton_2.setDisabled(True)\n # while True:\n # self.main()\n # time.sleep(300)\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n MainW = mainshow()\n MainW.show()\n sys.exit(app.exec_())","sub_path":"Daemon.py","file_name":"Daemon.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"414319316","text":"from __future__ import with_statement\n'''\nCreated on Dec 5, 2009\n\n@author: marat\n'''\nfrom __future__ import with_statement\n\nimport logging\nimport time\nimport threading\nimport copy\nimport sys\n\nfrom scalarizr.bus import bus\nfrom scalarizr.messaging import MessageService, Message, Queues, MetaOptions, MessagingError\nfrom scalarizr.messaging.p2p.security import P2pMessageSecurity\n\n\n\"\"\"\nInFilter\nOutFilter\n\"\"\"\n\nLOG = logging.getLogger(__name__)\n\nclass P2pConfigOptions:\n SERVER_ID = \"server_id\"\n CRYPTO_KEY_PATH = \"crypto_key_path\"\n PRODUCER_URL = \"producer_url\"\n PRODUCER_RETRIES_PROGRESSION = \"producer_retries_progression\"\n PRODUCER_SENDER = \"producer_sender\"\n CONSUMER_URL = \"consumer_url\"\n MSG_HANDLER_ENABLED = 'msg_handler_enabled'\n\n\nclass P2pMessageService(MessageService):\n _params = {}\n _default_producer = None\n _default_consumer = None\n\n def __init__(self, **params):\n self._params = params\n self._security = P2pMessageSecurity(\n self._params[P2pConfigOptions.SERVER_ID],\n self._params[P2pConfigOptions.CRYPTO_KEY_PATH]\n )\n\n def new_message(self, name=None, meta=None, body=None):\n return P2pMessage(name, meta, body)\n\n def get_consumer(self):\n if not self._default_consumer:\n self._default_consumer = self.new_consumer(\n endpoint=self._params[P2pConfigOptions.CONSUMER_URL],\n msg_handler_enabled=self._params.get(P2pConfigOptions.MSG_HANDLER_ENABLED, True)\n )\n return self._default_consumer\n\n def new_consumer(self, **params):\n from scalarizr.messaging.p2p import consumer\n c = consumer.P2pMessageConsumer(**params)\n c.filters['protocol'].append(self._security.in_protocol_filter)\n return c\n\n def get_producer(self):\n if not self._default_producer:\n self._default_producer = self.new_producer(\n endpoint=self._params[P2pConfigOptions.PRODUCER_URL],\n retries_progression=self._params[P2pConfigOptions.PRODUCER_RETRIES_PROGRESSION],\n )\n return self._default_producer\n\n def new_producer(self, **params):\n from scalarizr.messaging.p2p import producer\n p = producer.P2pMessageProducer(**params)\n p.filters['protocol'].append(self._security.out_protocol_filter)\n return p\n\n def send(self, name, body=None, meta=None, queue=None):\n msg = self.new_message(name, meta, body)\n self.get_producer().send(queue or Queues.CONTROL, msg)\n\n\ndef new_service(**kwargs):\n return P2pMessageService(**kwargs)\n\nclass _P2pMessageStore:\n _logger = None\n\n TAIL_LENGTH = 50\n\n def __init__(self):\n self._logger = logging.getLogger(__name__)\n self._local_storage_lock = threading.Lock()\n ex = bus.periodical_executor\n if ex:\n self._logger.debug('Add rotate messages table task for periodical executor')\n ex.add_task(self.rotate, 3600, 'Rotate messages sqlite table') # execute rotate task each hour\n\n def _conn(self):\n return bus.db\n\n\n @property\n def _unhandled_messages(self):\n if not hasattr(self, '_unhandled'):\n self._unhandled = self._get_unhandled_from_db()\n return self._unhandled\n\n\n def rotate(self):\n conn = 
self._conn()\n cur = conn.cursor()\n cur.execute('SELECT * FROM p2p_message ORDER BY id DESC LIMIT %d, 1' % self.TAIL_LENGTH)\n row = cur.fetchone()\n if row:\n self._logger.debug('Deleting messages older then messageid: %s', row['message_id'])\n cur.execute('DELETE FROM p2p_message WHERE id <= ?', (row['id'],))\n conn.commit()\n\n def put_ingoing(self, message, queue, consumer_id):\n with self._local_storage_lock:\n self._unhandled_messages.append((queue, message))\n\n conn = self._conn()\n cur = conn.cursor()\n try:\n sql = 'INSERT INTO p2p_message (id, message, message_id, ' \\\n 'message_name, queue, is_ingoing, in_is_handled, in_consumer_id, format) ' \\\n 'VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?)'\n\n #self._logger.debug('Representation mes: %s', repr(str(message)))\n cur.execute(sql, [message.tojson().decode('utf-8'), message.id, message.name, queue, 1, 0, consumer_id, 'json'])\n '''\n cur.execute(sql, [str(message), message.id.decode('utf-8'),\n message.name.decode('utf-8'), queue.encode('utf-8'), 1, 0,\n consumer_id.encode('utf-8')])\n '''\n if message.meta.has_key(MetaOptions.REQUEST_ID):\n cur.execute(\"\"\"UPDATE p2p_message\n SET response_uuid = ? WHERE message_id = ?\"\"\",\n [message.id, message.meta[MetaOptions.REQUEST_ID]])\n\n self._logger.debug(\"Commiting put_ingoing\")\n conn.commit()\n self._logger.debug(\"Commited put_ingoing\")\n finally:\n cur.close()\n\n\n def get_unhandled(self, consumer_id):\n with self._local_storage_lock:\n ret = []\n for queue, message in self._unhandled_messages:\n msg_copy = P2pMessage()\n msg_copy.fromjson(message.tojson())\n ret.append((queue, msg_copy))\n\n return ret\n\n\n def _get_unhandled_from_db(self):\n \"\"\"\n Return list of unhandled messages in obtaining order\n @return: [(queue, message), ...]\n \"\"\"\n cur = self._conn().cursor()\n try:\n sql = 'SELECT queue, message_id FROM p2p_message ' \\\n 'WHERE is_ingoing = ? AND in_is_handled = ? ' \\\n 'ORDER BY id'\n cur.execute(sql, [1, 0])\n\n ret = []\n for r in cur.fetchall():\n ret.append((r[\"queue\"], self.load(r[\"message_id\"], True)))\n return ret\n finally:\n cur.close()\n\n\n def mark_as_handled(self, message_id):\n with self._local_storage_lock:\n filter_fn = lambda x: x[1].id != message_id\n self._unhandled = filter(filter_fn, self._unhandled_messages)\n\n for _ in xrange(0, 5):\n try:\n msg = self.load(message_id, True)\n break\n except:\n self._logger.debug('Failed to load message %s', message_id, exc_info=sys.exc_info())\n time.sleep(1)\n else:\n self._logger.debug(\"Cant load message in several attempts, assume it doesn't exists. Leaving\")\n return\n\n\n if 'platform_access_data' in msg.body:\n del msg.body['platform_access_data']\n msg_s = msg.tojson().decode('utf-8')\n\n conn = self._conn()\n cur = conn.cursor()\n try:\n sql = 'UPDATE p2p_message SET in_is_handled = ?, message = ?, out_last_attempt_time = datetime(\"now\")' \\\n 'WHERE message_id = ? 
AND is_ingoing = ?'\n cur.execute(sql, [1, msg_s, message_id, 1])\n conn.commit()\n finally:\n cur.close()\n\n\n def put_outgoing(self, message, queue, sender):\n conn = self._conn()\n cur = conn.cursor()\n try:\n sql = 'INSERT INTO p2p_message (id, message, message_id, message_name, queue, ' \\\n 'is_ingoing, out_is_delivered, out_delivery_attempts, out_sender, format) ' \\\n 'VALUES ' \\\n '(NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?)'\n\n cur.execute(sql, [message.tojson().decode('utf-8'), message.id,\n message.name, queue, 0, 0, 0, sender, 'json'])\n conn.commit()\n finally:\n cur.close()\n\n\n def get_undelivered(self, sender):\n \"\"\"\n Return list of undelivered messages in outgoing order\n \"\"\"\n cur = self._conn().cursor()\n try:\n sql = 'SELECT queue, message_id FROM p2p_message ' \\\n 'WHERE is_ingoing = ? AND out_is_delivered = ? AND out_sender = ? ORDER BY id'\n cur.execute(sql, [0, 0, sender])\n ret = []\n for r in cur.fetchall():\n ret.append((r[0], self.load(r[1], False)))\n return ret\n finally:\n cur.close()\n\n def mark_as_delivered(self, message_id):\n return self._mark_as_delivered(message_id, 1)\n\n\n def mark_as_undelivered(self, message_id):\n return self._mark_as_delivered(message_id, 0)\n\n def _mark_as_delivered (self, message_id, delivered):\n conn = self._conn()\n cur = conn.cursor()\n try:\n sql = 'UPDATE p2p_message SET out_delivery_attempts = out_delivery_attempts + 1, ' \\\n 'out_last_attempt_time = datetime(\"now\"), out_is_delivered = ? ' \\\n 'WHERE message_id = ? AND is_ingoing = ?'\n cur.execute(sql, [int(bool(delivered)), message_id, 0])\n conn.commit()\n finally:\n cur.close()\n\n def load(self, message_id, is_ingoing):\n cur = self._conn().cursor()\n try:\n cur.execute('SELECT * FROM p2p_message ' \\\n 'WHERE message_id = ? AND is_ingoing = ?',\n [message_id, int(bool(is_ingoing))])\n row = cur.fetchone()\n if not row is None:\n message = P2pMessage()\n self._unmarshall(message, row)\n return message\n else:\n raise MessagingError(\"Cannot find message (message_id: %s)\" % message_id)\n finally:\n cur.close()\n\n def is_handled(self, message_id):\n with self._local_storage_lock:\n filter_fn = lambda x: x[1].id == message_id\n filtered = filter(filter_fn, self._unhandled_messages)\n return not filtered\n\n\n def is_delivered(self, message_id):\n cur = self._conn().cursor()\n try:\n cur.execute('SELECT is_delivered FROM p2p_message ' \\\n 'WHERE message_id = ? AND is_ingoing = ?',\n [message_id, 0])\n return cur.fetchone()[\"out_is_delivered\"] == 1\n finally:\n cur.close()\n\n def is_response_received(self, message_id):\n cur = self._conn().cursor()\n try:\n sql = 'SELECT response_id FROM p2p_message ' \\\n 'WHERE message_id = ? AND is_ingoing = ?'\n cur.execute(sql, [message_id, 0])\n return cur.fetchone()[\"response_id\"] != \"\"\n finally:\n cur.close()\n\n def get_response(self, message_id):\n cur = self._conn().cursor()\n try:\n cur.execute('SELECT response_id FROM p2p_message ' \\\n 'WHERE message_id = ? 
AND is_ingoing = ?',\n [message_id, 0])\n response_id = cur.fetchone()[\"response_id\"]\n if not response_id is None:\n return self.load(response_id, True)\n return None\n finally:\n cur.close()\n\n def _unmarshall(self, message, row):\n #message.fromxml(row[\"message\"].encode('utf-8'))\n format = row[\"format\"]\n if 'json' == format:\n message.fromjson(row[\"message\"])\n else:\n message.fromxml(row[\"message\"])\n\n_message_store = None\ndef P2pMessageStore():\n global _message_store\n if _message_store is None:\n _message_store = _P2pMessageStore()\n return _message_store\n\n\nclass P2pMessage(Message):\n\n def __init__(self, name=None, meta=None, body=None):\n Message.__init__(self, name, meta, body)\n self.__dict__[\"_store\"] = P2pMessageStore()\n if bus.cnf:\n cnf = bus.cnf; ini = cnf.rawini\n # XXX: when it is incoming message\n self.meta[MetaOptions.SERVER_ID] = ini.get('general', 'server_id')\n\n def is_handled(self):\n return self._store.is_handled(self.id)\n\n def is_delivered(self):\n return self._store.is_delivered(self.id)\n\n def is_responce_received(self):\n return self._store.is_response_received(self.id)\n\n def get_response(self):\n return self._store.get_response(self.id)\n","sub_path":"src/scalarizr/messaging/p2p/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"521911616","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0006_auto_20150202_0106'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TextCampaignMembership',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date_joined', models.DateField()),\n ('is_callable', models.NullBooleanField(default=True)),\n ('campaign', models.ForeignKey(to='app.Campaign')),\n ('contact', models.ForeignKey(to='app.Contact')),\n ],\n options={\n 'managed': True,\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"app/migrations/0007_textcampaignmembership.py","file_name":"0007_textcampaignmembership.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"366030829","text":"# Michael P. 
Hayes UCECE, Copyright 2018--2019\nimport numpy as np\nfrom matplotlib.pyplot import subplots, arrow\nfrom ipywidgets import interact, interactive, fixed\nfrom .lib.robot import robot_draw, Robot\nfrom .lib.pose import Pose\n\n\ndef motion_model(pose0, v, omega, dt):\n\n    x0, y0, theta0 = pose0 \n    \n    if omega == 0.0:\n        x1 = x0 + v * np.cos(theta0) * dt\n        y1 = y0 + v * np.sin(theta0) * dt\n        theta1 = theta0\n    else:\n        x1 = x0 - v / omega * np.sin(theta0) + v / omega * np.sin(theta0 + omega * dt)\n        y1 = y0 + v / omega * np.cos(theta0) - v / omega * np.cos(theta0 + omega * dt)\n        theta1 = theta0 + omega * dt\n\n    return (x1, y1, theta1)\n\n\ndef odom_decompose(pose1, pose0):\n\n    x1, y1, p1 = pose0\n    x2, y2, p2 = pose1\n\n    phi1 = np.arctan2(y2 - y1, x2 - x1) - p1\n    d = np.sqrt((y2 - y1)**2 + (x2 - x1)**2)\n    phi2 = p2 - p1 - phi1\n\n    return phi1, d, phi2\n\n\ndef speeds_decompose(pose1, pose0, dt):\n\n    x1, y1, p1 = pose0\n    x2, y2, p2 = pose1 \n    omega = (pose1[2] - pose0[2]) / dt\n\n    d = np.sqrt((y2 - y1)**2 + (x2 - x1)**2) \n\n    if omega == 0:\n        v = d / dt\n    else:\n        v = omega * d / (2 * np.tan(omega * dt / 2))\n    return v, omega\n\n\ndef motion_decompose_demo1_plot(x0=0, y0=0, theta0=0, v=2, omega=0):\n\n    dt = 1.0\n    pose0 = (x0, y0, np.radians(theta0))\n    pose1 = motion_model(pose0, v, np.radians(omega), dt)\n    \n    phi1, d, phi2 = odom_decompose(pose1, pose0)\n\n    v, omega = speeds_decompose(pose1, pose0, 1.0) \n\n    fig, ax = subplots(figsize=(10, 5)) \n    Pose(0, 0, 0).draw_axes(ax)\n    \n    ax.axis('equal')\n    ax.set_xlim(-2.5, 2.5)\n    ax.set_ylim(-2.5, 2.5)\n    ax.grid(True)\n\n    robot_draw(ax, pose0[0], pose0[1], pose0[2], colour='blue')\n    robot_draw(ax, pose1[0], pose1[1], pose1[2], colour='orange') \n    \n    ax.set_title('$\\phi_1 = %.1f$ deg, $d = %.1f$ m, $\\phi_2 = %.1f$ deg' %\n                 (np.degrees(phi1), d, np.degrees(phi2)))\n    \n\ndef motion_decompose_demo1():\n    interact(motion_decompose_demo1_plot,\n             x0=(-2, 2, 0.1), y0=(-2, 2, 0.1), theta0=(-180, 180, 15),\n             v=(0, 2, 0.1), omega=(-60, 60, 15),\n             continuous_update=False)\n","sub_path":"sensor-fusion/demos/motion_decompose_demo1.py","file_name":"motion_decompose_demo1.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"34063381","text":"# %%\nimport os\nimport glob\nimport pandas as pd\n# %% [markdown]\n'''\n# Merge the files organized per folder into a single csv file\n- work with the 27 items\n- define and call a function that merges the files\n- after building the 27 csv tables, check for missing values\n'''\n# %%\ndef get_merged_csv(flist, **kwargs):\n    return pd.concat([pd.read_csv(f, **kwargs) for f in flist], ignore_index = True)\n\npath = input('Enter your dataset path: ')\npath_list = os.listdir(path)\n\nfor p in path_list:\n    fmask = os.path.join(path + \"\\\\\" + p, p + \"_*.csv\")\n    globals()[p.lower()] = get_merged_csv(glob.glob(fmask))","sub_path":"src/DsProject/data/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
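A minimal sketch of the merge pattern used in the record above (glob + pd.concat), followed by the missing-value check its notes call for. The folder layout `dataset/TEMP` and the file pattern are illustrative assumptions, not paths from the record:

import glob
import os

import pandas as pd

# e.g. dataset/TEMP/TEMP_2019.csv, dataset/TEMP/TEMP_2020.csv -> one frame
files = glob.glob(os.path.join('dataset', 'TEMP', 'TEMP_*.csv'))
merged = pd.concat([pd.read_csv(f) for f in files], ignore_index=True)
print(merged.isnull().sum())  # per-column missing-value counts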
+{"seq_id":"182551568","text":"# -*- coding: utf-8 -*-\n\nfrom collections import OrderedDict\n\nfrom time import sleep\nimport random\nimport json\nimport logging\nlogger = logging.getLogger()\n#logger.setLevel(logging.INFO)\n\n\nclass MessageBuilder(object):\n    def __init__(self, build_info, message):\n        self.buildInfo = build_info\n        self.actions = []\n        self.messageId = None\n        self.isStarted = False\n\n        logger.info(\"MessageBuilder#__init__ build_info:{}, message:{}\".format(vars(build_info), json.dumps(message)))\n        if message:\n            att = message['attachments'][0]\n            self.fields = att['fields']\n            self.actions = att.get('actions', [])\n            self.messageId = message['ts']\n            return\n\n        # logger.info(\"Actions {}\".format(self.actions))\n        self.fields = [\n            {\n                \"title\" : build_info.pipeline,\n                \"value\" : \"UNKNOWN\",\n                \"short\" : True\n            },\n            {\n                \"title\": \"Stages\",\n                \"value\": \"\",\n                \"short\": True\n            }\n        ]\n\n    def hasField(self, name):\n        return len([f for f in self.fields if f['title'] == name]) > 0\n\n    def needsRevisionInfo(self):\n        return not self.hasField('Revision')\n\n    def attachRevisionInfo(self, rev):\n        if not self.needsRevisionInfo():\n            return\n        if rev is None:\n            return\n\n        if 'revisionUrl' in rev:\n            url = rev['revisionUrl']\n            commit = rev['revisionId'][:7]\n            message = rev['revisionSummary']\n            # multi-line summaries break the link, so cut at the first newline character\n            line_sp_pos = message.find('\\n')\n            if line_sp_pos > 0:\n                message = message[:line_sp_pos]\n            message = message.encode('utf-8')\n\n            self.fields.append({\n                \"title\": \"Revision\",\n                \"value\": \"<{}|{}: {}>\".format(url, commit, message),\n                \"short\": False\n            })\n            return\n\n        self.fields.append({\n            \"title\": \"Revision\",\n            \"value\": rev['revisionSummary'],\n            \"short\": False\n        })\n\n    def attachLogs(self, logs):\n        self.findOrCreateAction('Build Logs', logs['deep-link'])\n\n    def findOrCreateAction(self, name, link):\n        for action in self.actions:\n            if action['text'] == name:\n                return action\n\n        action = { \"type\": \"button\", \"text\": name, \"url\": link }\n        self.actions.append(action)\n        return action\n\n    def pipelineStatus(self):\n        return self.fields[0]['value']\n\n    def findOrCreatePart(self, title, short=True):\n        for action in self.fields:\n            if action['title'] == title:\n                return action\n\n        p = { \"title\": title, \"value\": \"\", \"short\": short }\n        self.fields.append(p)\n        return p\n\n    def updateBuildStageInfo(self, name, phases, info):\n        url = info.get('latestExecution', {}).get('externalExecutionUrl')\n        if url:\n            self.findOrCreateAction('Build dashboard', url)\n\n        si = self.findOrCreatePart(name, short=False)\n        def pi(p):\n            p_status = p.get('phase-status', 'IN_PROGRESS')\n            return BUILD_PHASES[p_status]\n        def fmt_p(p):\n            msg = \"{} {}\".format(pi(p), p['phase-type'])\n            d = p.get('duration-in-seconds')\n            if d:\n                return msg + \" ({})\".format(d)\n            return msg\n\n        def show_p(p):\n            d = p.get('duration-in-seconds')\n            return p['phase-type'] != 'COMPLETED' and d is None or d > 0\n\n        def pc(p):\n            ctx = p.get('phase-context', [])\n            if len(ctx) > 0:\n                if ctx[0] != ': ':\n                    return ctx[0]\n            return None\n\n        context = [pc(p) for p in phases if pc(p)]\n\n        if len(context) > 0:\n            self.findOrCreatePart(\"Build Context\", short=False)['value'] = \" \".join(context)\n\n        pp = [fmt_p(p) for p in phases if show_p(p)]\n        si['value'] = \" \".join(pp)\n\n    def updateStatusInfo(self, stage_info, stage, status):\n        stage_dict = OrderedDict()\n        stage_delimiter = \"\\n\"\n        status_delimiter = \" \"\n\n        if len(stage_info) > 0:\n            for part in stage_info.split(stage_delimiter):\n                (cur_icon, cur_stage) = part.split(status_delimiter)\n                stage_dict[cur_stage] = cur_icon\n\n        # do not overwrite a stage that already has a completed status\n        if not is_status_icon_already_completed(stage_dict.get(stage)):\n            stage_dict[stage] = STATE_ICONS[status]\n\n        part_format = '%s' + status_delimiter + '%s'\n        return stage_delimiter.join([part_format % (v, k) for (k, v) in stage_dict.items()])\n\n    def updatePipelineEvent(self, event):\n        if event['detail-type'] == \"CodePipeline Pipeline Execution State Change\":\n
            state = event['detail']['state']\n            if not is_status_already_completed(self.fields[0]['value']):\n                self.fields[0]['value'] = state\n\n            if state == 'STARTED':\n                self.isStarted = True\n            # for completed-type statuses, wait a little to avoid concurrent updates\n            if is_status_already_completed(state):\n                sleep(random.random())\n            return\n\n        if event['detail-type'] == \"CodePipeline Stage Execution State Change\":\n            stage = event['detail']['stage']\n            state = event['detail']['state']\n            self.updatePipelineEventStage(stage, state)\n            return\n\n        if event['detail-type'] == \"CodePipeline Action Execution State Change\":\n            stage = event['detail']['stage']\n            state = event['detail']['state']\n            self.updatePipelineEventStage(stage, state)\n\n            action = event['detail']['action']\n            provider = event['detail']['type']['provider']\n            action_state = event['detail']['state']\n            self.updatePipelineEventAction(action, provider, action_state)\n            return\n\n        raise ValueError('event.detail-type:' + event['detail-type'] + ' is not supported.')\n\n\n    def updatePipelineEventStage(self, stage, state):\n        stage_info = self.findOrCreatePart('Stages')\n        stage_info['value'] = self.updateStatusInfo(stage_info['value'], stage, state)\n\n    def updatePipelineEventAction(self, action, provider, state):\n        # TODO: not implemented yet\n        logger.info(\"updatePipelineEventAction action={}, provider={}, state={}\".format(action, provider, state))\n\n    def color(self):\n        return STATE_COLORS.get(self.pipelineStatus(), '#eee')\n\n    def message(self):\n        return [\n            {\n                \"fields\": self.fields,\n                \"color\": self.color(),\n                \"footer\": self.buildInfo.executionId,\n                \"actions\": self.actions\n            }\n        ]\n\n\ndef is_status_already_completed(status):\n    if status is None:\n        return False\n\n    if status == \"SUCCEEDED\":\n        return True\n# allow FAILED runs to be updated on rerun\n#    if status == \"FAILED\":\n#        return True\n    if status == \"CANCELED\":\n        return True\n\n    return False\n\n\ndef is_status_icon_already_completed(status_icon):\n    if status_icon is None:\n        return False\n\n    if status_icon == STATE_ICONS[\"SUCCEEDED\"]:\n        return True\n# allow FAILED runs to be updated on rerun\n#    if status_icon == STATE_ICONS[\"FAILED\"]:\n#        return True\n    if status_icon == STATE_ICONS[\"CANCELED\"]:\n        return True\n\n    return False\n\n\n# https://docs.aws.amazon.com/codepipeline/latest/userguide/detect-state-changes-cloudwatch-events.html \nSTATE_ICONS = {\n    'STARTED': \":building_construction:\",\n    'SUCCEEDED': \":white_check_mark:\",\n    'RESUMED': \"\",\n    'FAILED': \":x:\",\n    'CANCELED': \":no_entry:\",\n    'SUPERSEDED': \"\"\n}\n\nSTATE_COLORS = {\n    'STARTED': \"#9E9E9E\",\n    'SUCCEEDED': \"good\",\n    'RESUMED': \"\",\n    'FAILED': \"danger\",\n    'CANCELED': \"\",\n    'SUPERSEDED': \"\"\n}\n\n# https://docs.aws.amazon.com/codebuild/latest/APIReference/API_BuildPhase.html\nBUILD_PHASES = {\n    'SUCCEEDED': \":white_check_mark:\",\n    'FAILED': \":x:\",\n    'FAULT': \"\",\n    'TIMED_OUT': \":stop_watch:\",\n    'IN_PROGRESS': \":building_construction:\",\n    'STOPPED': \"\"\n}\n","sub_path":"src/message_builder.py","file_name":"message_builder.py","file_ext":"py","file_size_in_byte":8279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
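A quick illustration of the "icon stage" string that updateStatusInfo above maintains per Slack attachment. The icons are taken from the record's STATE_ICONS table; the standalone function is a simplified restatement for illustration, not the project's API:

ICONS = {'STARTED': ':building_construction:', 'SUCCEEDED': ':white_check_mark:'}

def update(stage_info, stage, status, icons=ICONS):
    # parse "icon stage" lines back into {stage: icon}, then overwrite one entry
    stages = dict(part.split(' ', 1)[::-1] for part in stage_info.split('\n') if part)
    stages[stage] = icons[status]
    return '\n'.join('%s %s' % (icon, name) for name, icon in stages.items())

line = update('', 'Source', 'STARTED')      # ':building_construction: Source'
line = update(line, 'Source', 'SUCCEEDED')  # the icon flips to the check mark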
+{"seq_id":"559795296","text":"from util.data_container import NfNuTestContainer\nfrom util.data_interface import JsonDAO\nfrom util.plot import Plotter\nimport numpy as np\n\nnfs_to_test = (2000, 4000, 10000, 100000)\nnus_to_test = (40, 80, 100, 500, 1000)\n\npaths = ['results/van_der_pol/2020-10-24-12-31-28-Nf-Nu-proportion-test/data.json',\n         'results/van_der_pol/2020-10-28-09-52-26-Nf-Nu-proportion-test/data.json',\n         'results/van_der_pol/2020-10-28-09-57-25-Nf-Nu-proportion-test/data.json',\n         'results/van_der_pol/2020-10-28-10-10-01-Nf-Nu-proportion-test/data.json',\n         'results/van_der_pol/2020-10-28-10-12-16-Nf-Nu-proportion-test/data.json']\n\nfinal_val_losses_matrixes = list()\nval_losses = {4000: {100: list()}, 10000: {80: list(), 500: list()}, 100000: {100: list(), 1000: list()}}\nval_losses_len = 6350\n\ndao = JsonDAO()\n\nfor path in paths:\n    dictionary = dao.load(path)\n    data_container = NfNuTestContainer()\n    data_container.results = dictionary\n    final_losses = data_container.get_final_val_losses(nfs_to_test, nus_to_test)\n    final_val_losses_matrixes.append(final_losses)\n    for nf in val_losses.keys():\n        nf_dict = val_losses[nf]\n        for nu in nf_dict.keys():\n            val_losses[nf][nu].append(np.array(data_container.get_val_loss(nf, nu)[:val_losses_len]))\n\nlabels = list()\ny_axis_list = list()\nfor nf in val_losses.keys():\n    nf_dict = val_losses[nf]\n    for nu in nf_dict.keys():\n        y_axis_list.append(sum(val_losses[nf][nu])/len(val_losses[nf][nu]))\n        labels.append(str(nf) + 'Nf and ' + str(nu) + 'Nu')\n\nplot_matrix = sum(final_val_losses_matrixes)/len(final_val_losses_matrixes)\n\nplotter = Plotter(fontsize=11)\nfigsize=(4.5, 3.2)\n\nplotter.plot_heatmap(data=np.log10(plot_matrix),\n                     title='$\\log$(MSE)', # validation\n                     x_label='Nt',\n                     y_label='Nf',\n                     row_labels=nfs_to_test,\n                     col_labels=nus_to_test,\n                     figsize=figsize)\n\nplotter.plot(x_axis=np.linspace(1, val_losses_len, val_losses_len),\n             y_axis_list=y_axis_list,\n             labels=labels,\n             title='MSE',\n             x_label='Epoch',\n             y_label=None,\n             y_scale='log')\nplotter.show()\n","sub_path":"Nf_Nu_test_avg_plot.py","file_name":"Nf_Nu_test_avg_plot.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"604946081","text":"from dataclasses import dataclass\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import Generator, Sequence\nfrom decimal import Decimal\n\ntry:\n    import pandas as pd\nexcept ImportError:\n    # pandas not available; the functions that return DataFrames won't be usable\n    pass\n\nfrom calcbench.api_client import _json_POST, set_field_values\nfrom calcbench.api_query_params import (\n    APIQueryParams,\n    CompanyIdentifiers,\n    Period,\n    PeriodParameters,\n)\n\n\nclass FormatType(str, Enum):\n    Text = \"text\"\n\n\n@dataclass\nclass PressReleaseDataPoint:\n    \"\"\"\n    Corresponds to PressReleaseDataPoint on the server\n    \"\"\"\n\n    fact_id: int\n    sec_filing_id: int\n    effective_value: Decimal\n    reported_value: Decimal\n    range_high_value: Decimal\n    is_instant_value: bool\n    UOM: str\n    format_type: FormatType\n    period_start: datetime\n    period_end: datetime\n    presentation_order: int\n    statement_type: str\n    table_id: str\n\n    def __init__(self, **kwargs) -> None:\n        set_field_values(self, kwargs=kwargs)\n\n\n@dataclass\nclass PressReleaseData:\n    \"\"\"\n    Corresponds to PressReleaseDataWrapper on the server\n    \"\"\"\n\n    facts: Sequence[PressReleaseDataPoint]\n    entity_name: str\n    entity_id: int\n    ticker: str\n    cik: str\n    fiscal_year: int\n    fiscal_period: int\n    url: str\n    date: datetime\n    # filing_id: int\n    filing_type: str\n    fiscal_year_end_date: datetime\n    date_reported: datetime\n\n    def __init__(self, **kwargs) -> None:\n        set_field_values(self, kwargs, {\"date_reported\"})\n\n\ndef press_release_raw(\n    company_identifiers: CompanyIdentifiers,\n    year: int,\n    period: Period,\n) -> Generator[PressReleaseData, None, None]:\n\n    
periodParameters = PeriodParameters(\n year=year,\n period=period,\n )\n payload = APIQueryParams(\n **{\n \"companiesParameters\": {\"companyIdentifiers\": company_identifiers},\n \"periodParameters\": periodParameters,\n \"pageParameters\": {\n \"standardizeBOPPeriods\": True,\n \"allowTwoToOneTableMatching\": False,\n \"matchToPreviousPeriod\": False,\n },\n }\n )\n data = _json_POST(\"pressReleaseGroups\", payload)\n if data:\n for d in data:\n yield PressReleaseData(**d)\n\n\nCATEGORICAL_COLUMNS = [\"fiscal_period\", \"UOM\", \"statement_type\"]\n\n\ndef press_release_data(\n company_identifiers: CompanyIdentifiers,\n year: int,\n period: Period,\n) -> \"pd.DataFrame\":\n filings = press_release_raw(\n company_identifiers=company_identifiers,\n year=year,\n period=period,\n )\n\n df = pd.DataFrame(\n [\n {\n **fact,\n **{\n \"ticker\": filing.ticker,\n \"CIK\": filing.cik,\n \"date_reported\": filing.date_reported,\n },\n }\n for filing in filings\n for fact in filing.facts\n ]\n )\n if df.empty:\n return df\n for c in CATEGORICAL_COLUMNS:\n df[c] = pd.Categorical(df[c])\n return df\n","sub_path":"calcbench/press_release.py","file_name":"press_release.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"336401127","text":"\"\"\"\nA command line interface to create reporec data.\n\"\"\"\nimport os\nimport json\nimport yaml\nimport argparse\n\nimport pandas as pd\n\nimport reporec\n\n# Options\nparser = argparse.ArgumentParser(description='A CLI for the Repository Record.')\nparser.add_argument(\"config_file\", help=\"A configuration file to use.\")\nparser.add_argument(\"--dir\", default=\"rrdata\", type=str, help=\"Folder to add the data to.\")\n\n\ndef read_config_file(fname):\n \"\"\"Reads a JSON or YAML file.\n \"\"\"\n if fname.endswith(\".yaml\") or fname.endswith(\".yml\"):\n rfunc = yaml.load\n elif fname.endswith(\".json\"):\n rfunc = json.load\n else:\n raise TypeError(\"Did not understand file type {}.\".format(fname))\n\n with open(fname, \"r\") as handle:\n ret = rfunc(handle)\n\n return ret\n\n\ndef main():\n\n args = vars(parser.parse_args())\n\n # Handle paths\n config_path = os.path.join(os.getcwd(), args[\"config_file\"])\n config = read_config_file(config_path)\n\n directory = os.path.join(os.getcwd(), args[\"dir\"])\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n for proj, records in config.items():\n write_path = os.path.join(directory, proj)\n\n # Handle conda\n conda_path = write_path + \"-conda.csv\"\n conda_df = None\n if os.path.exists(conda_path):\n conda_df = pd.read_csv(conda_path)\n\n # Handle github\n github_path = write_path + \"-github.csv\"\n github_df = None\n if os.path.exists(github_path):\n github_df = pd.read_csv(github_path)\n\n # Loop over records\n for num, r in enumerate(records):\n\n missing = {\"type\", \"username\"} - r.keys()\n if len(missing):\n raise KeyError(\"Did not find keys '{}' for record {}:{}\".format(missing, proj, num))\n\n username = r[\"username\"]\n repository = r.get(\"repository\", proj)\n\n if r[\"type\"].lower() == \"conda\":\n conda_df = reporec.conda.build_table(username, repository, old_data=conda_df)\n\n elif r[\"type\"].lower() == \"github\":\n github_df = reporec.github.build_table(username, repository, old_data=github_df)\n else:\n raise KeyError(\"Did not understand type key '{}'.\".format(r[\"type\"]))\n\n # Write it out\n if github_df is not None:\n github_df.sort_values(by=[\"timestamp\"], 
inplace=True)\n        github_df.to_csv(github_path, index=False)\n\n    if conda_df is not None:\n        conda_df.sort_values(by=[\"timestamp\"], inplace=True)\n        conda_df.to_csv(conda_path, index=False)\n\n        print(\"Finished project '{}'\".format(proj))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"reporec/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"145726864","text":"import MySQLdb\nimport _mysql\nimport os\nimport webapp2\n\nclass Page(webapp2.RequestHandler):\n    def get(self):\n        self.response.headers['Content-Type'] = 'text/plain'\n\n        if (os.getenv('SERVER_SOFTWARE') and\n            os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):\n            db = MySQLdb.connect(unix_socket='/cloudsql/whats-4-dinner-1199:instance', user='root')\n        else:\n            db = MySQLdb.connect(host='localhost', user='root')\n\n        db.query('SHOW VARIABLES')\n        result = db.store_result()\n        while True:\n            row = result.fetch_row()\n            if row:\n                self.response.write('%s\\n' % str(row[0]))\n            else:\n                break\n\n        db.close()\n\napp = webapp2.WSGIApplication([\n    ('/mysql', Page),\n    ])\n","sub_path":"app_mysql.py","file_name":"app_mysql.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"239394556","text":"#! python\nimport sys\nimport math\nimport euler\n\ndef main():\n    begin = 3\n    end = 1001\n    step = 2\n    sum = 1\n    for n in range(begin, end + 1, step):\n        ne = n ** 2\n        nw = ne - n + 1\n        sw = nw - n + 1\n        se = sw - n + 1\n        sum += ne + nw + sw + se\n    print(sum)\n\nif __name__ == \"__main__\":\n    main()\n\n\n","sub_path":"p28.py","file_name":"p28.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"77597876","text":"class Chapter:\n    title = ''\n\n    def __init__(self, title):\n        self.title = title\n\n\nchapters = [\n    Chapter('All Hail The King'),\n    Chapter('All Kings Die'),\n    Chapter('Return Of The Lord'),\n]\n\nold_chapter = chapters[1]\nprint('Before Replacement')\nfor chapter in chapters:\n    print(chapter.title)\n\nnew_chapter = Chapter('The New Chapter Begins')\nindex_to_replace = chapters.index(old_chapter)\nchapters[index_to_replace] = new_chapter\nprint('\\n\\nAfter Replacement')\nfor chapter in chapters:\n    print(chapter.title)\n\n\n\n\n\n\n\n","sub_path":"Others/replacing_object_inside_list.py","file_name":"replacing_object_inside_list.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
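The record above swaps a list element via index(), which raises ValueError when the element is absent. A hedged variant that guards against that case; the strings stand in for the Chapter objects, purely for illustration:

chapters = ['a', 'b', 'c']   # stand-ins for the Chapter objects above
old_chapter, new_chapter = 'b', 'B'
try:
    chapters[chapters.index(old_chapter)] = new_chapter  # same index()-based swap
except ValueError:
    chapters.append(new_chapter)  # element absent: fall back to appending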
+{"seq_id":"341566991","text":"from Indigo.portfolio.PortfolioBase import StockPortfolio\nfrom Indigo.event.Events import *\nfrom Indigo.indicator.Indicator import *\nfrom Indigo.setting import *\n\n\nclass ETF_Portfolio(StockPortfolio):\n    def __init__(self, events, statistics, cash):\n        super(ETF_Portfolio, self).__init__(events, statistics)\n        self.cash = cash\n\n    def update_signal(self, signalevent, feed):\n        instrument = signalevent.tick.instrument\n        df = feed.get_latest_bars(instrument, 120)\n        ma10 = Indicator().ma_list(df, 10).iloc[-1, 1]\n        ma20 = Indicator().ma_list(df, 20).iloc[-1, 1]\n        ma20_max = df.iloc[-20, :][FIELD_HIGH].max()\n        ma20_min = df.iloc[-20, :][FIELD_LOW].min()\n        ma120_max = df[FIELD_HIGH].max()\n        ma120_min = df[FIELD_LOW].min()\n\n        isInPortfolio = self._is_in_porforlio(instrument)\n        # Signal is long\n        if signalevent.direction is FIELD_DIRECTION_LONG:\n            per_pos = round((ma120_max - ma20) / (ma120_max - ma120_min), 2)\n            buy_amount = per_pos * self.get_position() - self.portfolio[PORTFOLIO_MARKET_VALUE].sum()\n            # allocate the buying position\n            if buy_amount > 0:\n                ask_price = signalevent.tick.ask_price\n                per_vol = round((ma10 - ask_price) / (ma10 - ma20_min))\n                # below the 20-day low, buy with the full position\n                if ask_price < ma20_min:\n                    buy_volume = buy_amount / (ask_price * 100)\n                    # minimum purchase is one lot (100 shares)\n                    if buy_volume >= 1:\n                        buy_volume = round(buy_volume) * 100\n                        self.events.put(OrderEvent(signalevent, buy_volume, FIELD_MKT))\n                # above the 20-day low and below the 10-day moving average\n                elif ask_price < ma10 and ask_price > ma20_min:\n                    buy_volume = buy_amount * per_vol / (ask_price * 100)\n                    # minimum purchase is one lot (100 shares)\n                    if buy_volume >= 1:\n                        buy_volume = round(buy_volume) * 100\n                        self.events.put(OrderEvent(signalevent, buy_volume, FIELD_MKT))\n        # Signal is short\n        elif signalevent.direction is FIELD_DIRECTION_SHORT and isInPortfolio:\n            index = self._instrument_index(instrument)\n            sell_volume = self.portfolio.loc[index, PORTFOLIO_VOLUME]\n            cost_price = self.portfolio.loc[index, PORTFOLIO_COST_PRICE]\n            bid_price = signalevent.tick.bid_price\n            # above the 20-day high, liquidate the whole position\n            if bid_price >= ma20_max and bid_price > cost_price:\n                self.events.put(OrderEvent(signalevent, sell_volume, FIELD_MKT))\n            # above the 10-day moving average and below the 20-day high\n            elif bid_price > ma10 and bid_price < ma20_max and bid_price > cost_price:\n                per_vol = round((ma20_max - bid_price) / (ma20_max - ma10))\n                sell_volume = sell_volume * per_vol / 100\n                # minimum order is one lot (100 shares)\n                if sell_volume >= 1:\n                    sell_volume = round(sell_volume) * 100\n                    self.events.put(OrderEvent(signalevent, sell_volume, FIELD_MKT))\n\n    def update_detail(self, fillevent):\n        if self._is_in_porforlio(fillevent.tick.instrument):\n            index = self._instrument_index(fillevent.tick.instrument)\n            if self.portfolio.loc[index, PORTFOLIO_DETAIL] != {}:\n                if self.portfolio.loc[index, PORTFOLIO_VOLUME] > 0:\n                    if fillevent.direction is FIELD_DIRECTION_LONG:\n                        self.portfolio.loc[index, PORTFOLIO_DETAIL]['pre_buy_price'] = fillevent.trade_price\n                        # on a long, clear the previous sell price\n                        self.portfolio.loc[index, PORTFOLIO_DETAIL]['pre_sell_price'] = None\n                    elif fillevent.direction is FIELD_DIRECTION_SHORT:\n                        self.portfolio.loc[index, PORTFOLIO_DETAIL]['pre_sell_price'] = fillevent.trade_price\n                        # on a short, clear the previous buy price\n                        self.portfolio.loc[index, PORTFOLIO_DETAIL]['pre_buy_price'] = None\n            else:\n                if fillevent.direction is FIELD_DIRECTION_LONG:\n                    detail = {'pre_buy_price': None, 'pre_sell_price': None}\n                    detail['pre_buy_price'] = fillevent.trade_price\n                    print(self.portfolio.loc[index, PORTFOLIO_DETAIL])\n                    self.portfolio.loc[index, PORTFOLIO_DETAIL] = detail\n\n\nif __name__ == '__main__':\n    from Indigo.statistics.Statistics import Statistics\n\n    st=Statistics()\n\n    p = ETF_Portfolio(events, st, 400000)\n\n    from Indigo.data.Tick import Tick\n\n    print('cash:', p.cash, '\\tposition', p.get_position())\n\n    f0 = FillEvent(\n        OrderEvent(SignalEvent(MarketEvent(Tick('000001', '平安银行', '2018-04-26', 10.85, 11.86)), FIELD_DIRECTION_LONG),\n                   4000,\n                   FIELD_MKT),\n        '2018-04-26', '09:39:20', 10.86)\n    f1 = FillEvent(\n        OrderEvent(SignalEvent(MarketEvent(Tick('600000', '浦发银行', '2018-04-26', 11.65, 11.66)), FIELD_DIRECTION_LONG),\n                   2000,\n                   FIELD_MKT),\n        '2018-04-26', '13:39:20', 11.66)\n    f2 = FillEvent(\n        OrderEvent(SignalEvent(MarketEvent(Tick('600000', '浦发银行', '2018-04-27', 11.69, 11.70)), FIELD_DIRECTION_LONG),\n                   4000,\n                   FIELD_MKT),\n        '2018-04-27', '10:39:20', 11.7)\n    f3 = FillEvent(\n        OrderEvent(SignalEvent(MarketEvent(Tick('600000', '浦发银行', '2018-04-30', 15, 15.1)), FIELD_DIRECTION_SHORT),\n                   6000,\n                   FIELD_MKT),\n        '2018-04-30', '13:39:20', 15)\n\n    
f0.set_commision(0.00025, 0.001)\n f1.set_commision(0.00025, 0.001)\n f2.set_commision(0.00025, 0.001)\n f3.set_commision(0.00025, 0.001)\n\n m1 = MarketEvent(Tick('600000', '浦发银行', '2018-04-28', 12, 13))\n\n print(p.portfolio)\n\n print('--------')\n p.update_fill(f1)\n print(p.portfolio)\n print('--------')\n p.update_fill(f0)\n print(p.portfolio)\n print('--------')\n p.update_fill(f2)\n print(p.portfolio)\n print('--------')\n p.update_fill(f3)\n print(p.portfolio)\n print('--------')\n p.update_fill(f1)\n print(p.portfolio)\n print('--------')\n p.update_fill(f2)\n print(p.portfolio)\n print('--------')\n p.update_fill(f3)\n print(p.portfolio)\n\n print('--------')\n print('cash:', p.cash, '\\tposition', p.get_position())\n","sub_path":"Indigo/portfolio/ETF_Portfolio.py","file_name":"ETF_Portfolio.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"63670954","text":"import os\nimport datetime\nimport threading\nimport inspect\nimport logging\n\n# pman local dependencies\nfrom .message import Message\n\nlogging.basicConfig(level=logging.DEBUG,\n format='(%(threadName)-10s) %(message)s')\n\nclass debug(object):\n \"\"\"\n A simple class that provides some helper debug functions. Mostly\n printing function/thread names and checking verbosity level\n before printing.\n \"\"\"\n\n def log(self, *args):\n \"\"\"\n get/set the log object.\n\n Caller can further manipulate the log object with object-specific\n calls.\n \"\"\"\n if len(args):\n self._log = args[0]\n else:\n return self._log\n\n def name(self, *args):\n \"\"\"\n get/set the descriptive name text of this object.\n \"\"\"\n if len(args):\n self.__name = args[0]\n else:\n return self.__name\n\n def __init__(self, **kwargs):\n \"\"\"\n Constructor\n \"\"\"\n\n self.verbosity = 0\n self.level = 0\n\n self.b_useDebug = False\n self.str_debugDirFile = '/tmp'\n for k, v in kwargs.items():\n if k == 'verbosity': self.verbosity = v\n if k == 'level': self.level = v\n if k == 'debugToFile': self.b_useDebug = v\n if k == 'debugFile': self.str_debugDirFile = v\n\n if self.b_useDebug:\n str_debugDir = os.path.dirname(self.str_debugDirFile)\n str_debugName = os.path.basename(self.str_debugDirFile)\n if not os.path.exists(str_debugDir):\n os.makedirs(str_debugDir)\n self.str_debugFile = '%s/%s' % (str_debugDir, str_debugName)\n self.debug = Message(logTo = self.str_debugFile)\n self.debug._b_syslog = False\n self.debug._b_flushNewLine = True\n self._log = Message()\n self._log._b_syslog = True\n self.__name = \"pman\"\n\n\n def __call__(self, *args, **kwargs):\n self.qprint(*args, **kwargs)\n\n def qprint(self, *args, **kwargs):\n \"\"\"\n The \"print\" command for this object.\n\n :param kwargs:\n :return:\n \"\"\"\n\n self.level = 0\n self.msg = \"\"\n\n for k, v in kwargs.items():\n if k == 'level': self.level = v\n if k == 'msg': self.msg = v\n\n if len(args):\n self.msg = args[0]\n\n if self.b_useDebug:\n write = self.debug\n else:\n write = print\n\n if self.level <= self.verbosity:\n\n if self.b_useDebug:\n write('| %50s | %30s | ' % (\n threading.current_thread(),\n inspect.stack()[1][3]\n ), end='', syslog = True)\n else:\n write('%26s | %50s | %30s | ' % (\n datetime.datetime.now(),\n threading.current_thread(),\n inspect.stack()[1][3]\n ), end='')\n for t in range(0, self.level): write(\"\\t\", end='')\n 
write(self.msg)","sub_path":"pfcon/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"191281820","text":"#!/usr/bin/python3\nfrom math import exp\nfrom random import random\nfrom random import seed\n\nimport pickle\nimport time\n\n\n# Initializes a neural network of a given layer sizes\n# Network is an array of layers, where each layer is an array of dictionaries,\n# containing weights, outputs, etc. of each neuron\ndef initialize_network(n_inputs,n_hidden,n_outputs):\n network = list()\n\n # for each neuron in hidden layer initializes (n_inputs+1) weights\n # n_inputs for each input neuron and 1 for bias\n hidden_layer = [{'weights': [random() for i in range(n_inputs+1)]} for i in range(n_hidden)]\n network.append(hidden_layer)\n # for each neuron in output layer initializes (n_hidden+1) weights\n # n_hidden for each hidden layer neuron and 1 for bias\n output_layer = [{'weights': [random() for i in range(n_hidden+1)]} for i in range(n_outputs)]\n network.append(output_layer)\n return network\n\n\n# Calculates activation ie. net input of a neuron (weights * inputs)\ndef activate(weights, inputs):\n activation = weights[-1]\n for i in range(len(weights)-1):\n activation += weights[i] * inputs[i]\n return activation\n\n\n# Applies a transfer function to the activation (net input)\ndef transfer(activation):\n return 1.0/(1.0 + exp(-activation))\n\n\n# Forward propagates an input through all layers\n# Loops thorough neurons and saves their outputs as records in dictionaries 'outputs'\ndef forward_propagate(network, input_vector):\n inputs = input_vector\n for layer in network:\n new_inputs = []\n for neuron in layer:\n activation = activate(neuron['weights'], inputs)\n neuron['output'] = transfer(activation)\n new_inputs.append(neuron['output'])\n inputs = new_inputs\n return inputs\n\n\n# Calculates transfer derivative - rate of change of neuron's output function\ndef transfer_derivative(output):\n return output*(1.0-output)\n\n\n# Back propagates and error\n# Error is calculated with following equation: error = (expected - output) * transfer_derivative(output)\n# For hidden layer neurons is takes another form: error = (weight_k * error_j) * transfer_derivative(output)\n# It takes this form because we have to trace how much impact this neuron weight does\n# by following its connections to output. 
Weight_k is weight of connection to output neuron and error_j\n# is calculated error of that neuron\n# In function we define neuron delta, which reflects the change the error implies on a neuron\ndef back_propagate_error(network, expected_output):\n # Iterating through layers in reverse\n for i in reversed(range(len(network))):\n layer = network[i]\n errors = list()\n # If not the output layer\n if i != len(network)-1:\n # For every neuron in hidden layer\n for j in range(len(layer)):\n error = 0.0\n # For every neuron in next layer (every connection forward)\n for neuron in network[i+1]:\n error += (neuron['weights'][j] * neuron['delta'])\n errors.append(error)\n else:\n for j in range(len(layer)):\n neuron = layer[j]\n errors.append(expected_output[j] - neuron['output'])\n for j in range(len(layer)):\n neuron = layer[j]\n neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])\n\n\n# Updates weights of the network with given formula:\n# weight = weight + learning_rate * error * input\n# Where error is delta of given neuron and input is the activation of a neuron (net input) - value that cause the error\ndef update_weights(network, input_vector, learning_rate):\n # For every layer in neural network\n for i in range(len(network)):\n # inputs = input_vector[:-1] ??????\n inputs = input_vector\n # If not hidden layer\n if i != 0:\n # Neuron inputs are outputs of connected neurons from previous layer\n inputs = [neuron['output'] for neuron in network[i-1]]\n # For neurons in layer\n for neuron in network[i]:\n # For every weight\n for j in range(len(inputs)):\n neuron['weights'][j] += learning_rate * neuron['delta'] * inputs[j]\n # Update bias\n neuron['weights'][-1] += learning_rate * neuron['delta']\n\n\n# Function training network for a specified number of epochs with given inputs and outputs\n# Outputs must be already converted to a one hot encoding\ndef train(network, train_inputs, train_outputs, train_out, learning_rate, n_epochs):\n for epoch in range(n_epochs):\n sum_error = 0\n good_guesses = 0\n # For every row in training data set\n for i in range(len(train_inputs)):\n outputs = forward_propagate(network, train_inputs[i])\n sum_error += sum([(train_outputs[i][j] - outputs[j])**2 for j in range(len(train_outputs[i]))])\n back_propagate_error(network, train_outputs[i])\n update_weights(network, train_inputs[i], learning_rate)\n if get_result(outputs) == train_out[i]:\n good_guesses += 1\n if i % 1000 == 0:\n print(good_guesses)\n print('>epoch=%d, learning_rate=%.3f, error=%.3f' % (epoch, learning_rate, sum_error), file=open(\"log\", \"a\"))\n\n\n# Converts output to single float (from one hot)\ndef get_result(output):\n return output.index(max(output))\n\n\nseed(time.time())\n\n# Load data set\nwith open(\"pickled_mnist.pkl\", \"br\") as fh:\n data = pickle.load(fh)\ntrain_imgs = data[0]\ntest_imgs = data[1]\ntrain_labels = data[2]\ntest_labels = data[3]\ntrain_labels_one_hot = data[4]\ntest_labels_one_hot = data[5]\n\n# Train network\nnetwork = initialize_network(784, 100, 10)\n\n\n# Save empty network to file\nwith open(\"net_empty.pkl\", \"bw\") as fh:\n data = network\n pickle.dump(data, fh)\nnet100 = network\nprint(\"Training for 100 epochs...\")\ntrain(net100, test_imgs, test_labels_one_hot, test_labels, 0.01, 100)\n# Save to file\nwith open(\"net_100.pkl\", \"bw\") as fh:\n data = net100\n pickle.dump(data, fh)\n\nnet1000 = network\ntrain(net1000, train_imgs, train_labels, train_labels_one_hot, 0.01, 1000)\n# Save to file\nwith open(\"net_1000.pkl\", \"bw\") as 
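# The update rule spelled out above (weight += learning_rate * delta * input,
# with delta = error * transfer_derivative(output)) collapses to one outer
# product per layer in numpy. A minimal sketch under that reading of the
# record, not a drop-in for the dictionary-based network:

import numpy as np

def sgd_step(W, b, delta, inputs, lr=0.01):
    """Apply w += lr * delta * input and the matching bias update in one shot."""
    W += lr * np.outer(delta, inputs)
    b += lr * delta
    return W, b

# output-layer delta for sigmoid units: (target - output) * output * (1 - output)
output = np.array([0.8, 0.2])
target = np.array([1.0, 0.0])
delta = (target - output) * output * (1.0 - output)
W, b = sgd_step(np.zeros((2, 3)), np.zeros(2), delta, np.array([0.5, 0.1, 0.9]))
print(W)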
fh:\n data = net1000\n pickle.dump(data, fh)\n\nnet10000 = network\ntrain(net10000, train_imgs, train_labels_one_hot, train_labels, 0.01, 10000)\nwith open(\"net_10000.pkl\", \"bw\") as fh:\n data = net10000\n pickle.dump(data, fh)\n","sub_path":"digits_server.py","file_name":"digits_server.py","file_ext":"py","file_size_in_byte":6441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"5205089","text":"from unittest.mock import Mock\n\nimport pytest\nfrom evolution import __version__\nfrom evolution.evolution import event, send_message\n\n\nclass AsyncMock(Mock):\n async def __call__(self, *args, **kwargs):\n return super(AsyncMock, self).__call__(*args, **kwargs)\n\n\ndef test_version():\n assert __version__ == \"0.1.0\"\n\n\ndef test_event():\n topic = \"test\"\n data = {\"a\": 10, \"b\": 9.9, \"c\": \"data\"}\n expected = '{\"topic\": \"test\", \"a\": 10, \"b\": 9.9, \"c\": \"data\"}'\n assert event(topic, data) == expected\n\n\n@pytest.mark.asyncio\nasync def test_send_message_with_no_clients():\n clients = set()\n assert await send_message(clients, None, None) is None\n\n\n@pytest.mark.asyncio\nasync def test_send_message_sends_to_multiple_clients():\n ws1 = type(\"\", (), {})()\n ws1.send = AsyncMock()\n\n ws2 = type(\"\", (), {})()\n ws2.send = AsyncMock()\n\n clients = set()\n clients.add(ws1)\n clients.add(ws2)\n\n await send_message(clients, \"test\", {\"a\": 1})\n expected_call = '{\"topic\": \"test\", \"a\": 1}'\n\n for client in clients:\n client.send.assert_called_once_with(expected_call)\n","sub_path":"tests/test_evolution.py","file_name":"test_evolution.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"443791885","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# set the variable 월 (monthly amount) to 48584.\n월 = 48584\n# set the variable 총금액 (total amount) to 월 * 36.\n총금액 = 월 * 36\n# print the value of 총금액 with the print function.\nprint(총금액)\n\n","sub_path":"ex020.py","file_name":"ex020.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"4848454","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 27 13:12:30 2016\n\n@author: andrej\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom cycler import cycler\nimport pandas as pd\nimport os\n\nplt.style.use('ggplot')\nmpl.rcParams['font.size'] = 16\nmpl.rcParams['axes.facecolor'] = 'white'\n#mpl.rcParams['axes.grid'] = True\n\n#default_colors = mpl.rcParams['axes.color_cycle']\n#mpl.rcParams['axes.color_cycle'] = ['#5DBA42', '#42BAB2', '#E24A33',\n# '#777777', '#348ABD', '#FBC15E', '#E27533']\n# following hex-codes I would describe as:\n# ['#5DBA42', '#42BAB2', '#E24A33','#777777', '#348ABD', '#FBC15E', '#E27533']\n# 'green', 'turquoise', 'red', 'dark gray', 'blue', 'yellow'\nmpl.rcParams['axes.prop_cycle'] = cycler('color',['#E24A33', '#348ABD' , '#777777',\n '#5DBA42','#42BAB2', '#FBC15E', '#E27533'])\n\n#E24A33', '#348ABD', '#988ED5', '#777777', '#FBC15E', '#8EBA42', '#FFB5B8']),\n\nfiles_path = './figures/'\n\n#%% ifg data from bmi website and fragdenstaat.de admin page\n\nyears = ['2006','2007','2008','2009','2010','2011','2012','2013','2014','2015']\n\ntotal = np.array([[2278,1265,1548,1358,1556,3280,6077,4736,8703,9376]])\nbmf = np.array([[171,135,493,295,245,1179,2967,1390,5347,4170]])\n\nfds = np.array([[0,0,0,0,0,352,2405,2416,3049,4017]])\nfds_nurBund = 
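# The test file above hand-rolls an AsyncMock subclass because it predates
# Python 3.8; the standard library now ships one with await-aware assertions.
# A small sketch of the same idea using the built-in (assumes pytest-asyncio):

import pytest
from unittest.mock import AsyncMock  # standard library since Python 3.8

@pytest.mark.asyncio
async def test_await_is_recorded():
    send = AsyncMock()
    await send('{"topic": "test"}')  # the mock is awaitable out of the box
    send.assert_awaited_once_with('{"topic": "test"}')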
np.array([[0,0,0,0,0,349,2253,1793,1627,2515]])\nwhout_bmf = (total-bmf)-fds_nurBund\n\nfds_in_percent = (fds_nurBund/total)*100\n\ndata = np.concatenate((bmf,fds_nurBund,whout_bmf),axis=0)\n\n#%% plot\n\ndf = pd.DataFrame(data.T,columns=['BM Finanzen','FragDenStaat','Gesamt ohne BMF und FDS'],\n index = years)\nax = df.plot(kind='area',figsize=(16,12),alpha=0.9)\nax.xaxis.set_tick_params(top='off')\nax.yaxis.set_tick_params(right='off')\nax.annotate(\"Start von FragDenStaat (Oktober 2011)\",\n xy=(4.5,2500), xycoords='data',\n size=24,\n ha=\"center\", va=\"center\",\n xytext=(3, 5000), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n linewidth=2,\n fc=\"k\",ec=\"k\",\n connectionstyle=\"arc3,rad=0.2\")\n )\nax.grid(b=True, which='major', color=[0.5,0.5,0.5], linestyle='-')\nplt.xlabel('Jahr')\nplt.ylabel('Anzahl von Anfragen')\n\nax.legend_.set_bbox_to_anchor([1.01,1.0])\nlgd = ax.get_legend()\n#plt.ylim(0,np.max(data.sum(axis=0)))\nplt.ylim(0,10000)\n\n#%% save figure\n\nif os.path.isdir(files_path):\n plt.savefig(files_path+'ifg_gesamtzahlen.png',bbox_extra_artists=[lgd],\n bbox_inches='tight',dpi=150)\nelse:\n os.mkdir(files_path)\n plt.savefig(files_path+'ifg_gesamtzahlen.png',bbox_extra_artists=[lgd],\n bbox_inches='tight',dpi=150)\nplt.close()\n\n\n\n\n","sub_path":"official_ifg_numbers.py","file_name":"official_ifg_numbers.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"653583058","text":"import collections\nimport pandas as pd\nimport os\nimport numpy as np\nfrom functools import reduce\nimport Analytics.series_utils as s_util\nSU = s_util.date_utils_collection()\nimport panormus.data.bo.econ as econ\nconn = econ.get_instance_using_credentials()\nfrom datetime import datetime,timedelta\nimport pickle\n\nTEMP_DIR = os.environ['TEMP']\n\nclass swf:\n def __init__(self, local_sample=('1943-01-01', '2025-01-01'),short_name=None):\n #check if it has been run before already:\n #if not short_name is None:\n #self.temp_pickle_dir = os.path.join(TEMP_DIR, short_name+'.pickle')\n wf = collections.OrderedDict()\n self.mysmpl = local_sample\n self.genr_empty_df()\n self.genr_empty_df_m()\n self.genr_empty_df_q()\n self.df = collections.OrderedDict()\n self.alpha = collections.OrderedDict()\n self.combo_indicator = collections.OrderedDict()\n\n def importts(self, dir, filetype='csv', repeat=True,to_freq='bday',date_parse=False,date_format='%d/%m/%Y',customColName=None,force_override = True):\n if filetype == 'csv':\n df_raw = pd.read_csv(dir, header=0, index_col=0)\n if date_parse:\n df_raw.index = pd.to_datetime(df_raw.index, format=date_format)\n\n if len(df_raw.index) < 0.001:\n _, tail = os.path.split(dir)\n new_name = tail.split('.')[0]\n if not force_override:\n while new_name in self.df.keys():\n new_name = new_name + '_new'\n self.df[new_name] = self.empty_df\n\n else:\n df_raw.index = pd.to_datetime(df_raw.index)\n if not (customColName is None):\n df_raw.columns = customColName\n for s in df_raw.columns.tolist():\n this_df = df_raw[[s]]\n # should use conversion down to bday here!!!\n if to_freq=='bday':\n this_df = SU.conversion_to_bDay(this_df)\n this_df = pd.merge(self.empty_df, this_df, left_index=True, right_index=True, how='left')\n this_df = this_df[[s]]\n elif to_freq=='M':\n this_df = SU.conversion_down_to_m(this_df)\n this_df = pd.merge(self.empty_df_m, this_df, left_index=True, right_index=True, how='left')\n this_df = this_df[[s]]\n elif to_freq=='Q':\n this_df 
= SU.conversion_to_q(this_df)\n this_df = pd.merge(self.empty_df_q, this_df, left_index=True, right_index=True, how='left')\n this_df = this_df[[s]]\n else:\n print ('Frequency does not identify')\n raise ValueError\n if repeat:\n first_idx = this_df.first_valid_index()\n last_idx = this_df.last_valid_index()\n this_df = SU.repeat_value(this_df,first_idx,last_idx)\n new_name = s\n if not force_override:\n while new_name in self.df.keys():\n new_name = new_name + '_new'\n self.df[new_name] = this_df\n elif filetype == 'EconDB':\n if not isinstance(dir,list):\n print ('Sorry, the database ticker should be a list! ')\n df_raw = conn.get(dir)\n\n if len(df_raw.index) < 0.001:\n print ('Sorry, the ticker is not found in EconDB')\n raise ValueError\n\n else:\n df_raw.index = pd.to_datetime(df_raw.index)\n for s in df_raw.columns.tolist():\n this_df = df_raw[[s]]\n # should use conversion down to bday here!!!\n if to_freq=='bday':\n this_df = SU.conversion_to_bDay(this_df)\n this_df = pd.merge(self.empty_df, this_df, left_index=True, right_index=True, how='left')\n this_df = this_df[[s]]\n elif to_freq=='M':\n this_df = SU.conversion_down_to_m(this_df)\n this_df = pd.merge(self.empty_df_m, this_df, left_index=True, right_index=True, how='left')\n this_df = this_df[[s]]\n elif to_freq=='Q':\n this_df = SU.conversion_to_q(this_df)\n this_df = pd.merge(self.empty_df_q, this_df, left_index=True, right_index=True, how='left')\n this_df = this_df[[s]]\n else:\n print ('Frequency does not identify')\n raise ValueError\n if repeat:\n first_idx = this_df.first_valid_index()\n last_idx = this_df.last_valid_index()\n this_df = SU.repeat_value(this_df,first_idx,last_idx)\n new_name = s\n if not force_override:\n while new_name in self.df.keys():\n new_name = new_name + '_new'\n self.df[new_name] = this_df\n\n def genr_empty_df(self):\n date_rng = pd.date_range(start=self.mysmpl[0], end=self.mysmpl[1], freq='B')\n self.empty_df = pd.DataFrame(index=date_rng, columns=[0])\n\n def genr_empty_df_m(self):\n date_rng = SU.empty_M_df()\n self.empty_df_m = pd.DataFrame(index=pd.to_datetime(date_rng['Date']), columns=[0])\n self.empty_df_m = SU.conversion_to_FOM(self.empty_df_m.loc[self.mysmpl[0]:self.mysmpl[1],:])\n\n def genr_empty_df_q(self):\n date_rng = SU.empty_Q_df()\n self.empty_df_q = pd.DataFrame(index=pd.to_datetime(date_rng['Date']), columns=[0])\n self.empty_df_q = SU.conversion_to_FOM(self.empty_df_q.loc[self.mysmpl[0]:self.mysmpl[1], :])\n self.empty_df_q.loc[:,:] = 0\n\n def genr(self, name, const=0):\n df = self.empty_df.copy()\n new_name = name\n while new_name in self.df.keys():\n new_name = new_name + '_new'\n df.columns = [new_name]\n df = const\n self.df[new_name] = df\n return df\n\n def pool_genr(self, pool, poolname, prefix='', suffix='', const=np.nan):\n df = self.empty_df.copy()\n for iso in pool:\n col_name = prefix + iso + suffix\n df[col_name] = const\n df.drop(0, inplace=True)\n while poolname in self.df.keys():\n poolname = poolname + '_new'\n self.df[poolname] = df\n return self.df[poolname]\n\n def pool_makegroup(self, pool, poolname, prefix='', suffix=''):\n for iso in pool:\n key = prefix + iso + suffix\n if not key in self.df.keys():\n print('Sorry, can not make group if the series is not in the wf: ', key)\n raise ValueError\n dfs = [self.df[prefix + iso + suffix] for iso in pool]\n if len(dfs) == 1:\n df_pool = dfs[0]\n else:\n df_pool = reduce(lambda df1, df2: pd.merge(df1, df2, left_index=True, right_index=True, how='outer'), dfs)\n while poolname in self.df.keys():\n poolname 
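# The importts branches above push every series onto a business-day, monthly,
# or quarterly grid via the SU helpers and then forward-fill. A plain-pandas
# sketch of that alignment (the frequency aliases are my assumption about what
# those helpers produce):

import pandas as pd

def align(df, freq='B'):
    """Reindex a time series onto a regular grid and forward-fill its span."""
    df = df.sort_index()
    grid = pd.date_range(df.index.min(), df.index.max(), freq=freq)
    return df.reindex(grid).ffill()  # repeat the last value, like repeat_value

s = pd.DataFrame({'px': [1.0, 2.0]},
                 index=pd.to_datetime(['2018-01-02', '2018-01-05']))
print(align(s, 'B'))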
= poolname + '_new'\n self.df[poolname] = df_pool\n return self.df[poolname]\n\n def update_df(self, df_name, new_df,to_freq='bday',repeat='True'):\n new_df.index = pd.to_datetime(new_df.index)\n # should use conversion down to bday here!!!\n if to_freq == 'bday':\n new_df = SU.conversion_to_bDay(new_df)\n new_df = pd.merge(self.empty_df, new_df, left_index=True, right_index=True, how='left')\n new_df = new_df.drop([0], axis=1)\n elif to_freq == 'M':\n new_df = SU.conversion_down_to_m(new_df)\n new_df = pd.merge(self.empty_df_m, new_df, left_index=True, right_index=True, how='left')\n new_df = new_df.drop([0], axis=1)\n\n elif to_freq == 'Q':\n new_df = SU.conversion_to_q(new_df)\n new_df = pd.merge(self.empty_df_q, new_df, left_index=True, right_index=True, how='left')\n new_df = new_df.drop([0], axis=1)\n else:\n print('Frequency does not identify')\n raise ValueError\n if repeat:\n for s in new_df.columns:\n last_idx = new_df.loc[:, [s]].last_valid_index()\n new_df.loc[new_df.index <= last_idx, s].fillna(method='ffill', inplace=True)\n\n self.df[df_name] = new_df\n\n def add_df(self, df_name, new_df, repeat=True,to_freq='bday',force_override = True):\n if not force_override:\n while df_name in self.df.keys():\n df_name = df_name + '_new'\n new_df.index = pd.to_datetime(new_df.index)\n else:\n if df_name in self.df.keys():\n print (df_name, ' is already in the df keys')\n del self.df[df_name]\n\n # should use conversion down to bday here!!!\n if to_freq == 'bday':\n new_df = SU.conversion_to_bDay(new_df)\n new_df = pd.merge(self.empty_df, new_df, left_index=True, right_index=True, how='left')\n new_df = new_df.drop([0],axis=1)\n elif to_freq == 'M':\n new_df = SU.conversion_down_to_m(new_df)\n new_df = pd.merge(self.empty_df_m, new_df, left_index=True, right_index=True, how='left')\n new_df = new_df.drop([0],axis=1)\n\n elif to_freq == 'Q':\n new_df = SU.conversion_to_q(new_df)\n new_df= pd.merge(self.empty_df_q, new_df, left_index=True, right_index=True, how='left')\n new_df= new_df.drop([0],axis=1)\n else:\n print('Frequency does not identify')\n raise ValueError\n if repeat:\n for s in new_df.columns:\n last_idx = new_df.loc[:, [s]].last_valid_index()\n new_df.loc[new_df.index <= last_idx, s].fillna(method='ffill', inplace=True)\n\n self.df[df_name] = new_df\n\n def create_indicator_group_folder(self, indicator_dir):\n \"\"\"\n Tearsheet is saved to the reporting folder\n \"\"\"\n destination = indicator_dir\n\n self.create_folder(destination)\n assert os.path.isdir(destination)\n assert os.path.exists(destination)\n\n def create_folder(self, path):\n \"Creates all folders necessary to create the given path.\"\n try:\n if not os.path.exists(path):\n os.makedirs(path)\n except IOError as io:\n print(\"Cannot create dir %s\" % path)\n raise io\n\ndef initialise_wf(temp_pickle,hours=12):\n if os.path.exists(temp_pickle):\n if os.path.getmtime(temp_pickle) > datetime.timestamp(datetime.now() - timedelta(hours=hours)):\n with open(temp_pickle, 'rb') as handle:\n local_db = pickle.load(handle)\n return local_db\n else:\n return swf()\n else:\n return swf()\n\ndef use_cached(key_list,use_dict):\n '''\n this decorator is used to check if the list of keys are already in the dictionary. 
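# initialise_wf above reuses a pickled workflow only while it is younger than
# N hours. The same freshness check as a reusable helper; the builder callback
# and the write-back step are my additions, not part of the record:

import os
import pickle
from datetime import datetime, timedelta

def load_or_build(path, builder, max_age_hours=12):
    """Unpickle path when it exists and is fresh; otherwise rebuild and cache."""
    if os.path.exists(path):
        cutoff = datetime.now() - timedelta(hours=max_age_hours)
        if os.path.getmtime(path) > cutoff.timestamp():
            with open(path, 'rb') as fh:
                return pickle.load(fh)
    obj = builder()
    with open(path, 'wb') as fh:  # refresh the cache for the next run
        pickle.dump(obj, fh)
    return obj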
if yes, the function need not to be re-run, to save some time\n :param key_list: a list of keys, i.e.['Forward_sum_Z','credit_ngdp_12_z']\n :param use_dict: the dictionary used in cache\n :return:\n '''\n def decorator(func):\n def wrapper(*args,**kwargs):\n if len([i for i in key_list if i not in use_dict.df.keys()])<0.01:\n return\n else:\n return func(*args, **kwargs)\n return wrapper\n return decorator\n\n\n # check if the key are all in the dictionary, if yes, return, else, run the function\n print ('pass this function!')\n pass\n\nclass indicator_tree:\n # a indicator tree is a tree class that put signal node into parent-child type structure\n # signal node is a node object that contain 3 things: name, collection of z_score, collection of raw_data\n def __init__(self):\n pass\n\n def get_df(self):\n pass\n\n def print_all_sub(self):\n pass\n\n def nodes(self):\n pass\n\nclass signal_node:\n def __init__(self):\n pass\n\n def signal_name(self):\n pass\n\n ","sub_path":"Caxton/panormus_OLD/wfcreate.py","file_name":"wfcreate.py","file_ext":"py","file_size_in_byte":12300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"314674816","text":"\nA=[6, 5, 9, 1, 5, 3, 4, 10, 4, 1, 2, 4, 1, 50]\n\ndef merge_sort(input):\n print(\"len(input): \", len(input))\n if len(input)==1:\n return input\n output=[]\n firsthalf = input[0:(len(input)//2)]\n secondhalf = input[len(input)//2:]\n print(\"firsthalf: \", firsthalf, \"secondhalf: \", secondhalf)\n sortedfirsthalf = merge_sort(firsthalf)\n sortedsecondhalf = merge_sort(secondhalf)\n print(\"sortedfirsthalf\", sortedfirsthalf, \"sortedsecondhalf\", sortedsecondhalf)\n print(\"len(sortedfirsthalf): \", len(sortedfirsthalf), \"len(sortedsecondhalf)\", len(sortedsecondhalf))\n\n i=0\n j=0\n\n while i <= len(sortedfirsthalf)+1:\n while j <= len(sortedsecondhalf)+1:\n print(\"loop start, i, j are: \", i, j)\n if i == len(sortedfirsthalf) and j != len(sortedsecondhalf):\n print(\"i is maxed out\")\n output.append(sortedsecondhalf[j])\n j += 1\n elif j == len(sortedsecondhalf) and i != len(sortedfirsthalf):\n print(\"j is maxed out\")\n output.append(sortedfirsthalf[i])\n i += 1\n elif i == len(sortedfirsthalf) and j == len(sortedsecondhalf):\n return output\n elif sortedfirsthalf[i] >= sortedsecondhalf[j]:\n print(\"sortedsecondhalf[j]\", sortedsecondhalf[j])\n output.append(sortedsecondhalf[j])\n print(output)\n j += 1\n print(\"i, j are: \", i, j)\n else:\n print(\"sortedfirsthalf[i]\",sortedfirsthalf[i])\n output.append(sortedfirsthalf[i])\n print(output)\n i += 1\n print(\"blah i, j are: \", i, j)\n print(\"output\", output)\n return output\n\nB=merge_sort(A)\nprint(B)\n\n","sub_path":"merge-sort.py","file_name":"merge-sort.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"587621661","text":"import math;\n\n\nvalorA = float(input('Digita o valor de A: '))\n\nvalorB = float(input('Digita o valor de B: '))\n\nvalorC = float(input('Digita o valor de C: '))\n\n\ndelta = valorB ** 2 - 4 * valorA * valorC\n\nif delta == 0:\n raiz1 = (-valorB +math.sqrt(delta)) / (2 * valorA)\n print(\"A unica raiz é: \" , raiz1)\nelse:\n if delta < 0:\n print(\"Esta equação não possui raiz inteiras\") \n else:\n raiz1 = (-valorB +math.sqrt(delta)) / (2 * valorA)\n raiz2 = (-valorB -math.sqrt(delta)) / (2 * valorA)\n print(\"A primeira Raiz é :\", raiz1)\n print(\"A primeira Raiz é :\", 
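# For comparison with the instrumented merge sort above, the textbook
# formulation without the tracing prints (same divide/merge structure, written
# independently rather than extracted from the record):

def merge_sort(seq):
    if len(seq) <= 1:
        return list(seq)
    mid = len(seq) // 2
    left, right = merge_sort(seq[:mid]), merge_sort(seq[mid:])
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:  # take the smaller head at each step
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])  # at most one side still has a tail
    merged.extend(right[j:])
    return merged

print(merge_sort([6, 5, 9, 1, 5, 3, 4, 10, 4, 1, 2, 4, 1, 50]))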
raiz2)","sub_path":"formulaBhaskara.py","file_name":"formulaBhaskara.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"500412163","text":"import csv\nfrom django.core.management.base import BaseCommand\nfrom register.models import Switch, DeviceModel\n\n\nclass Command(BaseCommand):\n help = 'adds desktops to the database from a'\n 'csv file called desktops.csv'\n\n def handle(self, *args, **options):\n with open('switches.csv', 'rt') as csvfile:\n switches = csv.reader(csvfile)\n for row in switches:\n serialNumber = row[0].strip() # remove whitespace\n # spilt IP Address from hostname\n hostAndIp = row[1]\n leftBrace = hostAndIp.index(\"(\")\n rightBrace = hostAndIp.index(\")\")\n hostname = hostAndIp[0:leftBrace]\n ipAddress = hostAndIp[leftBrace+1:rightBrace]\n modelName = row[3]\n\n if (serialNumber):\n switch, created = Switch.objects.get_or_create(\n serialNumber=serialNumber,\n ipAddress=ipAddress,\n hostname=hostname\n )\n try:\n model = DeviceModel.objects.get(modelName=modelName)\n switch.modelName = model\n except:\n print(\"unknown modeltype %s\" % modelName)\n if created:\n print('creating switch with serialNumber %s'\n % switch.serialNumber)\n switch.save()\n \n","sub_path":"register/management/commands/importSwitches.py","file_name":"importSwitches.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"459586056","text":"from renderer import Renderer\n\n\nclass Tower:\n MAX_NUM_PLATES = 5\n\n def __init__(self, row, col):\n self.row = row\n self.col = col\n self.plates = []\n self.pin_symbol = '|'\n\n def push_plate(self, plate):\n if not self.plates or plate.size < self.plates[-1].size:\n self.plates.append(plate)\n return 1\n else:\n print('Invalid plate movement!')\n return 0\n\n def pop_plate(self):\n if self.plates:\n return self.plates.pop()\n else:\n print('Tower empty')\n return []\n\n def draw(self, renderer: Renderer):\n # Draw plates\n for ind, plate in enumerate(self.plates):\n plate.draw(self.row - ind, self.col, renderer)\n # Draw pin\n self.MAX_NUM_PLATES - len(self.plates)\n for ind in range(len(self.plates), self.MAX_NUM_PLATES):\n renderer.draw_char(self.row - ind,\n self.col,\n self.pin_symbol)\n","sub_path":"tower.py","file_name":"tower.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"36463180","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nimport torchvision\nfrom torchvision import models, transforms\nfrom torch.utils.data import Dataset, DataLoader\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nimport pandas as pd\n\ndata_dir = \"../data/\"\n\ninput_size = 224\n\nbatch_size = 512\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndata_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(input_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'test': transforms.Compose([\n transforms.Resize(input_size),\n transforms.CenterCrop(input_size),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n}\n\nmodel = torch.load(\"model/pretrained.pth\")\nmodel.eval()\n\nclass 
ActivitySkeletalDataset(Dataset):\n \"\"\"Face Landmarks dataset.\"\"\"\n\n def __init__(self, root_dir, key, transform=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.data = pd.read_hdf(root_dir+\"data_training_test.h5\", key=key+\"_data\")\n self.labels = pd.read_hdf(root_dir+\"data_training_test.h5\", key=key+\"_label\")\n self.root_dir = root_dir\n # self.transform = transform\n self.transform = transforms.Compose([transforms.ToTensor()])\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n data = self.data.iloc[idx, 0:]\n data = np.array([data, data, data])\n data = data.reshape(3, 18, 2)\n data = data.astype('double').reshape(3, -1, 2)\n\n result = self.labels.iloc[idx]\n result = np.array([result])\n result = result.astype('int')\n\n if self.transform:\n data = self.transform(data)\n result = self.transform(result)\n\n return data, result\n\n\n# Create training and test datasets\n\ntest_activity_dataset = ActivitySkeletalDataset(data_dir, 'test', data_transforms['test'])\n\n# Create training and test dataloaders\n\ntest_dataloader = torch.utils.data.DataLoader(test_activity_dataset, batch_size=batch_size, shuffle=True, num_workers=0)\n\ntotal = 0\ncorrect = 0\n\nfor inputs, labels in test_dataloader:\n inputs = inputs.to(device, dtype=torch.float)\n labels = labels.to(device, dtype=torch.long)\n labels = torch.squeeze(labels)\n\n inputs = inputs.permute(0, 2, 1, 3)\n outputs = model(inputs)\n _, predicted = torch.max(outputs, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\naccuracy = 100 * correct / total\nprint('Test Acc: {:4f}'.format(accuracy))","sub_path":"example/load_pretrained_model.py","file_name":"load_pretrained_model.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"43566336","text":"#!/usr/bin/env python\n\n#Extract longest transcript of a gene from GFF using GFF parser\n#Author: Hui Liu\n#lhui2010@gmail.com\n\nfrom BCBio import GFF\nfrom Bio import SeqIO\nfrom Bio import SeqFeature\nfrom Bio.SeqRecord import SeqRecord\nimport sys\nimport re\n#import pprint\n\nimport argparse\n\ninfile=\"\"\n\nparser = argparse.ArgumentParser(description='Extract longest transcripts from GFF')\nparser.add_argument('in_file', nargs='?', \n help='GFF3 format file')\nargs = parser.parse_args()\n\nif (len(sys.argv) < 2):\n parser.parse_args(['--help'])\n\n\n#examiner = GFFExaminer()\n#in_handle = open(in_file)\n#pprint.pprint(examiner.available_limits(in_handle))\n#in_handle.close()\n\nlen_dict = dict()\nid_dict = dict()\n\n#in_file = \"test2.gff3\"\n#in_file = sys.argv[1]\n#print in_file\n#exit(0)\nin_handle = open(args.in_file)\nfor rec in GFF.parse(in_handle):\n for feat in rec.features:\n gene_name = feat.id\n#IRGSP gff only contains mRNA feature, manual get gene_name\n gene_name = re.sub(r'-\\d\\d$', '', gene_name)\n\n#Get cds length\n length=0\n\n if(len(feat.sub_features) > 0):\n\n for sub_index in range(len(feat.sub_features)):\n\n if(feat.sub_features[sub_index].type == 'CDS'):\n\n length += feat.sub_features[sub_index].location.end.position - feat.sub_features[sub_index].location.start.position\n#Update dictionary\n if ( not(len_dict.__contains__(gene_name)) or length > len_dict[gene_name]):\n\n 
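# The evaluation loop above runs with autograd enabled and accumulates a
# tensor in `correct`. A leaner sketch of the same accuracy computation under
# torch.no_grad() (model, loader, and device are assumed to exist as in the
# record):

import torch

def accuracy(model, loader, device):
    """Percentage of correctly classified samples over a DataLoader."""
    model.eval()
    correct = total = 0
    with torch.no_grad():  # skip gradient bookkeeping during evaluation
        for inputs, labels in loader:
            inputs = inputs.to(device, dtype=torch.float)
            labels = labels.to(device, dtype=torch.long).squeeze()
            preds = model(inputs).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.numel()
    return 100.0 * correct / total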
len_dict[gene_name] = length\n\n id_dict[gene_name] = feat.id\nin_handle.close()\n\nfor gene in list(id_dict):\n\n print (id_dict[gene], \"\\t\", gene)\n","sub_path":"lh_bin/longest_transcript_from_gff.IRGSP.py","file_name":"longest_transcript_from_gff.IRGSP.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"367601871","text":"import cv2\nfrom evaluation import *\nimport pickle\n\n\ndef rectangle_area(rect):\n x, y, w, h = rect\n return w*h\n\n\ndef contour2rectangle(contours):\n # Get bounding rectangle for each found contour and sort them by area\n rects = []\n for i in contours:\n y, x, h, w = cv2.boundingRect(i)\n rects.append([x, y, w, h])\n rects = sorted(rects, key=rectangle_area, reverse=True)\n\n return rects\n\n\ndef inside_rectangle(rectangle_a, rectangle_b):\n\n # Return false if the position of one point of rectangle B is inside rectangle A. Rectangle = [x,y,w,h]\n xa,ya,wa,ha = rectangle_a\n xb,yb,wb,hb = rectangle_b\n if xb>=xa and xb<=(xa+wa) and yb>=ya and yb<=(ya+ha): # Point xb,yb is inside A\n return True\n elif (xb+wb)>=xa and (xb+wb)<=(xa+wa) and yb>=ya and yb<=(ya+ha): # Point xb+wb,yb is inside A\n return True\n elif xb>=xa and xb<=(xa+wa) and (yb+hb)>=ya and (yb+hb)<=(ya+ha): # Point xb,yb+hb is inside A\n return True\n elif (xb+wb)>=xa and (xb+wb)<=(xa+wa) and (yb+hb)>=ya and (yb+hb)<=(ya+ha): # Point xb+wb,yb+hb is inside A\n return True\n\n xa,ya,wa,ha = rectangle_b\n xb,yb,wb,hb = rectangle_a\n\n if xb>=xa and xb<=(xa+wa) and yb>=ya and yb<=(ya+ha): # Point xb,yb is inside A\n return True\n elif (xb+wb)>=xa and (xb+wb)<=(xa+wa) and yb>=ya and yb<=(ya+ha): # Point xb+wb,yb is inside A\n return True\n elif xb>=xa and xb<=(xa+wa) and (yb+hb)>=ya and (yb+hb)<=(ya+ha): # Point xb,yb+hb is inside A\n return True\n elif (xb+wb)>=xa and (xb+wb)<=(xa+wa) and (yb+hb)>=ya and (yb+hb)<=(ya+ha): # Point xb+wb,yb+hb is inside A\n return True\n\n return False\n\n# Returns true if restrictions are satisfied\ndef satisfy_restrictions(rectangle, shape_image):\n\n min_prop_area = 0.2\n min_ratio = 0.25\n max_ratio = 4\n x, y, w, h = rectangle\n\n # rect has a minimum area\n if w * h < (shape_image[0]*min_prop_area)*(shape_image[1]*min_prop_area):\n return False\n\n # ratio of h/w isn't smaller than 1/4\n ratio = w / h\n if ratio <= min_ratio or ratio >= max_ratio:\n return False\n\n return True\n\n\ndef compute_contours(image):\n\n # Convert image to HSV an use only saturation channel (has most information)\n img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n # We apply an gaussian filter to remove possible noise from the image\n img_hsv_blur = cv2.GaussianBlur(img_hsv[:, :, 1], (5, 5), 0)\n\n # Get edges using Canny algorithm\n # edged = cv2.Canny(img_hsv_blur, 0, 255)\n edged = cv2.Canny(img_hsv_blur, 60, 120)\n # edged = cv2.Canny(img_hsv_blur, 80, 140)\n\n # Apply close transformation to eliminate smaller regions\n kernel = np.ones((5,5),np.uint8)\n edged = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)\n\n # find contours\n contours, hierarchy = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)/v.CHAIN_APPROX_SIMPLE\n\n return contours\n\n\n# Subtracts de background from the image and returns cropped_img and mask (mask = rectangle)\ndef compute_mask(image):\n\n contours = compute_contours(image)\n bbx = []\n\n # If contours not found, pass whole image\n if not contours:\n mask = np.ones(image.shape)\n else:\n rects = 
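# The GFF record above keeps, per gene, the transcript with the longest summed
# CDS. The bookkeeping reduced to its core, with illustrative tuples standing
# in for parsed features:

def longest_per_gene(records):
    """records: iterable of (gene, transcript_id, cds_length) tuples."""
    best = {}
    for gene, tid, length in records:
        # keep this transcript only if it beats the stored CDS length
        if gene not in best or length > best[gene][1]:
            best[gene] = (tid, length)
    return best

demo = [('gene1', 'gene1-01', 900), ('gene1', 'gene1-02', 1200)]
print(longest_per_gene(demo))  # {'gene1': ('gene1-02', 1200)}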
contour2rectangle(contours)\n x1,y1,w1,h1 = rects[0]\n\n # Search for a second painting\n found = False\n rects = rects[1:]\n cnt2 = []\n cmpt = 0\n while not found and (cmpt0:\n x2, y2, w2, h2 = cnt2\n mask[x2:x2 + w2, y2:y2 + h2] = 1\n if x2 0:\n x2, y2, w2, h2 = cnt2\n cmpt = 0\n found = False\n while not found and (cmpt < len(rects)):\n if inside_rectangle([x1, y1, w1, h1], rects[cmpt]) or \\\n inside_rectangle([x2, y2, w2, h2], rects[cmpt]) or \\\n not satisfy_restrictions(rects[cmpt], image.shape):\n cmpt = cmpt + 1\n else:\n cnt3 = rects[cmpt]\n found = True\n\n # Initialize mask & activate the pixels inside the rectangle\n mask = np.zeros(image.shape[:2], np.uint8)\n mask[x1:x1 + w1,y1:y1 + h1] = 1\n if len(cnt2) > 0:\n x2, y2, w2, h2 = cnt2\n mask[x2:x2 + w2,y2:y2 + h2] = 1\n if len(cnt3) > 2:\n x3, y3, w3, h3 = cnt3\n mask[x3:x3 + w3, y3:y3 + h3] = 1\n\n sorted_paintings = orderPaintings(cnt1, cnt2, cnt3)\n for sp in sorted_paintings:\n cropped_images.append(image[sp[0]:sp[0]+sp[2],sp[1]:sp[1]+sp[3]])\n x1 = sp[1]\n y1 = sp[0]\n x2 = x1 + sp[3]\n y2 = y1\n x3 = x2\n y3 = y2 + sp[2]\n x4 = x1\n y4 = y3\n coords = [(x1,y1), (x2,y2), (x3,y3), (x4,y4)]\n coord_images.append(coords)\n\n return cropped_images, coords\n\ndef orderPaintings(cnt1,cnt2,cnt3):\n # if cnt2 != []:\n # if (cnt1[2]+cnt1[0])destination:\n return count_steps(destination,current_vertex)\n elif current_vertex calling:\n # Make sure the vertex we will go to is greater than wherever we want to go so we don't end up in a loop\n counted = count_steps(vertex, destination, steps + 1, current_vertex)\n if counted != -1 and (min_steps == -1 or counted < min_steps):\n # If this is actually the least amount of steps we have found\n min_steps = counted\n return min_steps\n\nprint(count_steps(10,1))\n#this will create error in the output\nprint(count_steps(1,10))\nprint(count_steps(1,2))\n","sub_path":"counting_steps_in_network.py","file_name":"counting_steps_in_network.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"311883534","text":"'''\nGiven a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n).\n\nExample:\n\nInput: S = \"ADOBECODEBANC\", T = \"ABC\"\nOutput: \"BANC\"\nNote:\n\nIf there is no such window in S that covers all characters in T, return the empty string \"\".\nIf there is such window, you are guaranteed that there will always be only one unique minimum window in S.\n'''\n\n#=====================================================================================================#\n\n# First\n\n\nclass Solution:\n def minWindow(self, s: str, t: str) -> str:\n d = dict(collections.Counter(t)) \n \n \n formed = 0\n slow = 0\n \n \n min_str = None\n min_length = sys.maxsize - 1\n \n \n for fast in range(len(s)):\n \n \"\"\"\n skip if s[fast] doesn't matter\n \"\"\"\n ch = s[fast]\n fast += 1\n if ch not in d:\n continue\n \n \"\"\"\n use the ch to update d\n \"\"\"\n d[ch] -= 1\n if d[ch] == 0:\n formed += 1\n \n \n \"\"\"\n If all character are satisfied, then need to move the left pointer\n \"\"\"\n while formed == len(d) and slow <= fast:\n \n \"\"\"\n save the result\n \"\"\"\n curr_length = fast - slow\n if curr_length < min_length:\n min_length = curr_length\n min_str = s[slow:fast]\n \n \n \n \"\"\"\n update the left boundary\n \"\"\"\n ch = s[slow]\n slow += 1\n if ch not in d: \n continue\n d[ch] += 1\n if d[ch] == 1:\n formed -= 1\n \n return 
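# The detection pipeline above is blur -> Canny -> morphological close ->
# findContours -> boundingRect. A self-contained sketch of that chain on a
# synthetic image; note that cv2.boundingRect returns (x, y, w, h), and the
# two-value findContours signature below assumes OpenCV 4.x:

import cv2
import numpy as np

def boxes_by_area(gray):
    """Edge-detect a grayscale image and return bounding boxes, largest first."""
    edges = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 60, 120)
    edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))
    contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    rects = [cv2.boundingRect(c) for c in contours]
    return sorted(rects, key=lambda r: r[2] * r[3], reverse=True)

canvas = np.zeros((200, 200), np.uint8)
cv2.rectangle(canvas, (40, 40), (160, 120), 255, 2)
print(boxes_by_area(canvas)[:1])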
min_str if min_str is not None else \"\"\n\n# 88 ms, 13.3 MB\n# Uses the sliding-window approach: two pointers, where the fast one keeps moving forward and the slow one advances as far as it can while the window still satisfies the requirement.\n# The substring between the two pointers is then a candidate that covers T; track those candidates and keep the shortest one.\n\n#=====================================================================================================#","sub_path":"76. Minimum Window Substring copy.py","file_name":"76. Minimum Window Substring copy.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"581876530","text":"def prompt_user(): #Get user selection and return\n print(\"\")\n print(\"What would you like to do?\")\n print(\"1: Look up year range\")\n print(\"2: Look up month/year\")\n print(\"3: Search for author\")\n print(\"4: Search for title\")\n print(\"Q: Quit\")\n userinput=input(\">\")\n return userinput\n\ndef get_books():# Read and process input file. return list\n all_list=[]\n try:\n fp=open('bestsellers.txt')\n for line in fp:\n book=line.split('\\t')\n all_list.append(book)\n fp.close()\n except ValueError:\n print (\"Oops! That was no valid number. Try again...\")\n except IOError:\n print (\"Cannot open good bye...\")\n except FileNotFoundError:\n print (\"File not found try again...\")\n return all_list\n\ndef print_book(book): #print single book, formatted\n print(' {}, by {} {}'.format(book[0].strip(), book[1].strip(), book[3].strip()))\n\ndef display_books_by_year(library):# find and display books by year\n while True:\n try:\n begin_str=input(\"Enter beginning year: \")#Negative years are legit\n begin=int(begin_str)\n end_str=input(\"Enter ending year: \")#Negative years are legit\n end=int(end_str)\n if begin>end:\n raise ValueError\n break\n except:\n print(\"Invalid input\")\n foundone=False\n print(\"\")\n print(\"All Titles between \",begin,\" and \",end)\n for book in library:\n year_str=book[3].strip()\n year_str2=year_str[-4:]\n year=int(year_str2)\n if year<=end and year>=begin:\n print_book(book)\n foundone=True\n if foundone==False:\n print(\"Didn't find book with search\")\n \n\ndef display_books_by_month_year(library): #Find and display books by month & year\n while True:\n month=input(\"Enter month (as a number, 1-12): \")\n if int(month)<1 or int(month)>12:\n print(\"invalid input\")\n else:\n break\n t=input(\"Enter year: \")#Negative years are legit\n foundone=False\n print(\"\")\n print(\"All Titles in month \",month,\" of \",t)\n for book in library:\n date_str=book[3].strip()\n date=date_str.split('/')\n month_int=int(date[0])\n year=int(date[2])\n if year==int(t) and int(month)==month_int:\n print_book(book)\n foundone=True\n if foundone==False:\n print(\"Didn't find book with search\")\n\ndef search_by_author(library): #Find and display books by Author\n t=input(\"Enter an author's name (or part of a name): \")\n foundone=False\n for book in library:\n if t.upper() in book[1].upper():\n print_book(book)\n foundone=True\n if foundone==False:\n print(\"Didn't find book with search\")\n\ndef search_by_title(library): #Find and display books by Title\n t=input(\"Enter a title (or part of a title): \")\n foundone=False\n for book in library:\n if t.upper() in book[0].upper():\n print_book(book)\n foundone=True\n if foundone==False:\n print(\"Didn't find book with search\")\n\nwhile True: #Main loop\n library=get_books() #Make the library from the list returned by get_books\n if len(library)==0: #Library processing failed\n break\n else:\n selection=prompt_user()\n if selection=='1':\n display_books_by_year(library)\n elif selection=='2':\n
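# A brute-force cross-check for the sliding-window solution on small inputs
# (quadratic scan; for illustration and testing only):

from collections import Counter

def min_window_brute(s, t):
    need = Counter(t)
    best = ''
    for i in range(len(s)):
        for j in range(i + len(t), len(s) + 1):
            if not need - Counter(s[i:j]):  # window covers every char of t
                if not best or j - i < len(best):
                    best = s[i:j]
                break  # longer windows from i cannot be shorter
    return best

print(min_window_brute('ADOBECODEBANC', 'ABC'))  # -> BANC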
display_books_by_month_year(library)\n elif selection=='3':\n search_by_author(library)\n elif selection=='4':\n search_by_title(library)\n elif selection==\"Q\" or selection==\"q\":\n print(\"Thank you and have a nice day\")\n break\n else:\n print(\"Invalid input. Please try again.\")\n","sub_path":"Python_projects/project06/Proj06RAM.py","file_name":"Proj06RAM.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"12812521","text":"'''\r\nFind the number of digits in a given integer.\r\n'''\r\n\r\nnum = int(input(\"Enter the number :\")) # Enter the number using keyboard\r\n#!= is not equal v\r\ndiv = 1 # Start with 1 the first power of 10 in the integer division\r\nwhile num//10**div != 0: # Check the condition: Is the quotient of the integer division num\r\n # divided by 10 to the power of div not equal to zero?\r\n div = div + 1 # If yes, execute the loop body.\r\n\r\n print(\"The number :\", num, \"has \", div,\" digits\")\r\n # the message","sub_path":"NumDigitts.py","file_name":"NumDigitts.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"55256279","text":"#!/usr/bin/env python\n# -*- coding:utf8 -*-\nimport gzip\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom urllib import request\n\nfrom bs4 import BeautifulSoup\nimport sys\nimport sys\n\nfrom utils.DiskCacheUtils import DiskCacheUtils\n\n# sys.path.append(sys.path[0].pa+\"utils/\")\n\n# 扫描局域网可用的ip\n\nclass SearchWifiIP:\n def searchIp(self, url, lastI, lastJ):\n try:\n response = request.urlopen(url, None, 7)\n content = response.read()\n try:\n content = gzip.decompress(content)\n except OSError as e:\n pass\n title = self.getHtmlTitle(content)\n\n file = open(dir + \"/ip_history.txt\", \"a+\")\n\n file.write(\"成功访问的ip:\" + url + \"\\tTitle:\" + str(title) + \"\\n\")\n file.flush()\n file.close()\n print(\"==========成功访问ip:\" + url)\n except OSError as e:\n if lastJ % 50 == 0:\n print(\"网络超时了......\" + url)\n cache.setValue(\"lastI\", lastI)\n cache.setValue(\"lastJ\", lastJ)\n\n def getHtmlTitle(self, html):\n try:\n bsObj = BeautifulSoup(html, \"html.parser\")\n title = bsObj.title.string\n except AttributeError as e:\n return None\n return title\n\n\ndir = \"G:/python/temp_files\"\n\ncache = DiskCacheUtils(dir + \"/cache.txt\")\ncurrentI = cache.getValue(\"lastI\", 0)\ncurrentJ = cache.getValue(\"lastJ\", 0)\n\n# currentI = 1\n# currentJ = 0\n\n\nurl = 'http://192.168.'\nprint(\"上次进行到的ip:\" + url + str(currentI) + \",\" + str(currentJ))\nexecutor = ThreadPoolExecutor(max_workers=50)\nsearch = SearchWifiIP()\nfor index in range(currentI, 255):\n for index2 in range(currentJ, 255):\n ip = url + str(index) + \".\" + str(index2)\n executor.submit(search.searchIp, ip, index, index2)\n","sub_path":"wifi/SearchWifiIP.py","file_name":"SearchWifiIP.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"320318849","text":"#Starter code to import the CSV file\n\nimport csv\n\n\nwith open('setosa_v_versicolor.csv', newline='') as csvfile:\n\tcsvreader = csv.reader(csvfile, delimiter=',')\n\n\t\n\tfor column in csvreader:\n\t\tslength = column[0]\n\t\tswidth = column[1]\n\t\tplength = column[2]\n\t\tpwidth = column[3]\n\t\tspecies = column[4]\n\t#int flower = 0\n\t#while flower < 100:\n\t#\tif >= 3 & cvsreader[flower] [pwidth] >= 
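# SearchWifiIP above fans subnet probes out over a thread pool. The
# submit/collect pattern in isolation, with a bounded timeout per probe (the
# target list is illustrative, and the probes will simply report False on a
# network where those hosts do not answer):

from concurrent.futures import ThreadPoolExecutor, as_completed
from urllib import request

def probe(url, timeout=3):
    """Return (url, True) when the target answers within the timeout."""
    try:
        request.urlopen(url, None, timeout)
        return url, True
    except OSError:
        return url, False

targets = ['http://192.168.0.%d' % i for i in (1, 254)]
with ThreadPoolExecutor(max_workers=20) as pool:
    futures = [pool.submit(probe, t) for t in targets]
    for fut in as_completed(futures):  # results arrive as probes finish
        print(fut.result())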
1:\n\t\t\t#this means that the species is Iris Virginica, speciecs 1\n\t\t#else:\n\t\t\t#this means that it is species -1\n\n\t\t#flower++\n\t\t\n\t\t\n\t\t\n\t\n\t\t\n","sub_path":"starter.py","file_name":"starter.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"557570858","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Fri Dec 04 13:10:20 2015\n\n@author: bayevskihk\n\"\"\"\n\nimport sys\n\ndef main(argv):\n number = len(argv)\n data_addr = 0\n last_addr = 0\n\n if(number != 2):\n return\n else:\n try:\n data_addr = int(argv[0])\n last_addr = int(argv[1])\n except:\n print (\"Wrong arguments\")\n return\n\n if(parse(\"build/obj/text.vh\", \"build/text.rom\", data_addr) < 0):\n print (\"Wrong text file\")\n return\n\n if(parse(\"build/obj/data.vh\", \"build/data.rom\", last_addr - data_addr) < 0):\n print (\"Wrong text file\")\n return\n\n print (\"Convertion was successfull\")\n\n\ndef parse(file_name, rom_name, addr_last):\n hex_file = open(file_name, 'r')\n rom_file = open(rom_name, 'w')\n# rom_file.truncate()\n\n hex_parts = hex_file.readline()\n line = \"\"\n\n try:\n hex_parts.index(\"@\")\n except:\n return -1\n\n attached = 0\n words = 0\n\n rom_file.write(\"@00000000\\n\");\n\n while(1):\n hex_parts = hex_file.readline()\n# hex_parts = hex_parts.translate({None: \"\\n\"})\n hex_parts = hex_parts.split();\n\n if(len(hex_parts) < 4):\n break\n\n for part in hex_parts:\n\n if(len(part) == 0):\n continue\n\n line += part\n attached += 1\n\n if(attached == 4):\n attached = 0\n words += 1\n rom_file.write(line + \"\\n\")\n line = \"\"\n\n\n for i in range(addr_last - words):\n rom_file.write(\"00000000\\n\")\n\n rom_file.close()\n\n return 0\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n","sub_path":"circuitry/lab4/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"387012497","text":"################### 传递元组 #####################\n# 函数通过元组返回多个值\ndef get_error_details():\n return (2, \"details\")\nerrnum, errstr = get_error_details() #多个返回值直接赋值\nprint(\"{},{}\".format(errnum, errstr))\n# 交换变量\nerrnum, errstr = errstr, errnum\nprint(\"{},{}\".format(errnum, errstr))\n\n\n################### 特殊方法 #####################\n# 特殊方法用来模拟内置类型的某些行为\n# 例如实现索引操作 x[key], 实现 __getitem__()方法\n\n# 部分特殊方法\n# __init__(self, ...) 
构造对象时使用\n# __del__(self) 删除对象前调用\n# __str__(self) 使用print函数或者str()函数时调用\n# __lt__(self, other) 当使用小于运算符(<)被使用时调用。类似的,其他运算符有特殊的方法调用\n# __getitem(self, key) 使用x[key]索引操作时被调用\n# __len(self) 当针对序列对象使用内置len()函数时调用\n\n#################### Lambda表格 #####################\n# lambda语句可以创建一个新的函数对象。\n# lambda需要一个参数,后跟一个表达式作为函数体,这一表达式执行的值将作为这个新函数的返回值\npoints = [{\"x\":2, \"y\":3},\n {\"x\":4, \"y\": 1}]\npoints.sort(key=lambda i: i[\"y\"]) # list.sort(key)\nprint(points)\n\n#################### 列表推导 #####################\n# 列表推导用于从一份现有的列表中得到一份新列表\nlistone = [2, 3, 4]\nlisttwo = [2*i for i in listone if i > 2] #对于满足if的元素 进行操作 得到结果收集为列表\nprint(listtwo)\n\n#################### 在函数中接受元组与字典 #####################\n# 通过使用 * 或者 ** 作为元组或字典的前缀,来使他们作为一个参数为函数所接收\ndef powersum(power, *args):\n total = 0\n for i in args:\n total += pow(i, power)\n return total\n\n#################### assert语句 #####################\n# assert语句用确认条件为真,当断言为false时,会抛出AssertionError\nmylist = [\"item\"]\nassert len(mylist) >= 1\nmylist.pop()\ntry:\n assert len(mylist) >= 1\nexcept AssertionError:\n print(\"AssertionError\")\n\n#################### 装饰器 #####################\n# 装饰器(Decorators)是应用包装函数的快捷方式\nfrom time import sleep\nfrom functools import wraps\nimport logging\nlogging.basicConfig()\nlog = logging.getLogger(\"retry\")\n\n# 装饰器参数为一个函数(不一定),返回值也是一个函数\ndef retry(func):\n @wraps(func) #functools.warps 不改变使用装饰器原有函数的结构(如__name__, __doc__),否则会使用装饰器的__name__ ...\n def warpped_f(*args, **kwargs): # *args 与 **kwargs 支持所有参数\n MAX_ATTEMPTS = 5\n for attemp in range(1, MAX_ATTEMPTS + 1):\n try:\n return func(*args, **kwargs) # 调用func参数\n except:\n log.exception(\"Attempt %s/%s failed: %s\",\n attemp,\n MAX_ATTEMPTS,\n (args,kwargs))\n sleep(10 * attemp)\n log.critical(\"All %s attempt failed: %s\",\n MAX_ATTEMPTS,\n (args, kwargs))\n return warpped_f \n\ncounter = 0\n# 多个装饰器从最后一个开始执行到第一个(倒序)\n@retry\ndef save_to_database(arg):\n print(\"Write to database\")\n global counter\n counter += 1\n if counter < 2:\n raise ValueError\nif __name__ == \"__main__\":\n save_to_database(\"Some bad value\")\n","sub_path":"python/PythonSyntax/12-other.py","file_name":"12-other.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"36064635","text":"import gym\nimport torch as T\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom torch.distributions import Categorical\nimport gym_chess\nimport threading\nimport multiprocessing\nfrom typing import Optional\nfrom sharedadam import SharedAdam \nfrom categoricalmasked import CategoricalMasked \n\nimport asyncio\nimport chess\nimport chess.engine\nimport sys\n\nCUDA = False\n\ndef eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)\n\nclass ActorCritic(nn.Module):\n def __init__(self, input_dims, n_actions, gamma=0.99):\n super(ActorCritic, self).__init__()\n\n self.gamma = gamma\n self.output_dims = n_actions\n\n self.pi1 = nn.Linear(input_dims, 256)\n self.pi2 = nn.Linear(256, 256)\n self.pi3 = nn.Linear(256, 256)\n self.pi4 = nn.Linear(256, 256)\n self.pi5 = nn.Linear(256, 256)\n self.pi6 = nn.Linear(256, 256)\n self.pi7 = nn.Linear(256, 256)\n self.pi8 = nn.Linear(256, 256)\n self.pi9 = nn.Linear(256, 256)\n self.pi10 = nn.Linear(256, 256)\n self.pi = nn.Linear(256, n_actions)\n self.v = nn.Linear(256, 1)\n\n self.w_rewards = []\n self.w_actions = []\n self.w_states = []\n\n self.b_rewards = []\n self.b_actions = []\n self.b_states = 
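# The retry decorator above wraps its target with functools.wraps so the
# decorated function keeps its own metadata. A minimal demonstration of what
# @wraps preserves:

from functools import wraps

def logged(func):
    @wraps(func)  # copy __name__, __doc__, etc. onto the wrapper
    def wrapper(*args, **kwargs):
        print('calling', func.__name__)
        return func(*args, **kwargs)
    return wrapper

@logged
def save(record):
    """Persist one record."""
    return record

print(save.__name__, '-', save.__doc__)  # save - Persist one record.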
[]\n\n self.device = T.device('cuda' if T.cuda.is_available() and CUDA == True else 'cpu')\n\n self.to(self.device)\n\n def remember(self, color, state, action, reward):\n if color == 'white':\n self.w_states.append(state)\n self.w_actions.append(action)\n self.w_rewards.append(reward)\n elif color == 'black':\n self.b_states.append(state)\n self.b_actions.append(action)\n self.b_rewards.append(reward)\n else:\n print('remember: color unavailable')\n\n def clear_memory(self):\n self.w_states = []\n self.w_actions = []\n self.w_rewards = []\n\n self.b_states = []\n self.b_actions = []\n self.b_rewards = []\n\n def forward(self, state):\n f = F.relu(self.pi1(state))\n f = F.relu(self.pi2(f))\n f = F.relu(self.pi3(f))\n f = F.relu(self.pi4(f))\n f = F.relu(self.pi5(f))\n f = F.relu(self.pi6(f))\n f = F.relu(self.pi7(f))\n f = F.relu(self.pi8(f))\n f = F.relu(self.pi9(f))\n f = F.relu(self.pi10(f))\n\n pi = self.pi(f)\n v = self.v(f)\n return pi, v\n\n def calc_R(self, states, rewards, done):\n states = T.tensor(states, dtype=T.float).to(self.device)\n #print(states)\n _, v = self.forward(states)\n\n R = v[-1]*(1-int(done))\n\n batch_return = []\n for reward in rewards[::-1]:\n R = reward + self.gamma*R\n batch_return.append(R)\n batch_return.reverse()\n batch_return = T.tensor(batch_return, dtype=T.float).to(self.device)\n\n return batch_return\n\n def calc_loss(self, color, done):\n states_ = []\n actions_ = []\n rewards_ = []\n\n if color == 'white':\n states_ = self.w_states\n actions_ = self.w_actions\n rewards_ = self.w_rewards\n elif color == 'black':\n states_ = self.b_states\n actions_ = self.b_actions\n rewards_ = self.b_rewards\n\n states = T.tensor(states_, dtype=T.float).to(self.device)\n actions = T.tensor(actions_, dtype=T.float).to(self.device)\n\n returns = self.calc_R(states_, rewards_, done)\n\n pi, values = self.forward(states)\n values = values.squeeze()\n critic_loss = (returns-values)**2\n\n probs = T.softmax(pi, dim=1)\n dist = Categorical(probs)\n log_probs = dist.log_prob(actions)\n actor_loss = -log_probs*(returns-values)\n\n total_loss = (critic_loss + actor_loss).mean()\n\n return total_loss\n\n def choose_action(self, observation, legal_actions):\n mask = T.zeros(self.output_dims, dtype=bool)\n mask[legal_actions] = True\n\n state = T.tensor([observation], dtype=T.float).to(self.device)\n pi, v = self.forward(state)\n probs = T.softmax(pi, dim=1)\n dist = CategoricalMasked(probs, mask=mask)\n action = dist.sample().numpy()[0]\n\n return action\n\nclass Agent(mp.Process):\n def __init__(self, global_actor_critic, optimizer, input_dims, n_actions,\n gamma, lr, name, global_ep_idx, env_id):\n super(Agent, self).__init__()\n self.local_actor_critic = ActorCritic(input_dims, n_actions, gamma)\n self.global_actor_critic = global_actor_critic\n self.name = 'w%02i' % name\n self.episode_idx = global_ep_idx\n self.env = gym.make(env_id)\n self.optimizer = optimizer\n self.render = False\n\n def run(self):\n N_GAMES = 5000\n T_MAX = 5\n t_step = 1\n while self.episode_idx.value < N_GAMES:\n c = 0\n done = False\n observation = self.env.reset()\n w_score = 0\n b_score = 0\n self.local_actor_critic.clear_memory()\n self.local_actor_critic.clear_memory()\n while not done:\n c += 1\n\n # White turn\n actions = self.env.legal_actions\n action = self.local_actor_critic.choose_action(np.array(observation).flatten(), actions)\n #print(self.env.decode(action))\n observation_, reward, done, info = self.env.step(action)\n #print(self.env.render(mode='unicode'))\n w_score += reward\n 
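# The worker below pushes its local gradients onto the shared network, steps
# the shared optimizer, and pulls the updated weights back. That handshake in
# isolation, on two tiny linear nets (it mirrors the record's use of the
# private _grad attribute):

import torch as T
import torch.nn as nn

def push_grads_pull_weights(local_net, global_net, optimizer):
    """Share worker gradients, step the global optimizer, refresh the worker."""
    for lp, gp in zip(local_net.parameters(), global_net.parameters()):
        gp._grad = lp.grad  # hand the worker's gradients to the shared model
    optimizer.step()  # the optimizer was built on the global parameters
    local_net.load_state_dict(global_net.state_dict())

global_net = nn.Linear(4, 2)
local_net = nn.Linear(4, 2)
opt = T.optim.Adam(global_net.parameters(), lr=1e-3)
local_net(T.randn(1, 4)).sum().backward()  # populate local gradients
push_grads_pull_weights(local_net, global_net, opt)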
self.local_actor_critic.remember('white', np.array(observation).flatten(), action, reward)\n\n if done:\n self.local_actor_critic.b_rewards[len(self.local_actor_critic.b_actions) - 1] = -reward\n b_score += -reward\n\n observation = observation_\n\n if not done:\n actions = self.env.legal_actions\n action = self.local_actor_critic.choose_action(np.array(observation).flatten(), actions)\n observation_, reward, done, info = self.env.step(action)\n #print(self.name, self.env.decode(action))\n #print(self.env.render(mode='unicode'))\n b_score += -reward\n self.local_actor_critic.remember('black', np.array(observation_).flatten(), action, -reward)\n if done:\n self.local_actor_critic.w_rewards[len(self.local_actor_critic.w_actions) - 1] = -reward\n w_score += -reward\n observation = observation_\n \n\n #print('curr white score:', w_score, 'curr black score:', b_score, 'agent id:', self.name)\n\n if done:\n # White backprop\n loss = self.local_actor_critic.calc_loss('white', done)\n self.optimizer.zero_grad()\n loss.backward()\n for local_param, global_param in zip(\n self.local_actor_critic.parameters(),\n self.global_actor_critic.parameters()):\n global_param._grad = local_param.grad\n self.optimizer.step()\n self.local_actor_critic.load_state_dict(\n self.global_actor_critic.state_dict())\n self.local_actor_critic.clear_memory()\n\n # Black backprop\n if self.local_actor_critic.b_states:\n #print(self.local_actor_critic.states)\n loss = self.local_actor_critic.calc_loss('black', done)\n self.optimizer.zero_grad()\n loss.backward()\n for local_param, global_param in zip(\n self.local_actor_critic.parameters(),\n self.global_actor_critic.parameters()):\n global_param._grad = local_param.grad\n self.optimizer.step()\n self.local_actor_critic.load_state_dict(\n self.global_actor_critic.state_dict())\n self.local_actor_critic.clear_memory()\n\n t_step += 1\n with self.episode_idx.get_lock():\n self.episode_idx.value += 1\n if self.episode_idx.value % 4000 == 0:\n T.save(self.global_actor_critic.state_dict(), \"save.pt\")\n eprint(self.name, 'episode ', self.episode_idx.value, 'result %.1f' % reward, 'steps %d' % c)\n\nif __name__ == '__main__':\n mp.set_start_method('spawn')\n lr = 1e-4\n env_id = 'ChessAlphaZero-v0'\n n_actions = 4672\n input_dims = 7616\n global_actor_critic = ActorCritic(input_dims, n_actions)\n try:\n global_actor_critic.load_state_dict(T.load(sys.argv[1]))\n except:\n print(\"Error while loading model. 
Program will continue with a fresh model.\")\n global_actor_critic.share_memory()\n #optim = SharedAdam(global_actor_critic.parameters(), lr=lr, betas=(0.92, 0.999))\n optim = T.optim.Adam(global_actor_critic.parameters(), lr=lr)\n global_ep = mp.Value('i', 0)\n\n\n\n workers = [Agent(global_actor_critic,\n optim,\n input_dims,\n n_actions,\n gamma=0.99,\n lr=lr,\n name=i,\n global_ep_idx=global_ep,\n env_id=env_id) for i in range(mp.cpu_count())]\n\n workers[0].render = True\n [w.start() for w in workers]\n [w.join() for w in workers]\n\n","sub_path":"src/chess_a3c.py","file_name":"chess_a3c.py","file_ext":"py","file_size_in_byte":9818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"547318200","text":"#-*- coding: utf-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup\n\ndump_num = 2\ncount = 0\npath = './TEMP/'\n\nreq = requests.get('http://dcslab.hanyang.ac.kr/?q=node/5')\nhtml = req.text\nsoup = BeautifulSoup(html, 'html.parser')\n\nmy_cate = soup.select('div > div > div > p > strong')\nmy_tag = soup.select('div > div > div > ul')\n\ndef titles_parser(cate, tag, tar):\n tag = tag[1: len(tag)]\n tar = tar.split('\\n')\n\n old_title = \"\"\n for line in tar:\n if not tar:\n continue\n first_index = line.find(', ')\n fir_author = line[0: first_index]\n\n line = line[first_index: len(line)]\n end_index = line.find('\"')\n co_author = line[2: end_index - 2]\n\n line = line[end_index+1: len(line)]\n end_index = line.find('\"')\n title = line[0: end_index]\n\n line = line[end_index+3: len(line)]\n end_index = line.find(', ')\n name = line[0: end_index]\n\n line = line[end_index + 2: len(line)]\n location = line\n\n\n\n print(cate)\n print(tag)\n print(fir_author)\n print(co_author)\n print(title)\n print(name)\n print(location)\n write_file(path, cate, tag, fir_author, co_author, old_title, title, name, location)\n old_title = title\n print('****************************')\n\ndef write_file(path, cate, tag, fir, co, old_title, title, name, location):\n global count\n\n ptitle = title.replace(':', '-')\n ptitle = ptitle.replace('/', '-')\n ptitle = ptitle.replace(' ', '-')\n\n pold_title = old_title.replace(':', '-')\n pold_title = pold_title.replace('/', '-')\n pold_title = pold_title.replace(' ', '-')\n\n alpha = \"\"\n\n if pold_title == ptitle:\n alpha = \"1\"\n\n f = open(path + ptitle + alpha, 'w', encoding=\"utf-8\")\n data = \"---\"\n f.write(data)\n data = \"\\nlayout: publication-single\"\n f.write(data)\n data = \"\\ntitle: \" + title\n f.write(data)\n data = '\\nname: ' + name\n f.write(data)\n data = '\\nfirst-author: ' + fir\n f.write(data)\n data = '\\nco-authors: ' + co\n f.write(data)\n data = '\\nduring: ' + location\n f.write(data)\n data = '\\nlocation: \\nimpactfactor: \\ndoi: \\nnote: '\n f.write(data)\n data = '\\ncategories: \\n\\t- ' + cate\n f.write(data)\n data = '\\ntag: \\n\\t- ' + tag\n f.write(data)\n data = '\\n---'\n f.write(data)\n count += 1\n f.close()\n\n\n\ndef content_parser(strc, cate):\n st = str(strc)\n test = st.split('\\n\\n')\n\n for i in range(len(test)):\n tar = test[i]\n if i%2 == 0:\n tag = tar\n else:\n titles_parser(cate, tag, tar)\n\nfor i in range(len(my_cate)):\n cate = my_cate[i].text\n\n\nfor i in range(len(my_cate)):\n cate = my_cate[i].text\n content_parser(my_tag[dump_num+i].text, cate)\n print('====================================')\n 
print(count)\n\n\n\n\n","sub_path":"Crawler.py","file_name":"Crawler.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"212487002","text":"import sys\n\n\n# Part 2 - Item\n\nclass Item:\n def __init__(self, name):\n self.name = name\n self.position = None\n self.value = None\n self.previous = None\n\n def __repr__(self):\n return \"[value = \" + str(self.value) + \", position = \" + str(self.position) + \" ]\"\n\n\n# Part 3 - Priority Queues\n\nclass NoParentE(Exception) : pass\nclass NoLeftChildE(Exception) : pass\nclass NoRightChildE(Exception) : pass\n\nclass BinaryHeap :\n def __init__ (self,fromList=None) :\n self.elems = [None]\n self.size = 0\n if fromList :\n self.size = len(fromList)\n self.elems.extend(fromList[:])\n for i in range(self.size//2,0,-1) :\n self.moveDown(i)\n\n def isEmpty (self) : return self.size == 0\n\n def getSize (self) : return self.size\n \n def findMin (self) : return self.elems[1].value\n\n def getParentIndex (self,i) :\n if i == 1 :\n raise NoParentE\n else :\n return i//2\n\n def getLeftChildIndex (self,i) :\n if i*2 > self.size :\n raise NoLeftChildE\n else :\n return i*2\n\n def getRightChildIndex (self,i) :\n if i*2+1 > self.size :\n raise NoRightChildE\n else :\n return i*2+1\n\n def swap (self, i, j) :\n\n posI = self.elems[i].position\n posJ = self.elems[j].position\n\n self.elems[i].position = posJ\n self.elems[j].position = posI\n\n newI = self.elems[i]\n newJ = self.elems[j]\n\n\n self.elems[i] = newJ\n self.elems[j] = newI\n\n \n\n def moveUp (self, i) :\n try :\n m = self.getParentIndex(i)\n if self.elems[i].value < self.elems[m].value :\n self.swap(i,m)\n self.moveUp(m)\n except NoParentE :\n pass\n\n def insert (self, k) :\n \n self.elems.append(k)\n self.size += 1\n k.position = self.size\n self.moveUp(self.size)\n\n def minChildIndex (self, i) :\n try :\n lc = self.getLeftChildIndex(i)\n rc = self.getRightChildIndex(i)\n if self.elems[lc].value < self.elems[rc].value :\n return lc\n else :\n return rc\n except NoRightChildE :\n return lc\n\n def moveDown (self, i) :\n try :\n c = self.minChildIndex(i)\n if self.elems[i].value > self.elems[c].value :\n self.swap(i,c)\n self.moveDown(c)\n except NoLeftChildE :\n pass\n\n def delMin (self) :\n frontPos = self.elems[1].position\n self.elems[1] = self.elems[self.size]\n self.elems[1].position = frontPos\n self.size -= 1\n self.elems.pop()\n self.moveDown(1)\n\n def extractMin(self) :\n minI = self.elems[1]\n self.delMin()\n return minI\n\n def updateKey(self, i, v):\n self.elems[i].value = v\n self.moveUp(i)\n \n def __repr__ (self) :\n return repr(self.elems[1:])\n\n\n# Part 4 - Path\n\nclass Path:\n def __init__(self, startPt):\n self.start = startPt\n self.nodes = [startPt]\n self.len = 0\n\n def append(self, node, leng):\n self.nodes.append(node)\n self.len+=leng\n\n def __repr__(self):\n return repr(self.nodes)\n\n\n# Part 5 - Graph\n\nclass Graph:\n def __init__(self, nodes, neighbors, weights):\n self.nodes = nodes\n self.neighbors = neighbors\n self.weights = weights\n self.q = None\n\n \n def setSource(self, s):\n i = 1\n \n for node in self.nodes:\n if node.name == s.name:\n node.value = 0\n else:\n node.value = sys.maxsize\n\n node.position = i\n i+=1\n\n self.q = BinaryHeap(self.nodes)\n\n def relax(self, u, v):\n if v.value > u.value + self.weights[(u,v)]:\n newV = u.value + self.weights[(u,v)]\n self.q.updateKey(v.position, newV)\n v.previous = u\n\n def compute_shortest_paths(self, s):\n self.setSource(s)\n \n while not 
self.q.isEmpty():\n u = self.q.extractMin()\n \n for i in self.neighbors[u]:\n self.relax(u, i)\n\n def build_shortest_path(self, u):\n\n new = Path(u)\n\n while not u.previous == None:\n prev = u.previous\n new.append(prev, self.weights[prev, u])\n u = u.previous\n\n return new\n","sub_path":"Dijkstra/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"343952180","text":"import pygraphviz as pgv\n\nAMEGA = -1\nMAXIMUM_EDGES_IN_TREE = 50\nBASIC_ALGORITHM_STEPS = 10\nfree_id = 0\n\ndef draw_graph(file_name, res):\n print(file_name)\n g_out = pgv.AGraph(strict=False, directed=True)\n\n for i in res:\n g_out.add_edge(i[0], i[1], color='black')\n # edge = g_out.get_edge(i[0], i[1])\n\n # if i[3] == \"active\":\n # edge.attr['color'] = 'green'\n\n # edge.attr['label'] = i[2]\n g_out.layout(prog='dot')\n g_out.draw(file_name)\n\n","sub_path":"PN/lab1/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"356739908","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom flask import current_app\nimport pandas as pd\nimport db.db_module as dm\n\ndef change_date(x):\n y = x.split(' ')\n month = y[1][:-1] if len(y[1][:-1]) == 2 else '0'+y[1][:-1]\n day = y[2][:-1] if len(y[2][:-1]) == 2 else '0'+y[2][:-1]\n return f'{y[0][:-1]}-{month}-{day}'\n\ndef get_region_by_date(date):\n with open('static/data/gov_data_api_key.txt', mode='r') as key_fd:\n govapi_key = key_fd.read(100)\n start_date = date.replace('-','')\n end_date = date.replace('-','')\n page = 1\n corona_url = 'http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19SidoInfStateJson'\n url = f'{corona_url}?ServiceKey={govapi_key}&pageNo={page}&numOfRows=10&startCreateDt={start_date}&endCreateDt={end_date}'\n\n result = requests.get(url)\n soup = BeautifulSoup(result.text, 'xml')\n resultCode = soup.find('resultCode').get_text()\n if resultCode == '99':\n current_app.logger.info(soup.find('resultMsg').string)\n return\n if resultCode == '00' and soup.find('totalCount').string == '0':\n current_app.logger.info('There is no data!!!')\n return\n\n items = soup.find_all('item')\n item_count = len(items)\n print(item_count)\n for index, item in enumerate(items):\n if item_count > 30 and index >= int(item_count/2):\n break\n stdDay = change_date(item.find('stdDay').string)\n deathCnt = int(item.find('deathCnt').string) if item.find('deathCnt') else 0\n defCnt = int(item.find('defCnt').string) if item.find('defCnt') else 0\n gubun = item.find('gubun').string\n incDec = int(item.find('incDec').string)\n isolClearCnt = int(item.find('isolClearCnt').string) if item.find('isolClearCnt') else 0\n isolIngCnt = int(item.find('isolIngCnt').string) if item.find('isolIngCnt') else 0\n localOccCnt = int(item.find('localOccCnt').string) if item.find('localOccCnt') else 0\n overFlowCnt = int(item.find('overFlowCnt').string) if item.find('overFlowCnt') else 0\n qurRate = None\n if item.find('qurRate'):\n qur = item.find('qurRate').string\n if qur != None and qur.count('.') == 2:\n qur = qur[:-1]\n #print(qur)\n if qur != None and qur[0] in '0123456789':\n qurRate = float(qur)\n \n params = [stdDay, deathCnt, defCnt, gubun, incDec, isolClearCnt, isolIngCnt, \n localOccCnt, overFlowCnt, qurRate]\n dm.write_region(params)\n \n current_app.logger.info(f'{date} region data successfully inserted.')\n\ndef 
get_agender_by_date(date):\n with open('static/data/gov_data_api_key.txt', mode='r') as key_fd:\n govapi_key = key_fd.read(100)\n start_date = date.replace('-','')\n end_date = date.replace('-','')\n page = 1\n corona_url = 'http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19GenAgeCaseInfJson'\n url = f'{corona_url}?ServiceKey={govapi_key}&pageNo={page}&numOfRows=10&startCreateDt={start_date}&endCreateDt={end_date}'\n\n result = requests.get(url)\n soup = BeautifulSoup(result.text, 'xml')\n resultCode = soup.find('resultCode').get_text()\n if resultCode == '99':\n current_app.logger.info(soup.find('resultMsg').string)\n return\n if resultCode == '00' and soup.find('totalCount').string == '0':\n current_app.logger.info('There is no data!!!')\n return\n \n items = soup.find_all('item')\n for item in items:\n createDt = item.find('createDt').string.split(' ')[0]\n confCase = int(item.find('confCase').string)\n confCaseRate = float(item.find('confCaseRate').string)\n death = int(item.find('death').string)\n deathRate = float(item.find('deathRate').string)\n criticalRate = float(item.find('criticalRate').string)\n gubun = item.find('gubun').string\n seq = int(item.find('seq').string)\n updateDt = item.find('updateDt').string\n\n params = [createDt, confCase, confCaseRate, death, deathRate, criticalRate,\n gubun,seq,updateDt]\n dm.write_agender(params)\n\n current_app.logger.info(f'{date} agender data successfully inserted.')\n\ndef get_daily(df, col, new):\n diff = [0]\n for i in range(1, len(df)):\n diff.append(df[col][i] - df[col][i-1])\n del df[col]\n df[new] = diff\n return df\n\ndef make_corona_raw_df(start_date, end_date):\n c_rows = []\n items = 'sid, confDay, region, status'\n gu_list = ['강남구','강동구','강북구','강서구','관악구','광진구','구로구','금천구',\n '노원구','도봉구','동대문구','동작구','마포구','서대문구','서초구','성동구',\n '성북구','송파구','양천구','영등포구','용산구','은평구','종로구','중구','중랑구']\n for gu in gu_list:\n rows = dm.get_seoul_items_by_condition(items, gu, start_date, end_date)\n c_rows.extend(rows)\n df = pd.DataFrame(c_rows, columns=['sid', '확진일', 'gu', 'status'])\n df['확진일'] = pd.to_datetime(df['확진일'])\n cdf_raw = pd.pivot_table(df, values='sid', index='확진일', columns='gu', aggfunc='count')\n cdf_raw.fillna(0, inplace=True)\n cdf_raw = cdf_raw.astype(int)\n cdf_raw['합계'] = cdf_raw.sum(axis=1)\n return cdf_raw, gu_list\n\n# 최근 1년치 데이터만 보여주기 위해 수정\ndef make_corona_df(cdf_raw, start_month):\n cdfM = cdf_raw.resample('M').sum().astype(int)\n month_list = []\n for i in range(12):\n month = (start_month + i) % 12\n month_list.append(f'{month if month else 12}월')\n cdfM.index = month_list\n cdf = cdfM.T\n cdf['누적'] = cdf.sum(axis=1)\n\n pop = pd.read_csv('./static/data/cctv.csv') # 구별 인구 데이터 참조\n pop.set_index('구별', inplace=True)\n cdf['인구수'] = pop['인구수']\n cdf['천명당 확진자 수'] = cdf['누적'] / cdf['인구수'] * 1000\n return cdf.iloc[:-1, :] # 마지막 합계 행은 제거\n\ndef get_new_seoul_data():\n with open('static/data/seoul_api_key.txt', mode='r') as key_fd:\n seoulapi_key = key_fd.read(100)\n corona_url = 'http://openapi.seoul.go.kr:8088'\n type = 'xml'\n url = f'{corona_url}/{seoulapi_key}/{type}/Corona19Status/1/10'\n result = requests.get(url)\n soup = BeautifulSoup(result.text, 'xml')\n resultCode = soup.find('RESULT').find('CODE').get_text()\n if resultCode == 'INFO-000':\n list_total_count = int(soup.find('list_total_count').get_text())\n else:\n current_app.logger.info(soup.find('RESULT').find('MESSAGE').get_text())\n return\n\n last_sid = dm.get_seoul_last_sid()\n current_app.logger.debug(f'서울시 건수 - {list_total_count}, DB 보관 - 
{last_sid}')\n if list_total_count <= last_sid:\n return\n read_count = list_total_count - last_sid\n\n id_list, date_list, area_list, travel_list = [],[],[],[]\n contact_list, status_list, moving_list = [],[],[]\n\n for i in range(1, read_count, 1000):\n start_index = i\n end_index = i+1000-1 if i+1000-1<read_count else read_count\n\ny_sat[y_sat>0.5]=1+y_sat[y_sat>0.5]\ny_sat[y_sat<0.5]=0.5\n\nplot(t,y_sat,label='$y(t)$',linewidth=2.0)\nylabel('$y(t)$')\nxlabel('Time (sec.)')\nlegend(loc=1)\n\nshow()\n","sub_path":"cryptic.py","file_name":"cryptic.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"210969423","text":"import pulumi\nimport pulumi_aws as aws\n\nfrom iam import lambdaRole\n\nconfig = pulumi.Config()\n\nstartQueryExecutionFunction = aws.lambda_.Function(\n resource_name=\"start-query-execution\",\n code=pulumi.AssetArchive({\".\": pulumi.FileArchive(\"lambda/start_query_execution\")}),\n description=\"Starts the execution of an Amazon Athena query\",\n environment=aws.lambda_.FunctionEnvironmentArgs(\n variables={\"AWS_ACCOUNT_ID\": config.require(\"awsAccountId\")}\n ),\n handler=\"start_query_execution.handler\",\n name=\"start-query-execution\",\n role=lambdaRole.arn,\n runtime=\"python3.8\",\n)\n\ngetQueryExecutionFunction = aws.lambda_.Function(\n resource_name=\"get-query-execution\",\n code=pulumi.AssetArchive({\".\": pulumi.FileArchive(\"lambda/get_query_execution\")}),\n description=\"Gets information about the execution of an Amazon Athena query\",\n handler=\"get_query_execution.handler\",\n name=\"get-query-execution\",\n role=lambdaRole.arn,\n runtime=\"python3.8\",\n)\n","sub_path":"step-functions/lambda_.py","file_name":"lambda_.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"287316663","text":"import sys\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import QPixmap, QIcon\r\nfrom PyQt5.QtCore import Qt\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\n\r\n# CropImage = cv2.imread('Before.png', cv2.IMREAD_COLOR)\r\n#\r\n# xLength = 320\r\n# yLength = 280\r\n#\r\n# # cv2.resize(원본 이미지, 결과 이미지 크기, 보간법)\r\n# ChangedImage = cv2.resize(CropImage, (xLength, yLength), cv2.INTER_AREA)\r\n#\r\n# # 자른 사진을 다른이름으로 다른 경로에 저장합니다\r\n# cv2.imwrite(\"Before2.png\", ChangedImage)\r\n\r\n# 사진을 저장할 폴더 생성\r\ndef createFolder(directory):\r\n try:\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n except OSError:\r\n print('Error: Creating directory. 
' + directory)\r\n\r\ncreateFolder('Output')\r\ncreateFolder('Input')\r\n\r\n\r\nclass MyApp(QMainWindow):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n # 버튼을 눌렀는지 체크하는 변수\r\n self.BlurState = 0\r\n self.ShrpenState = 0\r\n self.GrayState = 0\r\n self.InvertState = 0\r\n\r\n # 내가 고른 파일 이름\r\n self.ChoiceFile = ''\r\n\r\n\r\n def initUI(self):\r\n ############################ CheckBox ############################\r\n self.Blur = QCheckBox('Blur', self)\r\n self.Blur.move(300, 60)\r\n self.Blur.stateChanged.connect(self.BlurButtonState)\r\n\r\n self.Shrpen = QCheckBox('Shrpen', self)\r\n self.Shrpen.move(300, 80)\r\n self.Shrpen.stateChanged.connect(self.ShrpenButtonState)\r\n\r\n self.Gray = QCheckBox('Gray', self)\r\n self.Gray.move(450, 60)\r\n self.Gray.stateChanged.connect(self.GrayButtonState)\r\n\r\n self.Invert = QCheckBox('Invert', self)\r\n self.Invert.move(450, 80)\r\n self.Invert.stateChanged.connect(self.InvertButtonState)\r\n\r\n self.none = QCheckBox('none', self)\r\n self.none.move(300, 100)\r\n self.none.stateChanged.connect(self.changeNone)\r\n ############################ CheckBox End ############################\r\n\r\n ############################## Button ################################\r\n LoadingBtn = QPushButton('사진 불러오기',self)\r\n LoadingBtn.resize(200, 100)\r\n LoadingBtn.move( 50 , 50)\r\n LoadingBtn.clicked.connect(self.showDialog)\r\n\r\n SaveBtn = QPushButton('사진 저장하기', self)\r\n SaveBtn.resize(200, 100)\r\n SaveBtn.move(550, 50)\r\n SaveBtn.clicked.connect(self.SaveImage)\r\n\r\n vbox = QHBoxLayout()\r\n vbox.addWidget(LoadingBtn)\r\n vbox.addWidget(SaveBtn)\r\n ############################# Button End ###############################\r\n\r\n ############################# Fixmap ###################################\r\n # 이미지를 띄울 라벨준비\r\n self.BeforeLabel = QLabel(self)\r\n self.AfterLabel = QLabel(self)\r\n # 라벨의 크기를 지정\r\n self.BeforeLabel.resize(320,280)\r\n self.AfterLabel.resize(320, 280)\r\n # 라벨의 위치지정\r\n self.BeforeLabel.move(50,200)\r\n self.AfterLabel.move(430, 200)\r\n # 라벨에 넣어줄 사진 지정\r\n BeforeImage = QPixmap('Before.png')\r\n AfterImage = QPixmap('Before.png')\r\n # 라벨에 사진 넣어주기\r\n self.BeforeLabel.setPixmap(QPixmap(BeforeImage))\r\n self.AfterLabel.setPixmap(QPixmap(AfterImage))\r\n ############################# Fixmap End ###################################\r\n\r\n self.setWindowTitle('이미지 수정프로그램')\r\n self.setGeometry(300, 300, 800, 540)\r\n self.show()\r\n\r\n \r\n # 파일불러오기 함수\r\n def showDialog(self):\r\n fname = QFileDialog.getOpenFileName(self, 'Open file', './')\r\n splitlist = []\r\n\r\n # self.fname, _ = QFileDialog.getOpenFileName(self, 'Open file', './')\r\n # print(self.fname)\r\n # self.showOrgImage()\r\n # self.loadImage()\r\n \r\n # 내가 선택한 파일의 이름\r\n self.ChoiceFile = fname[0]\r\n # 필요한 부분만 split\r\n splitlist = self.ChoiceFile.split('/')\r\n #print(splitlist)\r\n self.ChoiceFile = splitlist[len(splitlist)-1]\r\n #print(splitlist[len(splitlist)-1])\r\n\r\n ChangeImage = cv2.imread(self.ChoiceFile, cv2.IMREAD_COLOR)\r\n\r\n xLength = 320\r\n yLength = 280\r\n\r\n # cv2.resize(원본 이미지, 결과 이미지 크기, 보간법)\r\n ChangeImage = cv2.resize(ChangeImage, (xLength, yLength), cv2.INTER_AREA)\r\n\r\n cv2.imwrite(\"Input/ChoiceImage.png\", ChangeImage)\r\n\r\n # 사진을 고르면 새로운 사진으로 라벨 업데이트\r\n BeforeImage = QPixmap(\"Input/ChoiceImage.png\")\r\n self.BeforeLabel.setPixmap(QPixmap(BeforeImage))\r\n self.BeforeLabel.repaint()\r\n\r\n # 체크박스 상태 함수\r\n def BlurButtonState(self, state):\r\n if (state == 2): # 만약 상태가 눌린상태라면\r\n 
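# Editor's gloss: stateChanged delivers 0 (unchecked), 1 (partial) or 2\r\n # (checked), so the == 2 test above means Qt.Checked; writing it as\r\n # `state == Qt.Checked` (Qt is already imported) would be equivalent.\r\n 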
self.BlurState = 1\r\n self.Shrpen.setEnabled(False)\r\n else:\r\n self.BlurState = 0\r\n def ShrpenButtonState(self, state):\r\n if (state == 2): # 만약 상태가 눌린상태라면\r\n self.ShrpenState = 1\r\n self.Blur.setEnabled(False)\r\n else:\r\n self.ShrpenState = 0\r\n def GrayButtonState(self, state):\r\n if (state == 2): # 만약 상태가 눌린상태라면\r\n self.GrayState = 1\r\n else:\r\n self.GrayState = 0\r\n def InvertButtonState(self, state):\r\n if (state == 2): # 만약 상태가 눌린상태라면\r\n self.InvertState = 1\r\n else:\r\n self.InvertState = 0\r\n\r\n # None 체크박스 상태 함수\r\n def changeNone(self, state):\r\n if (state == 2): # 만약 상태가 눌린상태라면\r\n self.ShrpenState = 0\r\n self.BlurState = 0\r\n self.Shrpen.setCheckable(False)\r\n self.Blur.setCheckable(False)\r\n self.Shrpen.setEnabled(False)\r\n self.Blur.setEnabled(False)\r\n else:\r\n self.Shrpen.setEnabled(True)\r\n self.Blur.setEnabled(True)\r\n self.Shrpen.setCheckable(True)\r\n self.Blur.setCheckable(True)\r\n\r\n\r\n # 버튼들의 상태에 따라 이미지 변환후 저장\r\n def SaveImage(self):\r\n image = cv2.imread(self.ChoiceFile, cv2.IMREAD_COLOR)\r\n ChangeImage = 0\r\n\r\n if (self.InvertState == 1):\r\n ChangeImage = self.InvertDef(image)\r\n else:\r\n ChangeImage = image\r\n\r\n if (self.BlurState == 1):\r\n ChangeImage = self.BlurDef(ChangeImage)\r\n else:\r\n ChangeImage = ChangeImage\r\n\r\n if (self.GrayState == 1):\r\n ChangeImage = self.GrayDef(ChangeImage)\r\n else:\r\n ChangeImage = ChangeImage\r\n\r\n if (self.ShrpenState == 1):\r\n ChangeImage = self.ShrpenDef(ChangeImage)\r\n else:\r\n ChangeImage = ChangeImage\r\n\r\n xLength = 320\r\n yLength = 280\r\n\r\n # cv2.resize(원본 이미지, 결과 이미지 크기, 보간법)\r\n ChangeImage = cv2.resize(ChangeImage, (xLength, yLength), cv2.INTER_AREA)\r\n\r\n cv2.imwrite(\"Output/Result.png\", ChangeImage)\r\n\r\n # 사진을 고르면 새로운 사진으로 라벨 업데이트\r\n AfterImage = QPixmap(\"Output/Result.png\")\r\n self.AfterLabel.setPixmap(QPixmap(AfterImage))\r\n self.AfterLabel.repaint()\r\n\r\n def BlurDef(self, image):\r\n kernel = np.ones((2, 2), np.uint8)\r\n ChangedImage = cv2.filter2D(image, -1, kernel)\r\n return ChangedImage\r\n\r\n def InvertDef(self, image):\r\n dst = cv2.bitwise_not(image)\r\n return dst\r\n\r\n def GrayDef(self, image):\r\n dst = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n return dst\r\n\r\n def ShrpenDef(self, image):\r\n kernel = np.array([[1,1,1],[1,-8,1],[1,1,1]])\r\n dst = cv2.filter2D(image, -1, kernel)\r\n return dst\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = MyApp()\r\n sys.exit(app.exec_())","sub_path":"photoaction/imageConversation.py","file_name":"imageConversation.py","file_ext":"py","file_size_in_byte":8154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"308549974","text":"import sys\nsys.setrecursionlimit(100000)\ninput = sys.stdin.readline\n\ndef solution(r,c,n) :\n global result\n if (n == 1) :\n result += str(matrix[r][c])\n return\n\n chk = True\n for i in range(r,r+n) :\n for j in range(c,c+n) :\n if (matrix[r][c] != matrix[i][j]) :\n chk = False\n \n if chk :\n result += str(matrix[r][c])\n else :\n cut = n//2\n result += \"(\"\n solution(r,c,cut)\n solution(r,c+cut,cut)\n solution(r+cut,c,cut)\n solution(r+cut,c+cut,cut)\n result += \")\"\n\nn = int(input())\nmatrix = []\nresult = \"\"\nfor i in range(n) :\n matrix.append(list(map(int, 
input().rstrip())))\nsolution(0,0,n)\nprint(result)","sub_path":"장범규/[20.10.22]1992.py","file_name":"[20.10.22]1992.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"214289017","text":"from flask import Flask\nfrom flask_restful import Api\nfrom resources.servers import Servidores, Servidor\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napi = Api(app)\n\n@app.before_request\ndef cria_banco():\n banco.create_all()\n\napi.add_resource(Servidores, '/servidores')\napi.add_resource(Servidor,'/servidores/')\n\nif __name__== '__main__':\n from sql_alchemy import banco\n banco.init_app(app)\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"159880475","text":"\"\"\"add title to verdict\n\nRevision ID: 5f6909210e52\nRevises: 3dc47ee635e3\nCreate Date: 2020-07-22 08:32:59.391208\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5f6909210e52'\ndown_revision = '3dc47ee635e3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('verdict',\n sa.Column('title',\n sa.String(512)))\n\n\ndef downgrade():\n op.drop_column('verdict', 'title')\n","sub_path":"api/alembic/versions/5f6909210e52_add_title_to_verdict.py","file_name":"5f6909210e52_add_title_to_verdict.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"245305295","text":"from django.db import models\nfrom ico_portal.utils.datetime import datetime\n\nfrom .fields import TokenField\n\n\nDEPOSIT_STATE_CHOICES = [\n ('ACTUAL', 'Actual'),\n ('PREPARED', 'Prepared')\n]\n\nTOKENS_MOVE_DIRECTIONS = [\n ('IN', 'Incoming'),\n ('OUT', 'Outgoing')\n]\n\n\nclass TokensMove(models.Model):\n id = models.AutoField(primary_key=True)\n investor = models.ForeignKey('Investor', related_name='tokens_moves',\n on_delete=models.DO_NOTHING,\n to_field='eth_account', db_constraint=False,\n db_column='investor_account')\n amount = TokenField(blank=True, null=True)\n\n created_at = models.DateTimeField(default=datetime.utcnow)\n actualized_at = models.DateTimeField(blank=True, null=True)\n\n transfer = models.ForeignKey('Transfer', on_delete=models.DO_NOTHING,\n related_name='tokens_moves')\n\n state = models.CharField(max_length=10, choices=DEPOSIT_STATE_CHOICES,\n default='PREPARED')\n direction = models.CharField(max_length=3, choices=TOKENS_MOVE_DIRECTIONS)\n\n objects = models.Manager()\n\n class Meta:\n ordering = ['id']\n db_table = 'tokens_moves'\n\n def __str__(self):\n return f'TokensMove {self.id}'\n\n @property\n def actual(self):\n return self.state == 'ACTUAL'\n\n def actualize(self, date=None):\n if date is None:\n date = datetime.utcnow()\n\n self.state = 'ACTUAL'\n self.actualized_at = date\n","sub_path":"user_office/models/tokens_move.py","file_name":"tokens_move.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"201293814","text":"# -*- coding: utf-8 -*-\n\"\"\"Test the TcEx Threat Intel Module.\"\"\"\n\nfrom ..tcex_init import tcex\n\n\n# pylint: disable=W0201\nclass TestAdversaryGroups:\n \"\"\"Test TcEx Host Groups.\"\"\"\n\n 
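# Editor's sketch (hedged): pytest runs setup_class once per class, so all\n # tests share one Threat Intel client. A typical round-trip, built only\n # from the helpers defined below:\n #\n # ti = self.ti.adversary('demo', owner=tcex.args.tc_owner)\n # r = ti.create() # returns a requests-style response\n # assert r.json().get('status') == 'Success'\n # ti.delete()\n 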
def setup_class(self):\n \"\"\"Configure setup before all tests.\"\"\"\n self.ti = tcex.ti\n\n def test_update_name(self, name='adversary-name-42353'):\n \"\"\"Testing changing the adversary name before sending save request to TC\"\"\"\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner)\n name = 'adversary-name-42352'\n ti.set(name=name)\n r = ti.create()\n assert r.ok\n ti_data = r.json()\n assert ti_data.get('status') == 'Success'\n assert ti_data.get('data').get('adversary').get('name') == name\n ti.delete()\n\n def test_attributes(self, name='adversary-name-42353'):\n \"\"\"Tests adding, fetching, updating, and deleting host attributes\"\"\"\n adversary_id = self.adversary_create(name)\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner, unique_id=adversary_id)\n\n # assert that attribute is created.\n r = ti.add_attribute('description', 'description1')\n assert r.ok\n\n # assert that attribute data is correct\n json = r.json().get('data', {}).get('attribute', {})\n assert json.get('type').lower() == 'description'\n assert json.get('value').lower() == 'description1'\n for attribute in ti.attributes():\n assert attribute.get('value') == 'description1'\n\n # fetch the attribute id\n attribute_id = json.get('id')\n\n # assert that attribute is updated\n r = ti.update_attribute('description2', attribute_id)\n assert r.ok\n\n # assert that updated attribute data is correct\n for attribute in ti.attributes():\n assert attribute.get('value') == 'description2'\n\n # assert that attribute is deleted\n r = ti.delete_attribute(attribute_id)\n assert r.ok\n\n # assert that no attributes remain for this indicator/group/victim\n for attribute in ti.attributes():\n assert False\n\n # remove indicator/group/victim\n ti.delete()\n\n def test_adversary_get(self, name='adversary-name-42353'):\n \"\"\"Test adversary get.\"\"\"\n # create\n adversary_id = self.adversary_create(name)\n\n # get\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner, unique_id=adversary_id)\n r = ti.single()\n ti_data = r.json()\n assert r.status_code == 200\n assert ti_data.get('status') == 'Success'\n assert ti_data.get('data').get(ti.api_entity).get('name') == name\n\n # delete\n self.adversary_delete(adversary_id)\n\n def test_adversary_get_attributes(self, name='adversary-name-12453'):\n \"\"\"Test adversary get.\"\"\"\n # create\n adversary_id = self.adversary_create(name)\n self.test_adversary_add_attribute(\n adversary_id=adversary_id, attribute_type='Description', attribute_value='test1'\n )\n self.test_adversary_add_attribute(\n adversary_id=adversary_id, attribute_type='Description', attribute_value='test2'\n )\n self.test_adversary_add_attribute(\n adversary_id=adversary_id, attribute_type='Description', attribute_value='test3'\n )\n\n # get attributes\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner, unique_id=adversary_id)\n for attribute in ti.attributes():\n assert attribute\n break\n else:\n assert False\n\n # delete\n self.adversary_delete(adversary_id)\n\n def test_adversary_get_tags(self, name='adversary-name-64235'):\n \"\"\"Test adversary get.\"\"\"\n # create\n adversary_id = self.adversary_create(name)\n self.test_adversary_add_tag(adversary_id=adversary_id, tag='One')\n self.test_adversary_add_tag(adversary_id=adversary_id, tag='Two')\n\n # get tags\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner, unique_id=adversary_id)\n for tag in ti.tags():\n assert tag.get('name')\n break\n else:\n assert False\n\n # delete\n self.adversary_delete(adversary_id)\n\n def 
test_adversary_get_include(self, name='adversary-name-78159'):\n \"\"\"Test adversary get.\"\"\"\n adversary_id = self.adversary_create(name)\n self.test_adversary_add_attribute(\n adversary_id=adversary_id, attribute_type='Description', attribute_value='test123'\n )\n self.test_adversary_add_label(adversary_id=adversary_id, label='TLP:RED')\n self.test_adversary_add_tag(adversary_id=adversary_id, tag='PyTest')\n\n parameters = {'includes': ['additional', 'attributes', 'labels', 'tags']}\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner, unique_id=adversary_id)\n r = ti.single(params=parameters)\n ti_data = r.json()\n assert r.status_code == 200\n assert ti_data.get('status') == 'Success'\n assert ti_data.get('data').get('adversary').get('name') == name\n assert ti_data.get('data').get('adversary').get('attribute')[0].get('value') == 'test123'\n assert ti_data.get('data').get('adversary').get('securityLabel')[0].get('name') == 'TLP:RED'\n assert ti_data.get('data').get('adversary').get('tag')[0].get('name') == 'PyTest'\n\n # delete\n self.adversary_delete(adversary_id)\n\n def adversary_create(self, name='adversary-name-65341'):\n \"\"\"Test adversary create.\"\"\"\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner)\n r = ti.create()\n ti_data = r.json()\n assert r.status_code == 201\n assert ti_data.get('status') == 'Success'\n assert ti_data.get('data').get('adversary').get('name') == name\n return ti.unique_id\n\n def test_adversary_add_attribute(\n self,\n adversary_id=None,\n name='adversary-name-nkjvb',\n attribute_type='Description',\n attribute_value='Example Description.',\n ):\n \"\"\"Test adversary attribute add.\"\"\"\n\n should_delete = False\n if not adversary_id:\n should_delete = True\n adversary_id = self.adversary_create(name)\n\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner, unique_id=adversary_id)\n r = ti.add_attribute(attribute_type=attribute_type, attribute_value=attribute_value)\n attribute_data = r.json()\n assert r.status_code == 201\n assert attribute_data.get('status') == 'Success'\n assert attribute_data.get('data').get('attribute').get('value') == attribute_value\n if should_delete:\n self.adversary_delete(adversary_id)\n\n def test_adversary_add_label(\n self, adversary_id=None, name='adversary-name-ds4vb', label='TLP:GREEN'\n ):\n \"\"\"Test adversary attribute add.\"\"\"\n should_delete = False\n if not adversary_id:\n should_delete = True\n adversary_id = self.adversary_create(name)\n\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner, unique_id=adversary_id)\n r = ti.add_label(label=label)\n label_data = r.json()\n assert r.status_code == 201\n assert label_data.get('status') == 'Success'\n if should_delete:\n self.adversary_delete(adversary_id)\n\n def test_adversary_add_tag(\n self, adversary_id=None, name='adversary-name-fdsv23', tag='Crimeware'\n ):\n \"\"\"Test adversary attribute add.\"\"\"\n should_delete = False\n if not adversary_id:\n should_delete = True\n adversary_id = self.adversary_create(name)\n\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner, unique_id=adversary_id)\n r = ti.add_tag(tag)\n tag_data = r.json()\n assert r.status_code == 201\n assert tag_data.get('status') == 'Success'\n if should_delete:\n self.adversary_delete(adversary_id)\n\n def adversary_delete(self, adversary_id=None, name='adversary-name-bdsfd'):\n \"\"\"Test adversary delete.\"\"\"\n # create indicator\n if not adversary_id:\n adversary_id = self.adversary_create(name)\n\n # delete indicator\n ti = self.ti.adversary(name, 
owner=tcex.args.tc_owner, unique_id=adversary_id)\n r = ti.delete()\n ti_data = r.json()\n assert r.status_code == 200\n assert ti_data.get('status') == 'Success'\n\n def test_adversary_update(self, name='adversary-name-b3da3'):\n \"\"\"Test adversary update.\"\"\"\n # create indicator\n adversary_id = self.adversary_create(name)\n\n name = 'adversary-new-name-fdasb3'\n\n # update indicator\n ti = self.ti.adversary(name, owner=tcex.args.tc_owner, unique_id=adversary_id)\n r = ti.update()\n ti_data = r.json()\n assert r.status_code == 200\n assert ti_data.get('status') == 'Success'\n assert ti_data.get('data').get('adversary').get('name') == name\n\n # delete indicator\n self.adversary_delete(adversary_id)\n","sub_path":"tests/tcex_ti/test_adversary_interface.py","file_name":"test_adversary_interface.py","file_ext":"py","file_size_in_byte":9073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"645231840","text":"#coding=UTF8\na = ['vimrc','t.cpp', \\\n\t 'check.cpp', \\\n\t 'LineTree.cpp','treap.cpp', \\\n\t 'graph.cpp','flow.cpp','Tarjan.cpp', \\\n\t 'ac.cpp','sa.cpp','string.cpp','hash.cpp',\n\t 'NumberTheory.cpp', \\\n\t 'dp.cpp', \\\n\t 'fft.cpp', \\\n\t 'matrix.cpp', \\\n\t 'big.cpp']\nf = file('template.cpp','w')\nfor x in a:\n\ttmp = file(x,'r')\n\tf.write('//' + tmp.name + '\\n')\n\tf.write(tmp.read())\n\tf.write('\\n')\n\tf.write('//********************************************************************\\n')\n\tf.write('\\n')\n\ttmp.close()\nf.close()\n","sub_path":"code/temp/join[1].py","file_name":"join[1].py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"227055826","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nlogic setup and for comb count\nCreated on Fri Jun 9\n@author: Nils\n\"\"\"\n\n# pylint: disable=locally-disabled, bare-except\n\nimport ctypes\n\nclass FXEReport(object):\n \"\"\"data structure for FXE counter report\"\"\"\n def __init__(self):\n super().__init__()\n self.status = 0\n self.string = \"\"\n self.type = 0x7C00\n self.values = []\n\n\nclass FXE(object):\n \"\"\"access library for FXE counter\"\"\"\n\n def __init__(self, gui):\n super().__init__()\n self.dllname = \"none\"\n self.dll_version = \"none\"\n self.lib = None\n self.devices = [\"none\"]\n self.device = \"none\"\n self.connected = False\n self.gui = gui\n self.read_dll_version = None #self.KK_GetDLLVersion = None\n self.read_devices = None #self.KK_EnumerateDevices = None\n self.open_port = None #self.FX_OpenPort = None\n self.close_port = None #self.FX_ClosePort = None\n self.read_report = None #self.FX_GetReport = None\n self.send_command = None #self.FX_SendCommand = None\n self.sep_string = ctypes.create_string_buffer(b\".\")\n\n def __str__(self):\n return str(self.devices)\n\n def connect_fxe(self):\n \"\"\"open connection to actual FXE hardware\"\"\"\n dllname = \"kk_fx80e.dll\"\n lib = ctypes.WinDLL(dllname)\n self.read_dll_version = lib.KK_GetDLLVersion\n self.read_dll_version.restype = ctypes.c_char_p\n self.read_dll_version.argtypes = []\n self.read_devices = lib.KK_EnumerateDevices\n self.read_devices.restype = ctypes.c_int\n self.open_port = lib.FX_OpenPort\n self.open_port.restype = ctypes.c_int\n self.close_port = lib.FX_ClosePort\n self.read_report = lib.FX_GetReport\n self.read_report.restype = ctypes.c_int\n self.send_command = lib.FX_SendCommand\n self.send_command.restype = ctypes.c_int\n\n lib_version = 
self.read_dll_version()\n self.dll_version = str(lib_version.decode(\"ascii\"))\n strbuf_p = ctypes.c_char_p(None)\n strbuf_q = ctypes.c_void_p(ctypes.addressof(strbuf_p))\n retval = self.read_devices(strbuf_q)\n if retval != 0:\n self.gui.log(\"## enumerate fail\")\n self.connected = False\n return -1\n if not strbuf_p:\n self.gui.log(\"## no device\")\n self.connected = False\n return -2\n device_string = ctypes.string_at(strbuf_p).decode()\n self.devices = device_string.split(\",\")\n device_index = 0\n # self.gui.log(self.devices[device_index])\n strbuf = ctypes.create_string_buffer(self.devices[device_index].encode(\"ascii\"))\n self.device = ctypes.string_at(strbuf_p).decode()\n strbuf_p = ctypes.c_char_p(ctypes.addressof(strbuf))\n strbuf_q = ctypes.c_void_p(ctypes.addressof(strbuf_p))\n retval = self.open_port(strbuf_q)\n if retval != 1:\n self.gui.log(\"## open fail\")\n self.connected = False\n return -3\n\n self.connected = True\n return 0\n\n\n def get_report(self):\n \"\"\"read single line of report from FXE\"\"\"\n report = FXEReport()\n if not self.connected:\n # no open connection\n report.status = -1\n report.string = \"not connected\"\n return report\n databuf_p = ctypes.c_char_p(ctypes.addressof(self.sep_string))\n databuf_q = ctypes.c_void_p(ctypes.addressof(databuf_p))\n retval = self.read_report(databuf_q)\n if retval != 1:\n # error reported by DLL\n report.status = -2\n report.string = (\n \"error [\"+str(retval)+\"] \"\n +str(ctypes.string_at(databuf_p).decode(\"ascii\"))\n )\n return report\n if not databuf_p:\n return report # returm empty report with status 0\n\n report.status = 1 # valid answer, will try to parse\n report.string = str(ctypes.string_at(databuf_p).decode(\"ascii\"))\n type_string = report.string[0:4]\n try:\n report.type = int(type_string, 16)\n except:\n report.type = 0x7C00\n report.string = \"failed to parse: \"+report.string\n return report\n\n\n def init_sync(self):\n \"\"\"activate sync on external pulse\"\"\"\n syncbuf = ctypes.create_string_buffer(b\"\\x0f\")\n syncbuf_p = ctypes.c_char_p(ctypes.addressof(syncbuf))\n #self.gui.log(str(ctypes.string_at(syncbuf_p).decode(\"ascii\")))\n syncbuf_q = ctypes.c_void_p(ctypes.addressof(syncbuf_p))\n retval = self.send_command(syncbuf_q)\n #retstr = str(ctypes.string_at(syncbuf_p).decode(\"ascii\"))\n #self.gui.log(\"# sync result \" + str(retval) + \" \" + retstr)\n return retval # retval 1 indicates success\n\n\n def set_mode(self, mode):\n \"\"\"set measurement mode of FXE, reject unimplemented modes\"\"\"\n if mode == 0:\n cmdbuf = ctypes.create_string_buffer(b\"\\x40\") # instantaneous phase\n elif mode == 1:\n cmdbuf = ctypes.create_string_buffer(b\"\\x41\") # average phase\n elif mode == 2:\n cmdbuf = ctypes.create_string_buffer(b\"\\x42\") # instantaneous frequency\n elif mode == 3:\n cmdbuf = ctypes.create_string_buffer(b\"\\x43\") # average frequency\n else:\n self.gui.log(\"unimplemented mode: \" + str(mode))\n return -4\n cmdbuf_p = ctypes.c_char_p(ctypes.addressof(cmdbuf))\n cmdbuf_q = ctypes.c_void_p(ctypes.addressof(cmdbuf_p))\n retval = self.send_command(cmdbuf_q)\n return retval\n\n\n def set_interval(self, interval):\n \"\"\"set measurement interval of FXE, reject unimplemented values\"\"\"\n if interval == 100:\n cmdbuf = ctypes.create_string_buffer(b\"\\x26\") # 100 ms\n elif interval == 1000:\n cmdbuf = ctypes.create_string_buffer(b\"\\x29\") # 1000 ms\n else:\n self.gui.log(\"unimplemented interval: \" + str(interval) + \" ms\")\n return -3\n cmdbuf_p = 
ctypes.c_char_p(ctypes.addressof(cmdbuf))\n cmdbuf_q = ctypes.c_void_p(ctypes.addressof(cmdbuf_p))\n retval = self.send_command(cmdbuf_q)\n return retval\n\n\n def disconnect(self):\n \"\"\"close connection to FXE device\"\"\"\n if self.connected:\n self.close_port()\n self.devices = [\"none\"]\n self.device = \"none\"\n self.connected = False\n","sub_path":"fxe.py","file_name":"fxe.py","file_ext":"py","file_size_in_byte":6507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"500872186","text":"from facebook_business.adobjects.adsinsights import AdsInsights\n\nfrom app.models import Ad\nfrom app.service.constants import OBJECTIVE_RESULT\nfrom app.service.mixins import AdsReportMixin, AdObjectMixin\n\n\nclass AdService(AdObjectMixin, AdsReportMixin):\n \"\"\"Ad service for AdAccount\"\"\"\n FIELDS = [\n AdsInsights.Field.campaign_name,\n AdsInsights.Field.campaign_id,\n AdsInsights.Field.adset_name,\n AdsInsights.Field.adset_id,\n AdsInsights.Field.ad_name,\n AdsInsights.Field.adset_id,\n AdsInsights.Field.ad_id,\n AdsInsights.Field.objective,\n AdsInsights.Field.impressions,\n AdsInsights.Field.frequency,\n AdsInsights.Field.spend,\n AdsInsights.Field.actions,\n AdsInsights.Field.cost_per_action_type,\n ]\n\n def _get_remote_ad_dlist(self, id_list):\n \"\"\"\n Fields to retrieve: effective_status, daily_budget, lifetime_budget, optimization_goal\n \"\"\"\n fields = ['effective_status']\n params = {\n 'limit' : 1000,\n 'filtering': [\n {\n 'field' : 'ad.id',\n 'operator': 'IN',\n 'value' : id_list\n },\n ],\n }\n ad_cursor = self._adaccount.get_ads(params=params, fields=fields)\n dlist = {ad['id']: ad for ad in ad_cursor}\n return dlist\n\n def _add_metafields_to_insights_dlist(self, insights_dlist):\n id_list = list(insights_dlist.keys())\n remote_ad_dlist = self._get_remote_ad_dlist(id_list)\n for ad_id, ad in remote_ad_dlist.items():\n insights_dlist[ad_id]['status'] = ad['effective_status']\n\n @staticmethod\n def _add_local_fields_to_insights_dlist(insights_dlist):\n \"\"\" Local attributes to add: kpi_value, products \"\"\"\n id_list = list(insights_dlist.keys())\n ad_dlist = Ad.objects.in_bulk(id_list=id_list)\n for ad_id, ad in ad_dlist.items():\n insights_dlist[ad_id]['products'] = list(ad.products.all())\n insights_dlist[ad_id]['kpi_value'] = ad.kpi_value\n\n def get_report(self, status, objective, date_preset, adset_id=None):\n \"\"\"\n :return: dlist of Insights {id: Insights including metafields, localfields}\n \"\"\"\n # 0. build params, fields\n params, fields = self._get_params_fields(status, date_preset, objective, 'ad', adset_id=adset_id)\n # 1. get dlist of Insights\n insights_dlist = self._get_insights_dlist(fields=fields, params=params, level='ad') # I/O\n if len(insights_dlist) == 0:\n return {}\n # 2. Add metafields to Insights\n self._add_metafields_to_insights_dlist(insights_dlist)\n # 3. 
Add local fields to Insights\n self._add_local_fields_to_insights_dlist(insights_dlist)\n return insights_dlist\n\n def _get_active_instance_id_list(self):\n params = {\n \"limit\" : 1000,\n \"effective_status\": ['ACTIVE'],\n \"is_completed\" : False,\n }\n ads = self._adaccount.get_ads(params=params)\n ad_id_list = [obj['id'] for obj in ads]\n return ad_id_list\n","sub_path":"app/service/ad_service.py","file_name":"ad_service.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"471200786","text":"import itertools\n\nwith open(\"input\") as file:\n target = int(file.read())\n \nfor part in (1,2):\n multiplier = 10 if part == 1 else 11\n sieve = [0] * (1 + target//10)\n for i in range(1, len(sieve)):\n for j in range(i, len(sieve), i):\n if part == 2 and (j / i) == 50: break\n sieve[j] += i*multiplier\n print(next(idx for idx, item in enumerate(sieve) if item >= target))","sub_path":"20/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"140716859","text":"from PIL import Image, ImageEnhance, ImageFilter\n\n\ndef blur_processor(image, blur=False, **kwargs):\n if blur:\n return image.filter(ImageFilter.GaussianBlur(radius=blur))\n return image\n\n\ndef overlay_processor(image, overlay=False, **kwargs):\n if overlay:\n color = '#333333'\n alpha = 0.25\n saturation = 0.5\n\n if isinstance(overlay, dict):\n if 'color' in overlay:\n color = overlay['color']\n\n if 'alpha' in overlay:\n alpha = overlay['alpha']\n\n if 'saturation' in overlay:\n saturation = overlay['saturation']\n\n overlay_image = Image.new(image.mode, image.size, color)\n return Image.blend(ImageEnhance.Color(image).enhance(saturation), overlay_image, alpha)\n else:\n return image","sub_path":"motius_django/thumbnail_processors.py","file_name":"thumbnail_processors.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"545336625","text":"# coding=utf-8\r\n\r\nimport os\r\nimport sys\r\n\r\nimport xlrd\r\n\r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\n\r\n\r\nclass ExtendExcelLibrary():\r\n ROBOT_LIBRARY_SCOPE = 'Global'\r\n\r\n def __init__(self):\r\n self.excel = []\r\n self.data = None\r\n\r\n def _save_excel(self, excel_path):\r\n\r\n excel_dict = {\"name\": \"\", \"data\": \"\"}\r\n data = xlrd.open_workbook(filename=excel_path)\r\n\r\n # 只保存文件名和data\r\n excel_name = os.path.basename(excel_path)\r\n excel_dict[\"name\"] = excel_name\r\n excel_dict[\"data\"] = data\r\n\r\n return excel_dict\r\n\r\n def open_excel(self, excel_path):\r\n \"\"\"同一个Excel只打开一次\r\n \"\"\"\r\n excel_name = os.path.basename(excel_path)\r\n\r\n # 判断文件是否已经保存\r\n if excel_name.encode('unicode_escape') not in str(self.excel):\r\n excel_dict = self._save_excel(excel_path)\r\n self.excel.append(excel_dict)\r\n\r\n for i in range(len(self.excel)):\r\n if self.excel[i][\"name\"] == excel_name:\r\n self.data = self.excel[i][\"data\"]\r\n break\r\n\r\n def read_cell_data(self, sheet_name, column, row):\r\n \"\"\"sheet_name, column, row\r\n \"\"\"\r\n table = self.data.sheet_by_name(sheet_name)\r\n cell_data = table.cell(int(row), int(column)).value\r\n\r\n return cell_data\r\n\r\n def get_row_count(self, sheet_name):\r\n \"\"\"sheet_name\r\n \"\"\"\r\n table = self.data.sheet_by_name(sheet_name)\r\n rows = table.nrows\r\n\r\n return 
rows\r\n","sub_path":"chenchen-project/Keywords-公共/MyLibrary/ExtendExcelLibrary.py","file_name":"ExtendExcelLibrary.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"44193358","text":"from django.http import HttpResponse, HttpResponseRedirect, Http404\r\nfrom django.shortcuts import get_object_or_404, render\r\nfrom django.utils import timezone\r\nfrom django.urls import reverse\r\n\r\nfrom .models import Rota, Conexao\r\nfrom aircraft.models import Aircraft\r\n\r\n\r\ndef index(request):\r\n rota_list = Rota.objects.all()\r\n context = {\r\n 'rota_list': rota_list,\r\n }\r\n return render(request, 'route/index.html', context)\r\n\r\ndef result(request, rota_id):\r\n rota = get_object_or_404(Rota, pk=rota_id)\r\n try:\r\n conexao_latest = rota.conexao_set.order_by('-id')[0]\r\n except IndexError:\r\n context = {\r\n 'rota': rota,\r\n }\r\n else:\r\n context = {\r\n 'rota': rota,\r\n 'conexao_latest': conexao_latest,\r\n }\r\n return render(request, 'route/result.html', context)\r\n\r\ndef addRota(request, aircraft_id):\r\n aircraft = get_object_or_404(Aircraft, pk=aircraft_id)\r\n rota = Rota(aircraft_id=aircraft,\r\n cod_rota='vazio')\r\n rota.save()\r\n return HttpResponseRedirect(reverse('aircraft:result', args=(aircraft.id,)))\r\n\r\ndef addConexao(request, rota_id):\r\n if request.POST['local_origem'] == '' or request.POST['local_destino'] == '':\r\n raise KeyError(\"Local não encontrado\")\r\n if request.POST['previsao_chegada'] == '' or request.POST['previsao_destino'] == '':\r\n raise KeyError(\"Previsão não encontrada\")\r\n if request.POST['valor'] == '':\r\n raise KeyError(\"Valor não encontrada\")\r\n rota = get_object_or_404(Rota, pk=rota_id)\r\n \r\n ###\r\n cod_conexao=Conexao.getCod_conexao(\r\n request.POST['local_origem'],\r\n request.POST['local_destino'],\r\n rota.id)\r\n rota.addConexao(cod_conexao, request.POST['local_origem'], request.POST['local_destino'],\r\n request.POST['previsao_chegada'], request.POST['previsao_destino'], request.POST['valor'])\r\n ###\r\n \r\n \r\n return HttpResponseRedirect(reverse('route:result', args=(rota.id,)))\r\n \r\n","sub_path":"KBACO_Airlines v0.1/route/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"56063353","text":"\"\"\"\nhttps://leetcode.com/problems/letter-combinations-of-a-phone-number\n\"\"\"\n\n\nclass Solution(object):\n def letterCombinations(self, digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n map = ['', '', 'abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv',\n 'wxyz']\n\n res = ['']\n for digit in digits:\n res = [each + ch for each in res for ch in map[int(digit)]]\n return [] if not res[0] else res\n\nprint(Solution().letterCombinations('233'))\n","sub_path":"leetcode/facebook/letter-combinations-of-a-phone-number.py","file_name":"letter-combinations-of-a-phone-number.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"207176025","text":"# coding=utf-8\n\"\"\"Unit tests for :mod:`movie_recommender.cli.mr_graph`.\"\"\"\nimport unittest\n\nfrom movie_recommender.cli.mr_graph import Columns, get_points\nfrom movie_recommender.graph import Point\nfrom .utils import get_fixture\n\n\nclass GetPointsTestCase(unittest.TestCase):\n \"\"\"Test 
:func:`movie_recommender.cli.mr_graph.get_points`.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Set class-wide variables.\"\"\"\n cls.points = (\n Point(0.25, 4.4),\n Point(0.265, 4.62),\n Point(0.25, 3.84),\n Point(0.256, 4.16),\n Point(0.27, 4.28),\n Point(0.25, 4.5),\n Point(0.269, 4.47)\n )\n\n def test_golden_path(self):\n \"\"\"Verify the function behaves correctly when given ideal input.\"\"\"\n with open(get_fixture('xy.csv')) as handle:\n for i, point in enumerate(get_points(handle, Columns(0, 1))):\n self.assertEqual(point, self.points[i])\n\n def test_header_row(self):\n \"\"\"Verify the function behaves correctly when asked to skip a header.\"\"\"\n with open(get_fixture('xy-header.csv')) as handle:\n for i, point in enumerate(\n get_points(handle, Columns(0, 1), header_rows=1)):\n self.assertEqual(point, self.points[i])\n","sub_path":"python/movie-recommender/tests/unit/test_cli_mr_graph.py","file_name":"test_cli_mr_graph.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"574488869","text":"from Tools.solar import SolarCamera, SolarMovement\nimport RPi.GPIO as GPIO\nimport time\n# pulneg, dirpos, dirneg, enblpin, servo_increment\nsolar_movement = SolarMovement()\nsolar_dream = SolarCamera()\n\nservo_current_position = None\n\nwindow_center = solar_dream.get_window_center\n\n\ndef automated(sensitivity):\n sun_coor = None\n if solar_dream.is_there_sun:\n sun_coor = solar_dream.get_sun_coordinates\n solar_dream.mark_sun()\n # X-axis\n solar_movement.stepper_enable()\n while sun_coor and abs(window_center[0] - sun_coor[0]) > sensitivity:\n if sun_coor[0] < window_center[0]:\n solar_movement.stepper_move_left(67)\n elif sun_coor[0] > window_center[0]:\n solar_movement.stepper_move_right(67)\n solar_dream.get_image() # get new image\n if solar_dream.is_there_sun:\n sun_coor = solar_dream.get_sun_coordinates\n solar_dream.mark_sun()\n else:\n sun_coor = None\n solar_dream.show_image()\n solar_movement.stepper_disable()\n # Y-axis\n while sun_coor and abs(window_center[1] - sun_coor[1]) > sensitivity:\n if sun_coor[1] < window_center[1]:\n solar_movement.servo_right()\n elif sun_coor[1] > window_center[1]:\n solar_movement.servo_left()\n solar_dream.get_image() # get new image\n if solar_dream.is_there_sun:\n sun_coor = solar_dream.get_sun_coordinates\n solar_dream.mark_sun()\n else:\n sun_coor = None\n solar_dream.show_image() # show the image\n\n\ninput_servo_left = 31\ninput_servo_right = 33\ninput_stepper_left = 35\ninput_stepper_right = 37\n\nGPIO.setmode(GPIO.BOARD)\n\nGPIO.setup(input_servo_left, GPIO.IN)\nGPIO.setup(input_servo_right, GPIO.IN)\nGPIO.setup(input_stepper_left, GPIO.IN)\nGPIO.setup(input_stepper_right, GPIO.IN)\n\n\ndef read_debounce(pinNum, previousButtonState):\n if GPIO.input(pinNum) != previousButtonState:\n time.sleep(0.01)\n return GPIO.input(pinNum)\n\n\ndef manualStepperAdjust(solar_movement):\n currentStepperLeft = False\n currentStepperRight = False\n previousStepperLeft = False\n previousStepperRight = False\n\n while True:\n currentStepperLeft = read_debounce(\n input_stepper_left, previousStepperLeft)\n currentStepperRight = read_debounce(\n input_stepper_right, previousStepperRight)\n if (previousStepperLeft == False and currentStepperLeft) or (previousStepperLeft and currentStepperLeft):\n solar_movement.stepper_enable()\n solar_movement.stepper_move_left(200)\n print('Stepper Moving Left')\n solar_movement.stepper_disable()\n elif 
(previousStepperRight == False and currentStepperRight) or (previousStepperRight and currentStepperRight):\n solar_movement.stepper_enable()\n solar_movement.stepper_move_right(200)\n print('Stepper Moving Right')\n solar_movement.stepper_disable()\n previousStepperLeft = currentStepperLeft\n previousStepperRight = currentStepperRight\n\n\ndef manualServoAdjust(solar_movement):\n previousServoLeft = False\n previousServoRight = False\n currentServoLeft = False\n currentServoRight = False\n\n while True:\n currentServoLeft = read_debounce(input_servo_left, previousServoLeft)\n currentServoRight = read_debounce(\n input_servo_right, previousServoRight)\n if (previousServoLeft == False and currentServoLeft) or (previousServoLeft and currentServoLeft):\n solar_movement.servo_left()\n print('Servo Moving Left')\n elif previousServoRight == False and currentServoRight or (previousServoRight and currentServoRight):\n solar_movement.servo_right()\n print('Servo Moving Right')\n\n previousServoLeft = currentServoLeft\n previousServoRight = currentServoRight\n\n\ndef manual():\n while True:\n manualServoAdjust(solar_movement)\n manualStepperAdjust(solar_movement)\n\n\ndef main():\n sensitivity = 10\n try:\n while True:\n solar_dream.get_image() # get image\n automated(sensitivity)\n solar_dream.show_image()\n except KeyboardInterrupt:\n solar_movement.clean_up()\n print('Terminated')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Samples/samplemanualcontrol.py","file_name":"samplemanualcontrol.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"229587706","text":"from PIL import Image, ImageFont, ImageDraw\n\n\nwith open('names.txt', 'r') as fl:\n for name in fl:\n x_cd, y_cd = map(int, input().split())\n img = Image.open('certificate.jpg')\n draw = ImageDraw.Draw(img)\n font = ImageFont.truetype('Acme-Regular.ttf', 30)\n draw.text((x_cd, y_cd), name, (24, 72, 136), font = font)\n img.save('output_certificate_' + name + '.jpg')","sub_path":"Solutions/arpitmisraw/Task7/certificate.py","file_name":"certificate.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"77418206","text":"#!/usr/bin/python\n\nfrom django.conf.urls import patterns, include, url\nfrom django.conf import settings\nfrom django.contrib import admin\n\nfrom django.conf.urls.static import static\n\nfrom rest_framework import routers\nfrom restapi import views\n\n\n# RESTful API\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'groups', views.GroupViewSet)\nrouter.register(r'quasis', views.QuasiShortViewSet)\nrouter.register(r'ships', views.ShipViewSet)\nrouter.register(r'quasi_sums', views.PlottingQuasiSumViewSet)\nrouter.register(r'a4s', views.PlottingA4ViewSet)\n\nadmin.autodiscover()\n\nurlpatterns = patterns(\n '',\n url(r'^$', 'MySQL_DB_Website.views.home', name='home'),\n\n url(r'^charts_test_db/', 'MySQL_DB_Website.views.charts', name='charts'),\n\n url(\n r'^charts_test/',\n 'MySQL_DB_Website.views.charts_test',\n name='charts_test'),\n\n url(\n r'^django_nvd3/',\n 'MySQL_DB_Website.views.django_nvd3',\n name='django_nvd3'),\n\n url(\n r'^charts_nvd3/',\n 'MySQL_DB_Website.views.charts_nvd3',\n name='charts_nvd3'),\n\n url(\n r'^table_inspection/',\n 'MySQL_DB_Website.views.table_inspection',\n name='table_inspection'),\n\n url(\n r'^database_structure/',\n 
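# Editor's note (hedged): dotted-string views like the one below only work\n # with the old patterns('', ...) API, which Django removed in 1.10; a\n # modern equivalent would import the view and pass the callable directly.\n 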
'MySQL_DB_Website.views.database_structure',\n name='database_structure'),\n\n url(r'^charts/', include('charts.urls')),\n url(r'^diff_csv/', include('diff_csv.urls')),\n url(r'^videos/', include('videos.urls')),\n\n url(\n r'^data_consolidation_daemon/',\n include('data_consolidation_daemon.urls')),\n\n url(\n r'^bokeh_test/',\n 'MySQL_DB_Website.views.bokeh_test',\n name='bokeh_test'),\n\n # url(r'^MySQL_DB_Website/', include('MySQL_DB_Website.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n url(r'^restapi/', include(router.urls)),\n\n url(\n r'^restapi/api-auth/',\n include('rest_framework.urls', namespace='rest_framework')),\n\n url(r'^angular_plotting/', include('angular_plotting.urls')),\n url(r'^ewr_app/', include('ewr_app.urls')),\n)\n","sub_path":"MySQL_DB_Website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"306399122","text":"# -*- coding: utf-8 -*- \n# 다른축을 참조하는 plot 겹쳐서 표현하기\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nt = [1.,2.,3.,4.]\naa=[11.4, 12.7, 13.1, 14.56]\nplt.plot(t, aa, 'b-o', label=\"aa\")\nplt.text(3,14.7,\"<--------------- aa\",verticalalignment='top', \nhorizontalalignment='right') \nplt.xlabel('no')\nplt.ylabel('aa')\n\ncc=[10.4, 10.7, 9.1, 13.56]\nplt.plot(t, cc, 'g-o', label=\"cc\")\nplt.text(3,14.7,\"<--------------- cc\",verticalalignment='top', \nhorizontalalignment='right') \nplt.xlabel('no')\nplt.ylabel('aa')\n\nax2 = plt.twinx()\nbb = [0.9, 2.2, 3.54, 4.0]\nplt.plot(t, bb, 'r-s',label=\"bb\")\nplt.text(3,2.2,\"bb ------------------>\",verticalalignment='top', \nhorizontalalignment='left') \nplt.ylabel('bb')\nax2.yaxis.tick_right()\nplt.show()\n\n\n","sub_path":"example/report/test/AxisPlotExample.py","file_name":"AxisPlotExample.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"584930035","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.core.files.storage\nimport game.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('game', '0030_auto_20160301_2128'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='group',\n options={'ordering': ['name'], 'verbose_name': 'group'},\n ),\n migrations.AddField(\n model_name='competition',\n name='my_games_active',\n field=models.BooleanField(default=False),\n ),\n ]\n","sub_path":"game/migrations/0031_auto_20160302_0403.py","file_name":"0031_auto_20160302_0403.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"570399499","text":"import dataset_generator as dataset\nimport numpy as np\nimport os\n\nclass pipeline4:\n\n\tdef __init__(self, cut_classifier, binary_nets, classes=dataset.ALPHABET_ALL ):\n\n\t\tself._classes = classes\n\t\tself._cut_classifier = cut_classifier\n\t\tself._binary_nets = binary_nets\n\n\n\n\tdef predict(self, X_test, verbose=0):\n\t\tprediction_cuts = self._cut_classifier.predict(X_test, verbose=verbose)\n\n\t\tindex_good_letters = []\n\n\t\tfor i,(_,prob_letter) in enumerate(prediction_cuts):\n\t\t\tif 
prob_letter>=0.5:\n\t\t\t\tindex_good_letters.append(i)\n\n\t\tX_test = np.array(X_test)\n\t\tX_test_pip1 = X_test[index_good_letters]\n\n\t\tprediction_pip1 = self._binary_nets.predict(X_test_pip1)\n\n\t\tpredictions = []\n\n\t\tpip_index = 0\n\n\t\tfor i,_ in enumerate(X_test):\n\t\t\tif not i in index_good_letters:\n\t\t\t\tpredictions.append((False, []))\n\t\t\telse:\n\t\t\t\tpredictions.append(prediction_pip1[pip_index])\n\t\t\t\tpip_index += 1\n\n\t\treturn predictions\n\n\n\n","sub_path":"Notebooks/pipeline4.py","file_name":"pipeline4.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"362677700","text":"import sys\nfrom queue import Queue\nfrom collections import defaultdict\nfrom itertools import permutations\nfrom yal.io import *\nfrom yal.util import *\nfrom yal.grid import *\nfrom yal.graph import *\nfrom yal.geo2d import *\n\n# Make sure ~/.config/aocd/token is correct! (Chrome inspector -> Application tab -> Cookies)\n# scanner = Scanner(sys.stdin)\nlines = [line.strip() for line in sys.stdin.readlines()]\n\ndots = set()\nfor line in lines:\n if line == \"\":\n break\n (x,y) = get_ints(line)\n p = Point(x,y)\n dots.add(p)\n\nprint(len(dots))\n\nfor line in lines:\n if line.startswith(\"fold\"):\n pos = get_ints(line)[0]\n new_dots = set()\n if 'y' in line:\n for d in dots:\n np = d if d.y < pos else Point(d.x, 2*pos-d.y)\n new_dots.add(np)\n else:\n for d in dots:\n np = d if d.x < pos else Point(2*pos-d.x, d.y)\n new_dots.add(np)\n dots = new_dots\n for p in dots:\n assert p.x >= 0 and p.y >= 0\n\nprint(len(dots))\n\nv = {x:'#' for x in dots}\nGrid.from_sparse_map(v).show()\n","sub_path":"src/year2021/day13_newgrid.py","file_name":"day13_newgrid.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"182243578","text":"import logging\nimport os\n\nimport psycopg2\nfrom psycopg2 import sql\nfrom sqlalchemy import MetaData, Table, Column\nfrom geoalchemy2 import Geometry\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.dialects.postgresql import DATE, NUMERIC, VARCHAR\n\nlog = logging.getLogger(__name__)\n\n\nclass Database(object):\n \"\"\"Wrapper around psycopg2 and sqlachemy\"\"\"\n\n def __init__(self, url=os.environ.get(\"DATABASE_URL\")):\n self.url = url\n self.engine = create_engine(url)\n self.conn = psycopg2.connect(url)\n # make sure postgis is available\n try:\n self.query(\"SELECT postgis_full_version()\")\n except psycopg2.errors.UndefinedFunction:\n log.error(\"Cannot find PostGIS, is extension added to database %s ?\", url)\n raise psycopg2.errors.UndefinedFunction\n\n # supported oracle/wfs to postgres types\n self.supported_types = {\n \"NUMBER\": NUMERIC,\n \"VARCHAR2\": VARCHAR,\n \"DATE\": DATE,\n }\n\n @property\n def schemas(self):\n \"\"\"List all non-system schemas in db\"\"\"\n sql = \"\"\"SELECT schema_name FROM information_schema.schemata\n ORDER BY schema_name\"\"\"\n schemas = self.query(sql)\n return [s[0] for s in schemas if s[0][:3] != \"pg_\"]\n\n @property\n def tables(self):\n \"\"\"List all non-system tables in the db\"\"\"\n tables = []\n for schema in self.schemas:\n tables = tables + [schema + \".\" + t for t in self.tables_in_schema(schema)]\n return tables\n\n def tables_in_schema(self, schema):\n \"\"\"Get a listing of all tables in given schema\"\"\"\n sql = \"\"\"SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = 
%s\"\"\"\n return [t[0] for t in self.query(sql, (schema,))]\n\n def query(self, sql, params=None):\n \"\"\"Execute sql and return all results\"\"\"\n with self.conn:\n with self.conn.cursor() as curs:\n curs.execute(sql, params)\n result = curs.fetchall()\n return result\n\n def execute(self, sql, params=None):\n \"\"\"Execute sql and return only whether the query was successful\"\"\"\n with self.conn:\n with self.conn.cursor() as curs:\n result = curs.execute(sql, params)\n return result\n\n def execute_many(self, sql, params):\n \"\"\"Execute many sql\"\"\"\n with self.conn:\n with self.conn.cursor() as curs:\n curs.executemany(sql, params)\n\n def create_schema(self, schema):\n if schema not in self.schemas:\n log.info(f\"Schema {schema} does not exist, creating it\")\n dbq = sql.SQL(\"CREATE SCHEMA {schema}\").format(\n schema=sql.Identifier(schema)\n )\n self.execute(dbq)\n\n def drop_table(self, schema, table):\n if schema + \".\" + table in self.tables:\n log.info(f\"Dropping table {schema}.{table}\")\n dbq = sql.SQL(\"DROP TABLE {schema}.{table}\").format(\n schema=sql.Identifier(schema),\n table=sql.Identifier(table),\n )\n self.execute(dbq)\n\n def define_table(\n self,\n schema_name,\n table_name,\n table_details,\n geom_type,\n table_comments=None,\n primary_key=None,\n append=False,\n ):\n \"\"\"build sqlalchemy table definition from bcdc provided json definitions\"\"\"\n # remove columns of unsupported types, redundant columns\n table_details = [\n c for c in table_details if c[\"data_type\"] in self.supported_types.keys()\n ]\n table_details = [\n c\n for c in table_details\n if c[\"column_name\"] not in [\"FEATURE_AREA_SQM\", \"FEATURE_LENGTH_M\"]\n ]\n\n # translate the oracle types to sqlalchemy provided postgres types\n columns = []\n for i in range(len(table_details)):\n column_name = table_details[i][\"column_name\"].lower()\n column_type = self.supported_types[table_details[i][\"data_type\"]]\n # append precision if varchar or numeric\n if table_details[i][\"data_type\"] == \"VARCHAR2\":\n column_type = column_type(int(table_details[i][\"data_precision\"]))\n # check that comments are present\n if \"column_comments\" in table_details[i].keys():\n column_comments = table_details[i][\"column_comments\"]\n else:\n column_comments = None\n if column_name == primary_key:\n columns.append(\n Column(\n column_name,\n column_type,\n primary_key=True,\n comment=column_comments,\n )\n )\n else:\n columns.append(\n Column(\n column_name,\n column_type,\n comment=column_comments,\n )\n )\n\n # make everything multipart\n # (some datasets have mixed singlepart/multipart geometries)\n if geom_type[:5] != \"MULTI\":\n geom_type = \"MULTI\" + geom_type\n columns.append(Column(\"geom\", Geometry(geom_type, srid=3005)))\n metadata_obj = MetaData()\n table = Table(\n table_name,\n metadata_obj,\n *columns,\n comment=table_comments,\n schema=schema_name,\n )\n\n if schema_name not in self.schemas:\n self.create_schema(schema_name)\n\n # drop existing table if append is not flagged\n if schema_name + \".\" + table_name in self.tables and not append:\n log.info(f\"Table {schema_name}.{table_name} exists, overwriting\")\n self.drop_table(schema_name, table_name)\n\n # create the table\n if schema_name + \".\" + table_name not in self.tables:\n log.info(f\"Creating table {schema_name}.{table_name}\")\n table.create(self.engine)\n\n return table\n\n def get_columns(self, schema, table):\n metadata_obj = MetaData(schema=schema)\n table = Table(table, metadata_obj, schema=schema, 
autoload_with=self.engine)\n return list(table.columns.keys())\n","sub_path":"bcdata/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"157731443","text":"from pygame import mixer\r\nimport os\r\nimport tkinter\r\nlist1=os.listdir(\"F:\\songs\\Tholi Prema (2018) ~128Kbps\")\r\nmixer.init()\r\ni=0\r\nfirst=True\r\nmixer.music.load(\"F:\\songs\\Tholi Prema (2018) ~128Kbps\\\\\"+list1[i])\r\nwhile(1):\r\n choice=int(input(\"1.play\\t2.next\\t3.prev\\t4.pause\\t5.exit\"))\r\n if (choice==1):\r\n if(first==True):\r\n mixer.music.play()\r\n first=False \r\n else:\r\n mixer.music.unpause()\r\n elif(choice==2):\r\n i+=1\r\n if(i>=len(list1)):i=0\r\n mixer.music.load(\"F:\\songs\\Tholi Prema (2018) ~128Kbps\\\\\"+list1[i])\r\n mixer.music.play()\r\n elif(choice==3):\r\n i-=1\r\n mixer.music.load(\"F:\\songs\\Tholi Prema (2018) ~128Kbps\\\\\"+list1[i])\r\n mixer.music.play()\r\n elif(choice==4):\r\n mixer.music.pause()\r\n elif(choice==5):\r\n mixer.music.stop()\r\n break\r\n\r\n","sub_path":"sound.py","file_name":"sound.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"349289654","text":"import re\n\ntext = 'After beating the eggs, Dana read the next step:'\n'Add milk and eggs, then add flour and sugar.'\n\nmydict = {}\n\n\nfor line in text:\n print(line)\n if line.isalpha():\n wordL = line.lower()\n\n if wordL in mydict:\n mydict[wordL] += 1\n \n else:\n mydict[wordL] = 1\n\n # for word in line:\n \n # if word.isalpha(): \n # wordL = word.lower()\n # print(wordL)\n\n # if wordL in mydict:\n # mydict[wordL] += 1\n \n # else:\n # mydict[wordL] = 1\n\nprint(mydict)","sub_path":"WordCloud.py","file_name":"WordCloud.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"382193992","text":"\"\"\"Import from other formats to the CNVkit format.\"\"\"\nfrom __future__ import absolute_import, division, print_function\nfrom builtins import next\nfrom builtins import map\nfrom builtins import zip\n\nimport logging\nimport math\nimport os.path\nimport subprocess\n\nimport numpy as np\nimport pandas as pd\n\nfrom . 
import core, params\nfrom .cnary import CopyNumArray as CNA\n\n\n# __________________________________________________________________________\n# import-picard\n\nTOO_MANY_NO_COVERAGE = 100\n\ndef find_picard_files(file_and_dir_names):\n \"\"\"Search the given paths for 'targetcoverage' CSV files.\n\n Per the convention we use in our Picard applets, the target coverage file\n names end with '.targetcoverage.csv'; anti-target coverages end with\n '.antitargetcoverage.csv'.\n \"\"\"\n filenames = []\n for tgt in file_and_dir_names:\n if os.path.isdir(tgt):\n # Collect the target coverage files from this directory tree\n fnames = subprocess.check_output(['find', tgt,\n '-name', '*targetcoverage.csv']\n ).splitlines()\n if not fnames:\n raise RuntimeError(\"Given directory %s does not contain any \"\n \"'*targetcoverage.csv' files.\"\n % tgt)\n filenames.extend(fnames)\n elif os.path.isfile(tgt):\n filenames.append(tgt)\n else:\n raise ValueError(\"Given path is neither a file nor a directory: %s\"\n % tgt)\n filenames.sort()\n return filenames\n\n\ndef import_picard_pertargetcoverage(fname):\n \"\"\"Parse a Picard CalculateHsMetrics PER_TARGET_COVERAGE file.\n\n Return a CopyNumArray.\n\n Input column names:\n chrom (str),\n start, end, length (int),\n name (str),\n %gc, mean_coverage, normalized_coverage (float)\n \"\"\"\n dframe = pd.read_table(fname, na_filter=False)\n coverages = np.asarray(dframe['mean_coverage'])\n no_cvg_idx = (coverages == 0)\n if sum(no_cvg_idx) > TOO_MANY_NO_COVERAGE:\n logging.warn(\"*WARNING* Sample %s has >%d bins with no coverage\",\n fname, TOO_MANY_NO_COVERAGE)\n # Avoid math domain error\n coverages[no_cvg_idx] = 2**params.NULL_LOG2_COVERAGE\n cnarr = CNA.from_columns({\"chromosome\": dframe[\"chrom\"],\n \"start\": dframe[\"start\"] - 1,\n \"end\": dframe[\"end\"],\n \"gene\": dframe[\"name\"].apply(unpipe_name),\n \"gc\": dframe[\"%gc\"],\n \"log2\": np.log2(coverages)},\n {\"sample_id\": core.fbase(fname)})\n cnarr.sort()\n return cnarr\n\n\ndef unpipe_name(name):\n \"\"\"Fix the duplicated gene names Picard spits out.\n\n Return a string containing the single gene name, sans duplications and pipe\n characters.\n\n Picard CalculateHsMetrics combines the labels of overlapping intervals\n by joining all labels with '|', e.g. 'BRAF|BRAF' -- no two distinct\n targeted genes actually overlap, though, so these dupes are redundant.\n Meaningless target names are dropped, e.g. 'CGH|FOO|-' resolves as 'FOO'.\n In case of ambiguity, the longest name is taken, e.g. \"TERT|TERT Promoter\"\n resolves as \"TERT Promoter\".\n \"\"\"\n if '|' not in name:\n return name\n gene_names = set(name.split('|'))\n if len(gene_names) == 1:\n return gene_names.pop()\n cleaned_names = gene_names.difference(params.IGNORE_GENE_NAMES)\n if cleaned_names:\n gene_names = cleaned_names\n new_name = sorted(gene_names, key=len, reverse=True)[0]\n if len(gene_names) > 1:\n logging.warn(\"*WARNING* Ambiguous gene name %r; using %r\",\n name, new_name)\n return new_name\n\n\n# __________________________________________________________________________\n# import-seg\n\nLOG2_10 = math.log(10, 2) # To convert log10 values to log2\n\ndef import_seg(segfname, chrom_names, chrom_prefix, from_log10):\n \"\"\"Parse a SEG file as an iterable of CopyNumArray instances.\n\n `chrom_names`:\n Map (string) chromosome IDs to names. (Applied before chrom_prefix.)\n e.g. 
{'23': 'X', '24': 'Y', '25': 'M'}\n\n `chrom_prefix`: prepend this string to chromosome names\n (usually 'chr' or None)\n\n `from_log10`: Convert values from log10 to log2.\n \"\"\"\n dframe = pd.read_table(segfname, na_filter=False)\n if len(dframe.columns) == 6:\n dframe.columns = ['sample_id', 'chromosome', 'start', 'end', 'nprobes',\n 'mean']\n elif len(dframe.columns) == 5:\n dframe.columns = ['sample_id', 'chromosome', 'start', 'end', 'mean']\n else:\n raise ValueError(\"SEG format expects 5 or 6 columns; found {}: {}\"\n .format(len(dframe.columns), ' '.join(dframe.columns)))\n\n # Calculate values for output columns\n dframe['chromosome'] = dframe['chromosome'].apply(str)\n if chrom_names:\n dframe['chromosome'] = dframe['chromosome'].apply(lambda c:\n chrom_names.get(c, c))\n if chrom_prefix:\n dframe['chromosome'] = dframe['chromosome'].apply(lambda c:\n chrom_prefix + c)\n if from_log10:\n dframe['mean'] *= LOG2_10\n dframe['gene'] = [\"G\" if mean >= 0 else \"L\" for mean in dframe['mean']]\n\n for sid in pd.unique(dframe['sample_id']):\n sample = dframe[dframe['sample_id'] == sid]\n cols = {'chromosome': sample['chromosome'],\n 'start': sample['start'],\n 'end': sample['end'],\n 'gene': sample['gene'],\n 'log2': sample['mean']}\n if 'nprobes' in dframe:\n cols['probes'] = sample['nprobes']\n cns = CNA.from_columns(cols, {'sample_id': sid})\n cns.sort()\n yield cns\n\n\n# __________________________________________________________________________\n# import-theta\n\ndef parse_theta_results(fname):\n \"\"\"Parse THetA results into a data structure.\n\n Columns: NLL, mu, C, p*\n \"\"\"\n with open(fname) as handle:\n header = next(handle).rstrip().split('\\t')\n body = next(handle).rstrip().split('\\t')\n assert len(body) == len(header) == 4\n\n # NLL\n nll = float(body[0])\n\n # mu\n mu = body[1].split(',')\n mu_normal = float(mu[0])\n mu_tumors = list(map(float, mu[1:]))\n\n # C\n copies = body[2].split(':')\n if len(mu_tumors) == 1:\n # 1D array of integers\n # Replace X with None for \"missing\"\n copies = [[int(c) if c.isdigit() else None\n for c in copies]]\n else:\n # List of lists of integer-or-None (usu. 2 x #segments)\n copies = [[int(c) if c.isdigit() else None\n for c in subcop]\n for subcop in zip(*[c.split(',') for c in copies])]\n\n # p*\n probs = body[3].split(',')\n if len(mu_tumors) == 1:\n # 1D array of floats, or None for \"X\" (missing/unknown)\n probs = [float(p) if not p.isalpha() else None\n for p in probs]\n else:\n probs = [[float(p) if not p.isalpha() else None\n for p in subprob]\n for subprob in zip(*[p.split(',') for p in probs])]\n return {\"NLL\": nll,\n \"mu_normal\": mu_normal,\n \"mu_tumors\": mu_tumors,\n \"C\": copies,\n \"p*\": probs}\n","sub_path":"cnvlib/importers.py","file_name":"importers.py","file_ext":"py","file_size_in_byte":7621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"10819051","text":"import json\nimport os\nimport typing\n\nimport enum\nfrom NeonOcean.Order import Information, Paths\nfrom NeonOcean.Order.Tools import Exceptions, Version\nfrom sims4 import log\n\n_allMods = list() # type: typing.List[Mod]\n\nclass Mod:\n\tdef __init__ (self, namespace: str, name: str, loadController: str, informationFilePath: str):\n\t\t\"\"\"\n\t\tA container for mod information.\n\n\t\t:param namespace: The namespace of the mod, it should be the root of all the mod's modules.\n\t\t:type namespace: str\n\t\t:param name: The actual name of the mod. 
This will be used as the name of the attribute in this module that points to this mod.\n\t\t:type name: str\n\t\t:param loadController: The namespace of the mod that can load this mod.\n\t\t:type loadController: typing.Optional[str]\n\t\t:param informationFilePath: The file path of the mod information file.\n\t\t:type informationFilePath: str\n\t\t\"\"\"\n\n\t\tif not isinstance(namespace, str):\n\t\t\traise Exceptions.IncorrectTypeException(namespace, \"namespace\", (str,))\n\n\t\tif not isinstance(name, str):\n\t\t\traise Exceptions.IncorrectTypeException(name, \"name\", (str,))\n\n\t\tif not isinstance(loadController, str) and loadController is not None:\n\t\t\traise Exceptions.IncorrectTypeException(loadController, \"loadController\", (str, \"None\"))\n\n\t\tif not isinstance(informationFilePath, str):\n\t\t\traise Exceptions.IncorrectTypeException(informationFilePath, \"informationFilePath\", (str,))\n\n\t\tself.Namespace = namespace # type: str\n\t\tself.Name = name # type: str\n\t\tself.LoadController = loadController # type: typing.Optional[str]\n\n\t\tself.InformationFilePath = informationFilePath # type: str\n\t\tself.InformationFileDirectoryPath = os.path.dirname(self.InformationFilePath) # type: str\n\n\t\tself.Author = None # type: typing.Optional[str]\n\t\tself.Version = None # type: typing.Optional[Version.Version]\n\t\tself.VersionDisplay = None # type: typing.Optional[str]\n\t\tself.Distribution = None # type: typing.Optional[str]\n\t\tself.Rating = Rating.Normal # type: Rating\n\n\t\tself.ScriptPaths = list() # type: typing.List[str]\n\t\tself.Modules = list() # type: typing.List[str]\n\n\t\tself.Requirements = list() # type: typing.List[str]\n\t\tself.Compatibility = list() # type: typing.List[Compatibility]\n\n\t\tself.Path = os.path.join(Paths.ModsPath, self.Namespace) # type: str\n\t\tself.PersistentPath = os.path.join(Paths.PersistentPath, self.Namespace) # type: str\n\n\t\tself.Blocked = False # type: bool\n\t\tself.Loading = False # type: bool\n\n\t\tself.ReadInformation = False # type: bool\n\t\tself.Imported = False # type: bool\n\t\tself.Initiated = False # type: bool\n\t\tself.Started = False # type: bool\n\n\t\t_allMods.append(self)\n\n\tdef IsLoaded (self) -> bool:\n\t\t\"\"\"\n\t\tWhether or not this mod is currently loaded.\n\t\t:rtype: bool\n\t\t\"\"\"\n\n\t\treturn self.ReadInformation and \\\n\t\t\t self.Imported and \\\n\t\t\t self.Initiated and \\\n\t\t\t self.Started\n\n\tdef RequirementsLoaded (self) -> bool:\n\t\t\"\"\"\n\t\tWhether or not this mod's requirements are all currently loaded.\n\t\t:rtype: bool\n\t\t\"\"\"\n\n\t\tloadedRequirements = 0 # type: int\n\n\t\tfor requirement in self.Requirements:\n\t\t\tfor mod in GetAllMods(): # type: Mod\n\t\t\t\tif mod.Namespace == requirement:\n\t\t\t\t\tif not mod.IsLoaded():\n\t\t\t\t\t\treturn False\n\n\t\t\t\t\tloadedRequirements += 1\n\n\t\tif len(self.Requirements) != loadedRequirements:\n\t\t\treturn False\n\n\t\treturn True\n\n\tdef ControlsLoading (self, hostNamespace: str) -> bool:\n\t\t\"\"\"\n\t\tWhether or not the host controls the mod's loading.\n\t\t:param hostNamespace: The namespace of the mod that will be doing the loading\n\t\t:type hostNamespace: str\n\t\t:rtype: bool\n\t\t\"\"\"\n\n\t\tif self.LoadController is None:\n\t\t\treturn False\n\n\t\treturn self.LoadController == hostNamespace\n\n\tdef IsLoadable (self, hostNamespace: str) -> bool:\n\t\t\"\"\"\n\t\tWhether or not this mod can ever be loaded by the host. 
This returns false if the mod is blocked and if the mod is not controlled by the host.\n\t\t:param hostNamespace: The namespace of the mod that will be doing the loading\n\t\t:type hostNamespace: str\n\t\t:rtype: bool\n\t\t\"\"\"\n\n\t\treturn not self.Blocked and \\\n\t\t\t self.ControlsLoading(hostNamespace)\n\n\tdef IsCurrentlyLoadable (self, hostNamespace: str) -> bool:\n\t\t\"\"\"\n\t\tWhether or not this mod can be loaded by the host right now.\n\t\t:param hostNamespace: The namespace of the mod that will be doing the loading\n\t\t:type hostNamespace: str\n\t\t:rtype: bool\n\t\t\"\"\"\n\n\t\treturn self.IsLoadable(hostNamespace) and \\\n\t\t\t self.ReadInformation and \\\n\t\t\t not self.Loading and \\\n\t\t\t not self.Imported and \\\n\t\t\t not self.Initiated and \\\n\t\t\t not self.Started and \\\n\t\t\t self.RequirementsLoaded()\n\n\t# noinspection SpellCheckingInspection\n\tdef IsUnloadable (self, hostNamespace: str) -> bool:\n\t\t\"\"\"\n\t\tWhether or not this mod can ever be unloaded by the host.\n\t\t:param hostNamespace: The namespace of the mod that will be doing the loading\n\t\t:type hostNamespace: str\n\t\t:rtype: bool\n\t\t\"\"\"\n\n\t\treturn self.ControlsLoading(hostNamespace)\n\n\t# noinspection SpellCheckingInspection\n\tdef IsCurrentlyUnloadable (self, hostNamespace: str) -> bool:\n\t\t\"\"\"\n\t\tWhether or not this mod can be unloaded by the host right now.\n\t\t:param hostNamespace: The namespace of the mod that will be doing the loading\n\t\t:type hostNamespace: str\n\t\t:rtype: bool\n\t\t\"\"\"\n\n\t\treturn self.IsUnloadable(hostNamespace) and \\\n\t\t\t self.Imported\n\nclass Compatibility:\n\tdef __init__ (self, namespace: str, lowestVersion: typing.Optional[Version.Version], highestVersion: typing.Optional[Version.Version]):\n\t\tself.Namespace = namespace # type: str\n\t\tself.LowestVersion = lowestVersion # type: typing.Optional[Version.Version]\n\t\tself.HighestVersion = highestVersion # type: typing.Optional[Version.Version]\n\nclass Rating(enum.Int):\n\tNormal = 0 # type: Rating\n\t# noinspection SpellCheckingInspection\n\tNSFW = 1 # type: Rating\n\ndef GetMod (namespace: str) -> Mod:\n\tfor mod in _allMods: # type: Mod\n\t\tif mod.Namespace == namespace:\n\t\t\treturn mod\n\n\traise Exception(\"No mod with the namespace '\" + namespace + \"' exists.\")\n\ndef IsInstalled (namespace: str) -> bool:\n\tfor mod in _allMods: # type: Mod\n\t\tif mod.Namespace == namespace:\n\t\t\treturn True\n\n\treturn False\n\ndef GetAllMods () -> typing.List[Mod]:\n\treturn list(_allMods)\n\ndef _Setup () -> None:\n\tfor directoryRoot, directoryNames, fileNames in os.walk(Paths.ModsPath): # type: str, list, list\n\t\tfor fileName in fileNames: # type: str\n\t\t\tfileNameLower = fileName.lower() # type: str\n\n\t\t\tmodFilePath = os.path.join(directoryRoot, fileName) # type: str\n\n\t\t\ttry:\n\t\t\t\tif os.path.splitext(fileNameLower)[1] == \".json\" and fileNameLower.startswith((Information.RootNamespace + \"-mod\").lower()):\n\t\t\t\t\twith open(modFilePath) as modFile:\n\t\t\t\t\t\tmodInformation = json.JSONDecoder().decode(modFile.read()) # type: dict\n\n\t\t\t\t\tmodNamespace = modInformation[\"Namespace\"] # type: str\n\t\t\t\t\tmodName = modInformation[\"Name\"] # type: str\n\t\t\t\t\tmodLoadControl = modInformation.get(\"LoadController\") # type: typing.Optional[str]\n\n\t\t\t\t\tduplicateMod = False # type: bool\n\n\t\t\t\t\tfor mod in GetAllMods():\n\t\t\t\t\t\tif modNamespace == mod.Namespace:\n\t\t\t\t\t\t\tlog.exception(\"NeonOcean\", \"Duplicate mod with 
the namespace '\" + modNamespace + \"' at: \\n\" + modFilePath, owner = __name__)\n\t\t\t\t\t\t\tduplicateMod = True\n\n\t\t\t\t\tif duplicateMod:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tmod = Mod(modNamespace, modName, modLoadControl, modFilePath)\n\n\t\t\t\t\tglobals()[modName] = mod\n\t\t\texcept Exception as e:\n\t\t\t\tlog.exception(\"NeonOcean\", \"Failed to read basic data from mod information dictionary at: \\n\" + modFilePath, exc = e, owner = __name__)\n\n_Setup()\n\nChoreography: Mod\nCycle: Mod\nDebug: Mod\nMain: Mod\nOrder: Mod\nTime: Mod\n","sub_path":"Python/NeonOcean.Order/NeonOcean/Order/Mods.py","file_name":"Mods.py","file_ext":"py","file_size_in_byte":7500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"330559630","text":"from typing import Optional, List\nfrom dataclasses import dataclass, field\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom matplotlib.patches import Rectangle\nimport mpl_toolkits.mplot3d.art3d as art3d\nimport pandas as pd\nimport numpy as np\n\n\n@dataclass\nclass SweeperPlot:\n fig: plt.Figure = field(init=False)\n ax: plt.Axes = field(init=False)\n\n def __post_init__(self):\n self.fig = plt.figure()\n self.ax = plt.axes(projection=\"3d\")\n self.ax.view_init(elev=5.0, azim=75)\n\n def set_up_plotter(self, n_levels: int, param_labels: List[str]):\n \"\"\"Defines the initial bounds and labels for the plotter.\"\"\"\n self.ax.set_ylim(0, n_levels)\n self.ax.set_zlim(0, 5)\n self.ax.set_xlim(0, 1)\n self.ax.invert_xaxis()\n\n self.ax.set_zlabel(param_labels[0], labelpad=5)\n self.ax.set_ylabel(\"Optimization level\", labelpad=10)\n self.ax.set_xlabel(param_labels[1], labelpad=10)\n\n self.fig.show()\n self.fig.canvas.draw()\n\n def draw(self):\n \"\"\"Updates the figure.\"\"\"\n self.fig.canvas.draw()\n\n def add_bounds_to_ax(self, x: np.ndarray, y: np.ndarray, z: int) -> None:\n \"\"\"Draws the bounds for a level's parameter space.\"\"\"\n width = max(y) - min(y)\n height = max(x) - min(x)\n\n p = Rectangle(\n (min(y), min(x)),\n width,\n height,\n edgecolor=\"black\",\n facecolor=\"none\",\n linestyle=\"--\",\n )\n\n self.ax.add_patch(p)\n art3d.pathpatch_2d_to_3d(p, z=z, zdir=\"y\")\n self.draw()\n\n @staticmethod\n def get_colormap(level_values: np.ndarray) -> np.ndarray:\n \"\"\"Convert the passed values to colormap.\"\"\"\n color_dimension = level_values # change to desired fourth dimension\n color_min, color_max = color_dimension.min(), color_dimension.max()\n norm = colors.Normalize(color_min, color_max)\n m = plt.cm.ScalarMappable(norm=norm, cmap=\"Spectral_r\")\n m.set_array([])\n face_colors = m.to_rgba(color_dimension)\n\n return face_colors\n\n def plot_level_results(\n self, x: np.ndarray, y: np.ndarray, z: int, results: np.ndarray\n ) -> None:\n # Get the parameter space\n x, y = np.meshgrid(y, x)\n m = self.get_colormap(results)\n # Plot surface using color as a 4th dimension\n self.ax.plot_surface(\n x,\n np.ones((len(x), len(x))) * z,\n y,\n facecolors=m,\n edgecolor=\"white\",\n linewidth=0.1,\n rstride=1,\n cstride=1,\n )\n\n self.fig.canvas.draw()\n\n\ndef plot_trajectories_2d(trajectories: pd.DataFrame, ax: Optional[plt.Axes] = None):\n \"\"\"\n Plots the cell trajectories in 2D as a line and a point at the last coordinate.\n\n Parameters\n ----------\n trajectories\n A DataFrame with the cells' x and y coordinates.\n ax\n The axes object where the trajectories will be plotted (optional).\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n\n for cell in 
trajectories:\n ax.plot(cell[\"position_x\"].values, cell[\"position_y\"].values)\n\n ax.scatter(\n cell[\"position_x\"].values[-1], cell[\"position_y\"].values[-1], marker=\"o\"\n )\n\n return ax\n\n\ndef plot_trajectories_3d(trajectories: pd.DataFrame, ax: Optional[plt.Axes] = None):\n \"\"\"\n Plots the cell trajectories in 3D as a line and a point at the last coordinate.\n\n Parameters\n ----------\n trajectories\n A DataFrame with the cells' x, y and z coordinates.\n ax\n The axes object where the trajectories will be plotted (optional).\n \"\"\"\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(projection=\"3d\")\n\n for cell in trajectories:\n ax.plot(\n cell[\"position_x\"].values,\n cell[\"position_y\"].values,\n cell[\"position_z\"].values,\n )\n\n ax.scatter(\n cell[\"position_x\"].values[-1],\n cell[\"position_y\"].values[-1],\n cell[\"position_z\"].values[-1],\n marker=\"o\",\n )\n\n return ax\n","sub_path":"src/physicool/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"433081624","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.exceptions import CloseSpider\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\nfrom jobs_scraping.items import JobItem\n\n\nclass GlassdoorSpider(CrawlSpider):\n \"\"\"Spider for extracting Python job offers from Glassdoor.\"\"\"\n\n name = \"glassdoor_spider\"\n file_name = \"Glassdoor\"\n initial_country = \"USA\"\n\n start_urls = [\"https://www.glassdoor.com/Job/python-jobs-SRCH_KO0,6.htm\"]\n base_url = \"https://www.glassdoor.com/\"\n\n rules = (\n Rule(\n LinkExtractor(allow=(), restrict_css=\".next\"),\n callback=\"parse_page\",\n follow=True,\n ),\n )\n\n custom_settings = {\n \"ITEM_PIPELINES\": {\"jobs_scraping.pipelines.CSVExportPipeline\": 300},\n }\n\n def __init__(self, *args, **kwargs):\n \"\"\"Setting up current page and max page.\"\"\"\n self.file_name = kwargs.get(\"filename\", self.file_name)\n self.max_page = kwargs.get(\"stop_page\", 1)\n\n self.current_page = 1\n super().__init__(*args, **kwargs)\n\n def parse_start_url(self, response):\n \"\"\"Method for correctly scrapping first page from website.\"\"\"\n return self.parse_page(response)\n\n def parse_page(self, response):\n \"\"\"\n Method for gathering job links.\n\n @url https://www.glassdoor.com/Job/python-jobs-SRCH_KO0,6.htm\n\n @returns requests 60\n \"\"\"\n if self.current_page > self.max_page:\n raise CloseSpider(\"Spider has reached maximum number of pages\")\n links = response.css(\"a.jobLink.jobInfoItem.jobTitle::attr(href)\").getall()\n for link in links:\n absolute_url = self.base_url + link[1:]\n yield scrapy.Request(absolute_url, callback=self.parse_job)\n self.current_page += 1\n\n def parse_job(self, response):\n \"\"\"\n Method for gathering specific job information.\n \n @url https://www.glassdoor.com/job-listing/fullstack-python-engineer-streetshares-JV_IC1130404_KO0,25_KE26,38.htm?jl=3147380862&ctt=1586168334926\n\n @returns items 1\n\n @scrapes position company location url country\n \"\"\"\n item = JobItem()\n item[\"position\"] = response.css(\"h2.mt-0.mb-xsm.strong::text\").get()\n item[\"company\"] = response.css(\"span.strong.ib::text\").get()\n item[\"location\"] = response.css(\"span.subtle.ib::text\").getall()[1]\n item[\"url\"] = response.url\n item[\"country\"] = self.initial_country\n yield 
item\n","sub_path":"src/jobs_scraping/spiders/glassdoor_spider.py","file_name":"glassdoor_spider.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"102949240","text":"\"\"\"Cute blur animation.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom matplotlib import animation\nfrom PIL import Image\n\nfrom blur import BoxFilter\n\n\nif __name__ == \"__main__\":\n img_path = \"./imgs/cat-crop.png\"\n img = np.asarray(Image.open(img_path)).astype(\"uint8\")\n\n h, w, c = img.shape\n blurred = BoxFilter(11).filter(img)\n\n out = np.zeros_like(img, dtype=\"uint8\")\n fig, axes = plt.subplots(1, 2)\n axes[0].imshow(img)\n canvas = axes[1].imshow(out)\n for ax in axes:\n ax.axis('off')\n\n def gen_func():\n for row in range(h):\n yield row,\n\n def update_func(row):\n row = row[0]\n for col in range(w):\n out[row, col, :] = blurred[row, col, :]\n canvas.set_data(out)\n return canvas,\n\n ani = animation.FuncAnimation(fig, update_func, gen_func, interval=2, blit=True, save_count=200)\n ani.save('../assets/blur_ani.gif', writer='imagemagick')\n","sub_path":"examples/animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"30409934","text":"#!/usr/bin/python3.5\n# -*-coding:utf-8 -*\n\n# tkinter is the Python standart GUI\nfrom tkinter import *\n\n# creating a window called window\nwindow = Tk()\n\n# getting screen size to get a \"responsive\" window\nscreen_width = window.winfo_screenwidth()\nscreen_height = window.winfo_screenheight()\n\n# setting var for windows size and position\nw_w = screen_width / 2\nw_h = screen_height / 2\nw_x = screen_height / 20\nw_y = screen_width / 20\n\n# setting window's initial size and position\n# note that we are converting size from float to int to string\nwindow.geometry(str(int(w_w)) + \"x\" + str(int(w_h)) + \"+\" + str(int(w_x)) + \"+\" + str(int(w_y)))\n\n# setting window's title\nwindow.title(\"Simple PySnake\")\n\n# creating plate\nplate_w = w_w\nplate_h = (w_h - w_h / 11)\nplate = Canvas(window, width = plate_w, height = plate_h, bg = \"blue\")\n\n# putting plate on window's bottom\nplate.pack(side = BOTTOM)\n\n# creating topbar\ntopbar = Text(window, width = int(w_w), height = int(w_h / 11), bg = \"red\")\n\n# putting topbar on window's top\ntopbar.pack(side = TOP)\n\n# creating a loop on the window\n# to get out of the loop the user has to close the window by clicking on the red cross\nwindow.mainloop()\n","sub_path":"src/includes/python/simple-pysnake-05.py","file_name":"simple-pysnake-05.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"501741359","text":"TYPE = 'type'\nREQUEST = 'request'\nRESPONSE = 'response'\n\nACTION = 'action'\nTIME = 'time'\nBODY = 'body'\nCODE = 'code'\nMESSAGE = 'message'\nUSERNAME = 'username'\nPASSWORD = 'password'\nSENDER = 'sender'\nTO = 'to'\nTEXT = 'text'\n\n\nclass RequestAction:\n PRESENCE = 'presence'\n AUTH = 'auth'\n MESSAGE = 'msg'\n QUIT = 'quit'\n COMMAND = 'command'\n START_CHAT = 'start_chat'\n ACCEPT_CHAT = 'accept_chat'","sub_path":"Lesson_14/jim/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"624086623","text":"from pymongo import 
MongoClient\n\nfrom MainAPI.models import Resource\nfrom constants import Constants\n\n\nclass ResourceDao:\n def __init__(self):\n self.client = MongoClient('localhost', 27017)\n print(\"Database Client created\")\n\n def read(self, resource_id):\n resource_log = self.client[\"test_db\"][\"resource_log\"]\n cursor = resource_log.find_one({\"resource_id\": str(resource_id)})\n if cursor is None:\n return None\n resource = Resource(resource_id)\n for attr in resource.__dict__:\n resource.__dict__[attr] = cursor.get(attr)\n return resource\n\n def create(self, resource):\n try:\n resource_log = self.client[\"test_db\"][\"resource_log\"]\n print(type(resource.to_bson()))\n resource_log.insert_one(resource.to_bson())\n except Exception as e:\n print('ERROR!!: ' + str(e))\n\n def update(self, resource):\n resource_log = self.client[\"test_db\"][\"resource_log\"]\n query = {'resource_id': resource.resource_id, 'version': int(resource.version)}\n resource.version += 1\n update = resource.to_bson()\n response = resource_log.find_and_modify(query=query, update=update)\n return response is not None","sub_path":"Dao/resource_dao.py","file_name":"resource_dao.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"10066553","text":"#!/usr/bin/env python\n\n# In this simple RPG game, the hero fights the goblin. He has the options to:\n\n# 1. fight goblin\n# 2. do nothing - in which case the goblin will attack him anyway\n# 3. flee\n\n\n#used in number generators\nimport random\n#used to create an infinite number\nimport math\n\nclass Character(object):\n def __init__(self, health, power):\n self.health = health\n self.power = power\n\n def attack(self, enemy):\n enemy.health -= self.power\n #20% chance \n if numberTwenty == 2:\n enemy.health = enemy.health + enemy.health\n print(\"You cause {} damage to the goblin.\".format(enemy.health))\n \n if enemy.health <= 0:\n print(\"SUCCESS!! You killed that stupid goblin\")\n\n#makes sure classes are alive\n def alive(self):\n if self.health > 0:\n return True\n\n def printStatus(self):\n return self.health\n\n\nclass Hero(Character):\n def __init__(self, health, power):\n super(Hero, self).__init__(health, power)\n\n#can regen health but has slightly lower attack\nclass Medic(Character):\n def __init__(self, health, power):\n super(Medic, self).__init__(health, power)\n\n#20% chance to add two health\n def regen():\n if numberTwenty == 3:\n self.health = self.health + 2\n\n#pretty much a 1 shot character without block function\nclass Shadow(Character):\n def __init__(self, health, power):\n super(Shadow, self).__init__(health, power)\n\n#should only take damage one out of 10 times\n def block():\n if numberTen != 5:\n enemy.attack = 0\n\n\n\nclass Goblin(Character):\n def __init__(self, health, power):\n super(Goblin, self).__init__(health, power)\n\n#classes\nhero = Hero(10, 5)\nmedic = Medic(10, 5)\nshadow = Shadow(1, 3)\ngoblin = Goblin(6, 2)\n\n#random number generators\nnumberTwenty = random.randint(1, 5)\nnumberTen = random.randint(1, 10)\n\ninf = float(\"inf\")\n\nprint(inf)\ndef main():\n\n while goblin.alive() and hero.alive():\n print(f\"You currently have {hero.printStatus()} health and {hero.power} power.\")\n print(f\"The goblin has {goblin.printStatus()} health and {goblin.power} power.\")\n print()\n print(\"What would you like to do? >> \")\n print(\"1. Attempt to kill the goblin\")\n print(\"2. Get hit for no reason\")\n print(\"3. 
Run away, because you're scared\")\n print(\"> \", end=' ')\n rawInput = input()\n if rawInput == \"1\":\n # Hero attacks goblin\n hero.attack(goblin)\n # if number == 2:\n # hero.attack(goblin)\n elif rawInput == \"2\":\n print(\" \")\n print(\"YOU FOOL!!!\")\n goblin.attack(hero)\n pass\n elif rawInput == \"3\":\n print(\"You don't deserve this game anyway. Leave my presence!!\")\n break\n else:\n print(\"Invalid input {}\".format(rawInput))\n\n if goblin.health > 0:\n # Goblin attacks hero\n hero.health -= goblin.power\n print(\"The goblin does {} damage to you.\".format(goblin.power))\n if hero.health <= 0:\n print(\"YOU DIED!! What were you thinking?!\")\n\n\n\n\nmain()\n\n","sub_path":"RPGgame.py","file_name":"RPGgame.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"251586343","text":"class tree(object):\n\tdef __init__(self,key):\n\t\tself.key = key\n\t\tself.left = None\n\t\tself.right = None\n\t\n\n\tdef insertLeft(self,newNode):\n\t\tif self.left == None:\n\t\t self.left = newNode\n\t\telse:\n\t\t\tt = tree(newNode)\n\t\t\tt.left = self.left\n\t\t\tself.left = t\n\n\tdef insertRight(self,newNode):\n\t\tif self.right == None:\n\t\t\tself.right = newNode\n\t\telse:\n\t\t\tt = tree(newNode)\n\t\t\tt.right = self.right\n\t\t\tself.right = t\n\n\tdef getLeft(self):\n\t\treturn self.left\n\n\tdef getRight(self):\n\t\treturn self.right\n\n\tdef getRootVal(self):\n\t\treturn self.key\n\t\n\tdef setRootVal(self,key):\n\t\tself.key = key\n\n\ndef preOrderprint(tree):\n if tree:\n print(tree.getRootVal())\n preOrderprint(tree.getLeft())\n preOrderprint(tree.getRight())\n\ndef inOrder(tree,arr):\n if tree == None:\n return\n else:\n inOrder(tree.left,arr)\n arr.append(tree.key)\n inOrder(tree.right,arr)\n\n\n\n# pre order traverse to populate from array\n\ndef populateHeap(tree,arr,i):\n if not tree:\n return\n i[0] += 1\n\n tree.setRootVal(arr[i[0]])\n populateHeap(tree.left, arr, i)\n populateHeap(tree.right, arr, i)\n\ndef convertHeap(tree):\n\tarr = []\n\ti = [-1]\n\tinOrder(tree,arr)\n\tpopulateHeap(tree,arr,i)\n\n\n\nr = tree(5) #Remember here to assign tree objects, not just assining ints\nr.left = tree(3)\nr.right = tree(6)\nr.getLeft().insertLeft(tree(2))\nr.getLeft().insertRight(tree(4))\n\n\nconvertHeap(r)\npreOrderprint(r)","sub_path":"harder/bst_to_minheap.py","file_name":"bst_to_minheap.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"492571071","text":"import os\nimport shutil\nfrom tqdm import tqdm\ndate_path = \"data\"\ntrain_path = \"train\"\ntest_path = \"test\"\ntrainpercentage = 0.7 # 训练集合与测试百分比\n\n\ndef RName(str):\n l = 0\n for i in range(len(str)):\n if str[i] <= 'Z' and str[i] >= 'A':\n return str[i:]\n if str[i] <= 'z' and str[i] >= 'a':\n return str[i:]\n\n\ndef DirRename():\n print(\"修改文件夹\")\n imagelist_name = os.listdir(date_path)\n for name in tqdm(imagelist_name):\n if os.path.isdir(os.path.join(date_path, name)):\n os.rename(os.path.join(date_path, name),\n os.path.join(date_path, RName(name)))\n\n\ndef PictureRename():\n print(\"批量重命名\")\n imagelist_name = os.listdir(date_path)\n for name in tqdm(imagelist_name):\n dir_data = os.path.join(date_path, name)\n if os.path.isdir(dir_data) == True:\n image_list = os.listdir(dir_data)\n i = 0\n for image in image_list:\n if image.find('.png') != -1 or image.find('.jpg') != -1:\n lastname = os.path.join(dir_data, 
image)\n # print(lastname)\n filename = os.path.join(\n dir_data, \"image_\" + str(i).zfill(4) + \".jpg\")\n os.renames(lastname, filename)\n i += 1\n\n\ndef Make_tarin_test():\n print(\"训练和测试分离\")\n imagelist_name = os.listdir(date_path)\n for name in tqdm(imagelist_name):\n dir_data = os.path.join(date_path, name)\n if os.path.isdir(dir_data) == True:\n \n image_list = os.listdir(dir_data)\n tarinimage_list = os.path.join(train_path, name)\n if os.path.exists(tarinimage_list):\n shutil.rmtree(tarinimage_list)\n os.mkdir(tarinimage_list)\n\n testimage_list = os.path.join(test_path, name)\n if os.path.exists(testimage_list):\n shutil.rmtree(testimage_list)\n os.mkdir(testimage_list)\n train_num = int(len(image_list)*trainpercentage)\n\n for image in image_list[:train_num]:\n if image.find('.png') != -1 or image.find('.jpg') != -1:\n old_name = os.path.join(dir_data, image)\n new_name = os.path.join(\n os.path.join(train_path, name), image)\n shutil.copyfile(old_name, new_name)\n \n for image in image_list[train_num:]:\n if image.find('.png') != -1 or image.find('.jpg') != -1:\n old_name = os.path.join(dir_data, image)\n new_name = os.path.join(\n os.path.join(test_path, name), image)\n shutil.copyfile(old_name, new_name)\n \n\n\nif __name__ == '__main__':\n #PictureRename()\n Make_tarin_test()\n # DirRename()\n","sub_path":"DataHelper.py","file_name":"DataHelper.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"308942056","text":"#!/usr/bin/python\n# -*- coding: iso-8859-1 -*-\n\nimport boto\nimport time\n\nfrom boto.dynamodb2.items import Item\nfrom boto.dynamodb2.table import Table\nfrom boto.s3.key import Key\nfrom datetime import datetime\nfrom quik import FileLoader\n\ndrinkers_table = Table('drinkers')\ns3 = boto.connect_s3()\n\ndef scrape_data_to_csv():\n all_drinkers = drinkers_table.scan()\n with open(\"booze.csv\", \"w\") as f:\n f.write(\"{0},{1},{2},{3}\".format(\"CODE\",\"NAME\",\"NUMBER_OF_DRINKS\",\"VOLUME_CONSUMED\"))\n for drinker in all_drinkers:\n if (drinker['drinker_id'] == None):\n drinker['drinker_id'] = \"UNKNOWN\"\n if (drinker['name'] == None):\n drinker['name'] = \"UNKNOWN\"\n if (drinker['volume_consumed'] == None):\n drinker['volume_consumed'] = 0\n if (drinker['number_of_drinks'] == None):\n drinker['number_of_drinks'] = 0\n f.write(\"{0},{1},{2},{3}\\n\".format(drinker['drinker_id'], drinker['name'], drinker['number_of_drinks'], drinker['volume_consumed']))\n\ndef scrape_data_to_html():\n timestamp = datetime.fromtimestamp(time.time()).strftime(\"%H:%M:%S on %A, %d %B, %Y\")\n all_drinkers = drinkers_table.scan()\n drinkers = []\n for drinker in all_drinkers:\n if (drinker['drinker_id'] == None):\n drinker['drinker_id'] = \"UNKNOWN\"\n if (drinker['name'] == None):\n drinker['name'] = \"UNKNOWN\"\n if (drinker['volume_consumed'] == None):\n drinker['volume_consumed'] = 0\n if (drinker['number_of_drinks'] == None):\n drinker['number_of_drinks'] = 0\n d = {}\n d['drinker_id'] = drinker['drinker_id']\n d['name'] = drinker['name']\n d['volume_consumed'] = drinker['volume_consumed']\n d['number_of_drinks'] = drinker['number_of_drinks']\n drinkers.append(d)\n loader = FileLoader('templates')\n template = loader.load_template('drinks.html.template')\n webpage = template.render(locals())\n bucket = s3.get_bucket('kegerator')\n key = Key(bucket)\n key.key = 'drinks.html'\n key.content_type = 'text/html'\n key.set_contents_from_string(webpage)\n key.make_public()\n\nif 
__name__ == \"__main__\":\n scrape_data_to_html()\n","sub_path":"data_scraper.py","file_name":"data_scraper.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"197006666","text":"'''A module to sort list'''\nfrom time import time\nfrom random import shuffle\n\n__all__ = ['insertionsort', 'selectionsort', 'bubblesort', 'shellsort', 'printlist']\n\nprintlist = False\n\ndef insertionsort(list):\n '''插入排序:它的工作原理是在已排序序列中从后向前扫描,找到相应位置并插入'''\n t1 = time()\n for x in range(1, len(list)):\n key = list[x]\n j = x - 1\n while j >= 0 and key < list[j]:\n list[j + 1] = list[j]\n j -= 1\n list[j + 1] = key\n t2 = time()\n print('Insertion sorted list:')\n if printlist:\n for i in range(len(list)): \n print(\"%d\" % list[i])\n print('耗时:%f' % (t2 - t1))\n\ndef selectionsort(list):\n '''选择排序:在未排序序列中找到最小(大)元素,存放到排序序列的起始位置,然后再从剩余未排序元素中继续寻找最小(大)元素,然后放到已排序序列的末尾。以此类推,直到所有元素均排序完毕'''\n t1 = time()\n for x in range(len(list)):\n min_idx = x\n for j in range(x + 1, len(list)): \n if list[min_idx] > list[j]: \n min_idx = j \n list[x], list[min_idx] = list[min_idx], list[x] \n t2 = time()\n print('Selection Sorted list:')\n if printlist:\n for i in range(len(list)): \n print(\"%d\" % list[i])\n print('耗时:%f' % (t2 - t1))\n\ndef bubblesort(list):\n '''冒泡排序:重复地走访过要排序的数列,一次比较两个元素,如果他们的顺序错误就把他们交换过来'''\n n = len(list)\n t1 = time()\n for x in range(n):\n for j in range(0, n - x - 1):\n if list[j] > list[j + 1]:\n list[j], list[j + 1] = list[j + 1], list[j]\n t2 = time()\n print('Bubble sorted list:')\n if printlist:\n for i in range(len(list)): \n print(\"%d\" % list[i])\n print('耗时:%f' % (t2 - t1))\n\ndef shellsort(list): \n '''希尔排序:先将整个待排序的序列分割成为若干子序列,分别进行直接插入排序,待整个序列中的记录\"基本有序\"时,再对全体记录进行依次直接插入排序'''\n n = len(list)\n gap = int(n / 2)\n t1 = time()\n while gap > 0: \n for i in range(gap, n): \n temp = list[i] \n j = i \n while j >= gap and list[j - gap] > temp: \n list[j] = list[j - gap] \n j -= gap \n list[j] = temp \n gap = int(gap / 2)\n t2 = time()\n print('Shell sorted list:')\n if printlist:\n for i in range(len(list)): \n print(\"%d\" % list[i])\n print('耗时:%f' % (t2 - t1))\n\ndef test():\n a = list(range(7500))\n shuffle(a)\n insertionsort(a)\n selectionsort(a)\n bubblesort(a)\n shellsort(a)\n\nif __name__ == '__main__':\n test()\n\n\n","sub_path":"module 文件/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"461739297","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\nimport json\nimport copy\nfrom types import SimpleNamespace as Namespace\nfrom feature.FeatureExtractor import FeatureExtractor\nfrom feature.SimpleFeatureExtractor import SimpleFeatureExtractor\n\nclass TimeSeriesFeatureExtractor (SimpleFeatureExtractor):\n\n\n def insertRollingFeatures(self, df, window, returnDataFrame = True):\n #[['timeStamp','x', 'y', 'z', 'Rolling_Mean_x','Rolling_Mean_y','Rolling_Mean_z','Rolling_Std_x','Rolling_Std_y','Rolling_Std_z', 'label']]\n\n #Calculate rolling mean and standard deviation using number of data set above\n df['Max_x'] = df['x'].rolling(window).max()\n df['Max_y'] = df['y'].rolling(window).max()\n df['Max_z'] = df['z'].rolling(window).max()\n df['Min_x'] = df['x'].rolling(window).min()\n df['Min_y'] = df['y'].rolling(window).min()\n df['Min_z'] = df['z'].rolling(window).min()\n df['Rolling_Mean_x'] = df['x'].rolling(window).mean()\n 
df['Rolling_Std_x'] = df['x'].rolling(window).std()\n df['Rolling_Mean_y'] = df['y'].rolling(window).mean()\n df['Rolling_Std_y'] = df['y'].rolling(window).std()\n df['Rolling_Mean_z'] = df['z'].rolling(window).mean()\n df['Rolling_Std_z'] = df['z'].rolling(window).std()\n df = df.dropna(subset=['Rolling_Mean_x','Rolling_Mean_y','Rolling_Mean_z'])\n if returnDataFrame:\n return df\n df = df[[['timeStamp','x', 'y', 'z', 'Rolling_Mean_x','Rolling_Mean_y','Rolling_Mean_z','Rolling_Std_x','Rolling_Std_y','Rolling_Std_z', 'label']]]\n return df.as_matrix();\n","sub_path":"script/feature/TimeSeriesFeatureExtractor.py","file_name":"TimeSeriesFeatureExtractor.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"366189451","text":"\"\"\"\nview.py\n\nThis module contains the classes used for the view portion of the MVC pattern. The View class is where the GUI is\ncontrolled from and started.\n\nauthor: Wiley Matthews\nauthor: http://newcoder.io/gui/part-3/ Specifically for much of the contents of the SudokuBoard class.\n\"\"\"\nfrom tkinter import *\nfrom typing import List\n\nfrom model import Model\nfrom controller import Controller\n\n# SudokoBoard frame specs.\nWIDTH = 500\nHEIGHT = 500\nMARGIN = 10\nSIDE = 52\n\n\nclass SudokuBoard(Frame):\n\n def __init__(self, parent: Tk, board: List[List[str]]) -> None:\n self.board = board\n self.parent = parent\n Frame.__init__(self, parent)\n\n self.row, self.col = 0, 0\n\n self.__initUI()\n\n def __initUI(self) -> None:\n \"\"\"\n Initialize the UI.\n :return: None\n \"\"\"\n self.parent.title(\"Sudoku\")\n self.pack(fill=BOTH, expand=1)\n self.canvas = Canvas(self,\n width=WIDTH,\n height=HEIGHT)\n self.canvas.pack(fill=BOTH, side=TOP)\n\n self.__draw_grid()\n self.__draw_puzzle()\n\n def __draw_grid(self) -> None:\n \"\"\"\n Draws grid divided with blue lines into 3x3 squares\n :return: None\n \"\"\"\n for i in range(10):\n color = \"blue\" if i % 3 == 0 else \"gray\"\n\n x0 = MARGIN + i * SIDE\n y0 = MARGIN\n x1 = MARGIN + i * SIDE\n y1 = HEIGHT - MARGIN\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n x0 = MARGIN\n y0 = MARGIN + i * SIDE\n x1 = WIDTH - MARGIN\n y1 = MARGIN + i * SIDE\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n def __draw_puzzle(self) -> None:\n \"\"\"\n Draws the characters that consist the puzzle.\n :return: None\n \"\"\"\n self.canvas.delete(\"numbers\")\n for i in range(9):\n for j in range(9):\n answer = self.board[i][j]\n if answer != 0:\n x = MARGIN + j * SIDE + SIDE / 2\n y = MARGIN + i * SIDE + SIDE / 2\n original = self.board[i][j]\n color = \"black\" if answer == original else \"sea green\"\n self.canvas.create_text(\n x, y, text=answer, tags=\"numbers\", fill=color\n )\n\n def update_state(self) -> None:\n \"\"\"\n Redraws the board after a model state update.\n :return: None\n \"\"\"\n self.__draw_puzzle()\n\n\nclass View(object):\n\n def __init__(self, start_board: List[List[str]]) -> None:\n \"\"\"\n Creates root window and saves starting puzzle.\n :param start_board: initial state of puzzle\n \"\"\"\n self.root = Tk()\n self.board = start_board\n\n def start(self) -> None:\n \"\"\"\n Starts the UI and necesary MVC components, then starts solving.\n :return: None\n \"\"\"\n self.sb = SudokuBoard(self.root, self.board)\n self.sb.pack()\n model = Model(self.board)\n model.add_observer(self)\n controller = Controller(model, 0.05)\n controller.start_solving()\n self.root.mainloop()\n exit()\n\n def 
update_state(self) -> None:\n \"\"\"\n Model uses this to inform view the puzzle state has been changed, and that the puzzle needs to be redrawn.\n :return: None\n \"\"\"\n self.sb.update_state()\n self.sb.pack()\n\n\ndef main() -> None:\n \"\"\"\n Test program for when this module is run directly.\n :return: None\n \"\"\"\n b = [\n [\"5\", \"3\", \".\", \".\", \"7\", \".\", \".\", \".\", \".\"],\n [\"6\", \".\", \".\", \"1\", \"9\", \"5\", \".\", \".\", \".\"],\n [\".\", \"9\", \"8\", \".\", \".\", \".\", \".\", \"6\", \".\"],\n [\"8\", \".\", \".\", \".\", \"6\", \".\", \".\", \".\", \"3\"],\n [\"4\", \".\", \".\", \"8\", \".\", \"3\", \".\", \".\", \"1\"],\n [\"7\", \".\", \".\", \".\", \"2\", \".\", \".\", \".\", \"6\"],\n [\".\", \"6\", \".\", \".\", \".\", \".\", \"2\", \"8\", \".\"],\n [\".\", \".\", \".\", \"4\", \"1\", \"9\", \".\", \".\", \"5\"],\n [\".\", \".\", \".\", \".\", \"8\", \".\", \".\", \"7\", \"9\"]\n ]\n view = View(b)\n view.start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sudoku/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"251496244","text":"# -*- coding: utf-8 -*-\nimport os.path\nimport sys\nimport string\nfrom pybtex.database.input import bibtex\nfrom citation_vim.item import Item\nfrom citation_vim.utils import check_path, raiseError\n\nclass bibtexParser(object):\n\n def __init__(self, context):\n self.context = context\n self.bibtex_file = check_path(self.context.bibtex_file)\n\n def load(self):\n \"\"\"\n Returns: A bibtex file as an array of standardised Items.\n \"\"\"\n items = []\n bib_data = self._read_file(self.bibtex_file)\n\n for key in bib_data.entries:\n bib_entry = bib_data.entries[key]\n authors = self.parse_authors(bib_entry)\n\n item = Item()\n item.collections = []\n item.type = bib_entry.type\n item.abstract = self.get_field(bib_entry, \"abstract\")\n item.date = self.get_field(bib_entry, \"year\")\n item.doi = self.get_field(bib_entry, \"doi\")\n item.isbn = self.get_field(bib_entry, \"isbn\")\n item.publication = self.get_field(bib_entry, \"journal\")\n item.language = self.get_field(bib_entry, \"language\")\n item.issue = self.get_field(bib_entry, \"number\")\n item.notes = self.get_field(bib_entry, \"annote\")\n item.pages = self.get_field(bib_entry, \"pages\")\n item.publisher = self.get_field(bib_entry, \"publisher\")\n item.tags = self.get_field(bib_entry, \"keyword\")\n item.title = self.get_field(bib_entry, \"title\")\n item.volume = self.get_field(bib_entry, \"volume\")\n item.url = self.format_url(bib_entry)\n item.file = self.format_file(bib_entry)\n item.author = self.format_author(authors)\n item.key = self.format_key(authors, bib_entry, key)\n item.combine()\n items.append(item)\n return items\n\n def _read_file(self, filename):\n \"\"\"\n Returns: A bibtex file from the pybtex parser\n \"\"\"\n try:\n parser = bibtex.Parser()\n output = parser.parse_file(filename)\n except:\n raiseError(u\"Failed to read {}\".format(self.bibtex_file))\n return output\n\n def strip_braces(self, string):\n \"\"\"\n Returns: string stripped of {} braces.\n \"\"\"\n return string.replace(\"{\",\"\").replace(\"}\",\"\")\n\n def get_field(self, bib_entry, field):\n \"\"\"\n Returns cleaned field value for any bibtex field. 
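Curly braces are stripped from the value, and a missing field yields an empty string.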
\n \"\"\"\n output = bib_entry.fields[field] if field in bib_entry.fields else \"\"\n return self.strip_braces(output)\n\n def parse_authors(self, bib_entry):\n \"\"\"\n Returns: Array of authors\n \"\"\"\n try:\n persons = bib_entry.persons[u\"author\"]\n if sys.version_info[0] == 2:\n authors = [unicode(au).split(\",\") for au in persons]\n elif sys.version_info[0] == 3:\n authors = [str(au).split(\",\") for au in persons]\n except KeyError:\n authors = []\n return authors\n\n def format_first_author(self, authors):\n \"\"\"\n Returns: The first authors surname, if one exists.\n \"\"\"\n if authors == []: \n return \"\"\n return self.strip_braces(authors[0][0]).replace(' ', '_') \n\n def format_title_word(self, bib_entry):\n return self.get_field(bib_entry, \"title\").partition(' ')[0]\n\n def format_author(self, authors):\n \"\"\"\n Returns: Authors - format depending on et_al_limit.\n \"\"\"\n if authors == []: \n return \"\"\n if len(authors) > self.context.et_al_limit:\n return u\"%s et al.\" % authors[0][0]\n if len(authors) > 2:\n auth_string = u\"\"\n for author in authors[:-1]:\n auth_string += author[0] + ', '\n return auth_string + u\"& \" + authors[-1][0]\n if len(authors) == 2:\n return authors[0][0] + u\" & \" + authors[1][0]\n return ', '.join(authors[0])\n\n def format_file(self, bib_entry):\n \"\"\"\n Returns: Attachment file path\n \"\"\"\n attachment = \"\"\n if u\"file\" in bib_entry.fields:\n for file in bib_entry.fields[u\"file\"].split(\";\"):\n details = file.split(\":\")\n if 2 < len(details) and details[2] == \"application/pdf\":\n attachment = details[1]\n break\n return attachment\n\n def format_url(self, bib_entry):\n \"\"\"\n Returns: Url string\n \"\"\"\n url = \"\"\n if u\"file\" in bib_entry.fields:\n for file in bib_entry.fields[u\"file\"].split(\";\"):\n details = file.split(\":\")\n if 2 < len(details) and details[2] != \"application/pdf\":\n url = details[1]\n break\n return url\n\n def format_tags(bib_entry):\n \"\"\"\n Returns: Tags/keywords string\n \"\"\"\n tags = \"\"\n if u\"keywords\" in bib_entry.fields:\n tags = \", \".join(bib_entry.fields[u\"keywords\"])\n return tags\n\n def format_key(self, authors, bib_entry, key):\n \"\"\"\n Returns:\n A key manual format or default bibtex key.\n \"\"\"\n if self.context.key_format == \"\":\n return key\n\n author = self.format_first_author(authors)\n title = self.format_title_word(bib_entry)\n date = self.get_field(bib_entry, \"year\")\n replacements = {\n u\"title\": title.lower(),\n u\"Title\": title.capitalize(), \n u\"author\": author.lower(), \n u\"Author\": author.capitalize(),\n u\"date\": date\n }\n key_format = u\"%s\" % self.context.key_format\n return key_format.format(**replacements)\n\n","sub_path":"python/citation_vim/bibtex/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"303302204","text":"import requests\nimport time\nfrom .models import Webtoon\n\n\ndef get_artists(artists):\n artist = ''\n for i in artists:\n artist += i.get('name') + '/'\n\n return artist[:-1]\n\n\ndef webtoon(day):\n json = requests.get('http://webtoon.daum.net/data/pc/webtoon/list_serialized/' + day + '?timeStamp=' + str(int(time.time()))).json()\n\n for webtoon in json.get('data'):\n webtoon_model = Webtoon()\n webtoon_model.webtoon_id = \"daum_\" + webtoon.get('title')\n webtoon_model.site_name = \"daum\"\n webtoon_model.webtoon_name = webtoon.get('title')\n 
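# get_artists() (defined above) joins the artist names with '/' separators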
webtoon_model.webtoon_author = get_artists(webtoon.get('cartoon').get('artists'))\n webtoon_model.webtoon_img_url = webtoon.get('thumbnailImage2').get('url')\n\n webtoon_model.save()\n\n\ndef webtoon_all():\n week_day = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']\n for day in week_day:\n webtoon(day)\n\n\n","sub_path":"webtoon/webtoon/daum_webtoon.py","file_name":"daum_webtoon.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"529673190","text":"import heapq\nimport math\nn, m = map(int, input().split())\na = list(map(int, input().split()))\na = list(map(lambda x: x*(-1), a))\nticket_num = m\nheapq.heapify(a)\n\nif n > 1:\n for i in range(m):\n MAX = heapq.heappop(a)\n MAX = math.ceil(MAX / 2)\n heapq.heappush(a,MAX)\n \nelse:\n MAX = heapq.heappop(a)\n MAX = math.ceil(MAX / (2**m))\n heapq.heappush(a,MAX)\n\nprint(sum(a)*(-1))","sub_path":"ABC141/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"46564054","text":"\nimport AntsyAngler.Utilities.shrink as crts\nimport AntsyAngler.Stocks.folder as crsf\nimport AntsyAngler.Utilities.pkgstdy as crup\nimport AntsyAngler.Utilities.frames as cruf\nimport AntsyAngler.Utilities.filters as crtf\nfrom multiprocessing import Pool\nimport numpy as np\n\n# d_bt_bs = crsf.cr_bt_bs\n# d_bt_fr = crsf.cr_bt_fr\n\nd_comb = crsf.cr_comb\nd_bt_pp30 = crsf.cr_bt_pp30\nd_bt_pp60 = crsf.cr_bt_pp60\nd_bt_pp120 = crsf.cr_bt_pp120\nd_bt_pp240 = crsf.cr_bt_pp240\n\nd_bt_bs30 = crsf.cr_bt_bs30\nd_bt_bs60 = crsf.cr_bt_bs60\nd_bt_bs120 = crsf.cr_bt_bs120\nd_bt_bs240 = crsf.cr_bt_bs240\n\nd_bt_fr30 = crsf.cr_bt_fr30\nd_bt_fr60 = crsf.cr_bt_fr60\nd_bt_fr120 = crsf.cr_bt_fr120\nd_bt_fr240 = crsf.cr_bt_fr240\n\nvt = 0.01\ncap = 30000\n\nnp.set_printoptions(precision=4, linewidth=800, suppress=True)\n\nsigp = 0.5*(np.array(list(range(10, 510, 10)))+1)**(1/3)\n\n_d_sig_data1 = []\n_d_sig_data2 = []\nfor x1 in range(10, 510, 10):\n _d_sig_data1.append('LrB_'+str(x1)+'D')\n _d_sig_data2.append('QrB_'+str(x1)+'D')\n_d_sig_data = _d_sig_data1 + _d_sig_data2\n\n\ndef _get_sig_multiplier(x, fld):\n # x = 'LrB_10D'\n xx = np.empty(0)\n for i in range(0, 8):\n # i = 0\n y = fld[i].retrieve(x)\n y = y[y.tick_cols()].values\n y = y.flatten()\n y = y[~np.isnan(y)]\n xx = np.concatenate((xx, y))\n xx = np.abs(xx/vt)\n xx = 1.4826*np.median(xx)\n yy = int(x.split('_')[1][:-1])\n return yy, xx\n\n\ndef get_sig_multiplier():\n for fld in [d_bt_pp30, d_bt_pp60, d_bt_pp120, d_bt_pp240]:\n x0 = 'LrB_'\n for i in range(10, 510, 10):\n x = x0+str(i)+'D'\n yy, xx = _get_sig_multiplier(x, fld)\n print(yy, xx)\n x0 = 'QrB_'\n for i in range(10, 510, 10):\n x = x0+str(i)+'D'\n yy, xx = _get_sig_multiplier(x, fld)\n print(yy, xx)\n\n\ndef get_empirical_std_lr(x):\n x /= 100\n x = -0.37402*(x**4)+5.29291*(x**3)-25.93145*(x**2)+122.98748*x+4.21953\n x = 1/np.sqrt(x)\n return x\n\n\ndef get_empirical_std_qr(x):\n x /= 100\n x = 0.01100*(x**4)-0.10998*(x**3)-0.96048*(x**2)+22.41142*x+0.16797\n x = 1/np.sqrt(x)\n return x\n\n_empirical_std_lr = 1/np.array([get_empirical_std_lr(ii) for ii in range(10, 510, 10)])\n_empirical_std_qr = 1/np.array([get_empirical_std_qr(ii) for ii in range(10, 510, 10)])\n\n\ndef np_dstd(x, *args, **kwargs):\n return np.std(np.minimum(x, 0), *args, **kwargs)\n\n\ndef _refresh_fast_backtest(x):\n _d_bt_bs, _d_bt_fr, _d_comb, _d_flag = x\n _d_bt_bs_dt = 
_d_bt_bs.listdir()\n _d_bt_bs_dt = _d_bt_bs_dt[:-2]\n\n ndt = len(_d_bt_bs_dt)\n nsig = 100\n\n ri1_1 = np.zeros([ndt, nsig])\n # hi1_1 = np.zeros([ndt, nsig])\n\n for j, i in enumerate(_d_bt_bs_dt):\n # j = 0\n # i = _d_bt_bs_dt[0]\n xi_all = _d_bt_bs.load(i)\n xi = np.ascontiguousarray(xi_all['arr_0']).astype('float64')\n si1_ = np.vstack(xi_all['arr_1'])/vt\n si2_ = np.vstack(xi_all['arr_2'])/vt\n ci = xi_all['arr_4'].astype('float64')\n tci = xi_all['arr_4']\n del xi_all\n\n # curve-fitted (2-std)\n si1 = np.multiply(si1_, _empirical_std_lr[:, np.newaxis])\n si2 = np.multiply(si2_, _empirical_std_qr[:, np.newaxis])\n si = np.vstack((si1, si2))\n si = si*np.exp(0.5-0.5*(si**2)/4)/2\n\n n = xi.shape[1]\n ei = np.ones(n)\n\n yi = _d_bt_fr.load(i)\n yi[np.isnan(yi)] = 0\n\n # get the risk budgets\n bi = xi[19, :]\n bi = bi/np.sum(bi)\n lmb0 = np.median(bi/np.dot(ci, ei))\n\n flev = xi[0, :]\n\n si1_1 = lmb0*np.dot(si, crts.neut_corr_mat(ci, xi[2:14, :]))*10\n\n ri1_1[j, :] = np.dot(si1_1, yi)\n # hi1_1[j, :] = np.dot(abs(si1_1), flev)\n\n # combining them by mean-variance\n ri1_1_mn = np.mean(ri1_1, axis=0)*256\n ri1_1_sd = np.std(ri1_1, axis=0)*16\n ri1_1_corr = np.corrcoef(ri1_1.T)+np.identity(nsig)\n ri1_1_cov = np.dot(np.diag(ri1_1_sd), np.dot(ri1_1_corr, np.diag(ri1_1_sd)))\n ri1_1_cov = np.dot(np.linalg.inv(ri1_1_cov), ri1_1_mn)\n wgt1_1_ = ri1_1_cov/np.sum(ri1_1_cov)\n _d_comb.save('Signal_Weights', wgt1_1_)\n # check results\n ri1_1_g = np.dot(ri1_1, wgt1_1_)\n print('Results for Cluster # % i' % _d_flag)\n print(16*np.mean(ri1_1, axis=0)/np.std(ri1_1, axis=0))\n print(wgt1_1_)\n print(16*np.mean(ri1_1_g)/np.std(ri1_1_g))\n\n\ndef refresh_fast_backtest():\n for i in range(0, 8):\n _refresh_fast_backtest((d_bt_bs240[i], d_bt_fr240[i], d_comb[i], i))\n\n\nif __name__ == '__main__':\n refresh_fast_backtest()\n\n\n","sub_path":"AntsyAngler/Stocks/backtest/backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":4536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"201410245","text":"\nimport makiflow\nfrom makiflow.generators.gen_base import SegmentIterator, PathGenerator\nfrom glob import glob\nimport os\nimport numpy as np\nfrom sklearn.utils import shuffle\n\n\nclass Generator(PathGenerator):\n def __init__(self, path_images, path_masks):\n self.images = glob(os.path.join(path_images, '*.bmp'))\n self.masks = glob(os.path.join(path_masks, '*.bmp'))\n \n def next_element(self):\n index = 0\n while True:\n if index % len(self.images) == 0:\n self.images, self.masks = shuffle(self.images, self.masks)\n index = 0\n #index = np.random.randint(low=0, high=len(self.images))\n el = {\n SegmentIterator.image: self.images[index],\n SegmentIterator.mask: self.masks[index]\n }\n index += 1\n \n yield el\n\n\nfrom makiflow.models.segmentation.map_methods import LoadDataMethod, ComputePositivesPostMethod\nfrom makiflow.models.segmentation.map_methods import NormalizePostMethod, RGB2BGRPostMethod, SqueezeMaskPostMethod\n\nfrom makiflow.models.segmentation.gen_layers import InputGenLayer\n\ndef get_generator(path_images, path_masks):\n map_method = LoadDataMethod(image_shape=[1024, 1024, 3], mask_shape=[1024, 1024, 3])\n map_method = SqueezeMaskPostMethod()(map_method)\n map_method = RGB2BGRPostMethod()(map_method)\n map_method = NormalizePostMethod(use_float64=True)(map_method)\n map_method = ComputePositivesPostMethod()(map_method)\n return InputGenLayer(\n prefetch_size=6,\n batch_size=6, \n 
path_generator=Generator(path_images, path_masks),\n name='Input',\n map_operation=map_method,\n num_parallel_calls=5\n )\n","sub_path":"Danil_tests/segmentation/Train_separate_NN_test/exp_4/only_6_class/generator_provider.py","file_name":"generator_provider.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"255579703","text":"# Copyright (c) 2015\n#\n# All rights reserved.\n#\n# This file is distributed under the Clear BSD license.\n# The full text can be found in LICENSE in the root directory.\n\nimport ipaddress\nimport logging\nimport os\nimport re\nimport time\nfrom enum import Enum\nfrom functools import cached_property\nfrom typing import List, Optional\n\nimport pexpect\nfrom debtcollector import moves\nfrom termcolor import colored\n\nfrom boardfarm.devices.platform import debian\nfrom boardfarm.exceptions import CodeError\nfrom boardfarm.lib.dhcpoption import configure_option\nfrom boardfarm.lib.dns import DNS\nfrom boardfarm.lib.installers import install_tshark\nfrom boardfarm.lib.linux_nw_utility import Ping\nfrom boardfarm.lib.network_testing import kill_process, tcpdump_capture, tshark_read\n\n\nclass MulticastGroupRecordType(Enum):\n \"\"\"IGMPv3 Record Types.\"\"\"\n\n MODE_IS_INCLUDE = 1\n MODE_IS_EXCLUDE = 2\n CHANGE_TO_INCLUDE_MODE = 3\n CHANGE_TO_EXCLUDE_MODE = 4\n ALLOW_NEW_SOURCES = 5\n BLOCK_OLD_SOURCES = 6\n\n\n# Add Type Hint support for IGMP Records\nMCAST_SOURCE = str\nMCAST_GROUP = str\nMCAST_GROUP_RECORD = List[\n tuple[List[MCAST_SOURCE], MCAST_GROUP, MulticastGroupRecordType]\n]\n\n\nlogger = logging.getLogger(\"bft\")\n\n\nclass DebianLAN(debian.DebianBox):\n model = \"debian_lan\"\n name = \"lan\"\n install_pkgs_after_dhcp = False\n wan_no_eth0 = False\n wan_dhcp = False\n is_bridged = False\n static_route = None\n mgmt_dns = \"8.8.8.8\"\n shared_tftp_server = False\n\n def __init__(self, *args, **kwargs):\n self.parse_device_options(*args, **kwargs)\n\n # introducing a hack till json schema does not get updated\n if not self.dev_array:\n self.legacy_add = True\n self.dev_array = \"lan_clients\"\n\n self.lan_network = ipaddress.IPv4Interface(\n str(kwargs.pop(\"lan_network\", \"192.168.1.0/24\"))\n ).network\n self.lan_gateway = ipaddress.IPv4Interface(\n str(kwargs.pop(\"lan_gateway\", \"192.168.1.1/24\"))\n ).ip\n self.dns = DNS(self, {}, {})\n self.nw_util_ping = Ping(self)\n self.ipv4_client_started = False\n\n def get_lan_gateway(self):\n self.sendline(\"ip route list 0/0 | awk '{print $3}'\")\n self.expect_exact(\"ip route list 0/0 | awk '{print $3}'\")\n self.expect(self.prompt)\n try:\n return ipaddress.IPv4Address(str(self.before.strip()))\n except ipaddress.AddressValueError:\n logger.warning(\n \"Unable to resolve lan client gateway IP. \"\n \"Did you run boot before tests? \"\n \"Using default Ziggo address now. 
(192.168.178.1)\"\n )\n return ipaddress.IPv4Address(\"192.168.178.1\")\n\n def setup(self, config=None):\n self.check_dut_iface()\n # potential cleanup so this wan device works\n self.sendline(\"killall iperf ab hping3 iperf3\")\n self.expect(self.prompt)\n self.sendline(\"\\niptables -t nat -X\")\n self.expect(\"iptables -t\")\n self.expect(self.prompt)\n self.sendline(\"sysctl net.ipv4.ip_forward=1\")\n self.expect(self.prompt)\n self.sendline(\"iptables -t nat -F; iptables -t nat -X\")\n self.expect(self.prompt)\n self.sendline(\"iptables -F; iptables -X\")\n self.expect(self.prompt)\n self.sendline(\n \"iptables -t nat -A PREROUTING -p tcp --dport 222 -j DNAT --to-destination %s:22\"\n % self.lan_gateway\n )\n self.expect(self.prompt)\n self.sendline(\n \"iptables -t nat -A POSTROUTING -o %s -p tcp --dport 22 -j MASQUERADE\"\n % self.iface_dut\n )\n self.expect(self.prompt)\n self.sendline(\"echo 0 > /proc/sys/net/ipv4/tcp_timestamps\")\n self.expect(self.prompt)\n self.sendline(\"echo 0 > /proc/sys/net/ipv4/tcp_sack\")\n self.expect(self.prompt)\n self.sendline(\"echo 0 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts\")\n self.expect(self.prompt)\n self.sendline(f\"pkill --signal 9 -f dhclient.*{self.iface_dut}\")\n self.expect(self.prompt)\n\n def prepare_interface(self):\n # bring ip link down and up\n self.sendline(\n f\"ip link set down {self.iface_dut} && ip link set up {self.iface_dut}\"\n )\n self.expect(self.prompt)\n\n def __kill_dhclient(self, ipv4=True):\n dhclient_str = f\"dhclient {'-4' if ipv4 else '-6'}\"\n\n if ipv4:\n self.release_dhcp(self.iface_dut)\n else:\n self.release_ipv6(self.iface_dut)\n\n self.sendline(\"ps aux\")\n if self.expect([dhclient_str] + self.prompt) == 0:\n logger.warning(\n \"WARN: dhclient still running, something started rogue client!\"\n )\n self.sendline(f\"pkill --signal 9 -f {dhclient_str}.*{self.iface_dut}\")\n self.expect(self.prompt)\n\n self.sendline(f\"kill $( ~/.ssh/config << EOF\")\n self.sendline(f\"Host {self.lan_gateway}\")\n self.sendline(\"StrictHostKeyChecking no\")\n self.sendline(\"UserKnownHostsFile=/dev/null\")\n self.sendline(\"\")\n self.sendline(\"Host krouter\")\n self.sendline(f\"Hostname {self.lan_gateway}\")\n self.sendline(\"StrictHostKeyChecking no\")\n self.sendline(\"UserKnownHostsFile=/dev/null\")\n self.sendline(\"EOF\")\n self.expect(self.prompt)\n\n def __passwordless_setting(self):\n \"\"\"Copy an id to the router so people don't have to type a password to ssh or scp\"\"\"\n self.sendline(f\"nc {self.lan_gateway} 22 -w 1 | cut -c1-3\")\n self.expect_exact(f\"nc {self.lan_gateway} 22 -w 1 | cut -c1-3\")\n if 0 == self.expect([\"SSH\"] + self.prompt, timeout=5) and not self.is_bridged:\n self.sendcontrol(\"c\")\n self.expect(self.prompt)\n self.sendline(\n '[ -e /root/.ssh/id_rsa ] || ssh-keygen -N \"\" -f /root/.ssh/id_rsa'\n )\n if 0 != self.expect([\"Protocol mismatch.\"] + self.prompt):\n self.sendline(\n \"\\nscp ~/.ssh/id_rsa.pub %s:/etc/dropbear/authorized_keys\"\n % self.lan_gateway\n )\n self.expect(\"_keys\")\n if 0 == self.expect([\"assword:\"] + self.prompt):\n self.sendline(\"password\")\n self.expect(self.prompt)\n else:\n self.sendcontrol(\"c\")\n self.expect(self.prompt)\n\n def configure_dhclient(self, dhcpopt):\n \"\"\"configure dhclient options in lan dhclient.conf\n\n param dhcpopt: contains list of dhcp options to configure enable or disable\n type dhcpopt: list)\n \"\"\"\n for opt, enable in dhcpopt:\n configure_option(opt, (self, enable))\n\n def check_dut_iface(self):\n output = 
super().check_dut_iface()\n if \"NO-CARRIER\" in output:\n msg = colored(\n f\"{self.name}: {self.iface_dut} CARRIER DOWN\\n{output}\",\n color=\"red\",\n attrs=[\"bold\"],\n )\n logger.error(msg)\n return output\n\n def send_igmpv3_report(\n self, mcast_group_record: MCAST_GROUP_RECORD, count: int\n ) -> None:\n \"\"\"Send an IGMPv3 report with desired multicast record.\n\n Multicast source and group must be IPv4 addresses.\n Multicast sources need to be non-multicast addresses and\n group address needs to be a multicast address.\n\n Implementation relies on a custom send_igmp_report\n script based on scapy.\n\n :param mcast_group_record: IGMPv3 multicast group record\n :type mcast_group_record: MCAST_GROUP_RECORD\n :param count: num of packets to send in 1s interval\n :type count: int\n :raises CodeError: if send_igmp_report command fails\n \"\"\"\n command = f\"send_igmp_report -i {self.iface_dut} -c {count}\"\n out = self._send_multicast_report(command, mcast_group_record)\n if f\"Sent {count} packets\" not in out:\n raise CodeError(f\"Failed to execute send_mld_report command:\\n{out}\")\n\n def send_mldv2_report(self, mcast_group_record: MCAST_GROUP_RECORD, count: int):\n \"\"\"Send an MLDv2 report with desired multicast record.\n\n Multicast source and group must be IPv6 addresses.\n Multicast sources need to be non-multicast addresses and\n group address needs to be a multicast address.\n\n Implementation relies on a custom send_mld_report\n script based on scapy.\n\n :param mcast_group_record: MLDv2 multicast group record\n :type mcast_group_record: MCAST_GROUP_RECORD\n :param count: num of packets to send in 1s interval\n :type count: int\n :raises CodeError: if send_mld_report command fails\n \"\"\"\n command = f\"send_mld_report -i {self.iface_dut} -c {count}\"\n out = self._send_multicast_report(command, mcast_group_record)\n if f\"Sent {count} packets\" not in out:\n raise CodeError(f\"Failed to execute send_mld_report command:\\n{out}\")\n\n def _send_multicast_report(\n self, command: str, mcast_group_record: MCAST_GROUP_RECORD\n ) -> str:\n args = \"\"\n for sources, group, rtype in mcast_group_record:\n src = \",\".join(sources)\n args += f'-mr \"{src};{group};{rtype.value} \"'\n\n out = self.check_output(f\"{command} {args}\")\n if \"Traceback\" in out:\n raise CodeError(f\"Failed to send the report!!\\n{self.before}\")\n return out\n\n @cached_property\n def ip_addr(self) -> str:\n \"\"\"Return the IPv4 address on IFACE facing DUT.\n\n :return: IPv4 address in string format.\n :rtype: str\n \"\"\"\n return self.get_interface_ipaddr(self.iface_dut)\n\n @cached_property\n def ip6_addr(self) -> str:\n \"\"\"Return the IPv6 address on IFACE facing DUT.\n\n :return: IPv6 address in string format.\n :rtype: str\n \"\"\"\n return self.get_interface_ip6addr(self.iface_dut)\n\n @cached_property\n def gw_mac_addr(self) -> str:\n \"\"\"Return the L2 address of DUT gateway from ARP table.\n\n :return: MAC address in string format.\n :rtype: str\n \"\"\"\n # must only be called post boot.\n route = self.check_output(\"ip route show default\")\n gw_ip = re.findall(r\"default via (.*) dev\", route)[0]\n out = self.check_output(f\"arp -i {self.iface_dut} -a\")\n return re.findall(rf\"\\({gw_ip}\\) at\\s(.*)\\s\\[\", out)[0]\n\n @cached_property\n def mac_addr(self) -> str:\n \"\"\"Return the L2 address of IFACE facing DUT.\n\n :return: MAC address in string format.\n :rtype: str\n \"\"\"\n return self.get_interface_macaddr(self.iface_dut)\n\n def clear_cache(self):\n \"\"\"To clear all 
the cached properties.\"\"\"\n self.__dict__.pop(\"ip_addr\", None)\n self.__dict__.pop(\"ip6_addr\", None)\n self.__dict__.pop(\"gw_mac_addr\", None)\n self.__dict__.pop(\"mac_addr\", None)\n\n\nif __name__ == \"__main__\":\n # Example use\n import sys\n\n try:\n ipaddr, port = sys.argv[1].split(\":\") # noqa : F821\n except Exception:\n raise Exception(\"First argument should be in form of ipaddr:port\")\n dev = DebianLAN(\n ipaddr=ipaddr, color=\"blue\", username=\"root\", password=\"bigfoot1\", port=port\n )\n dev.sendline(\"echo Hello\")\n dev.expect(\"Hello\", timeout=4)\n dev.expect(dev.prompt)\n\n dev.configure()\n if sys.argv[2] == \"test_voip\": # noqa : F821\n sys.path.insert(0, os.getcwd()) # noqa : F821\n sys.path.insert(0, os.getcwd() + \"/tests\") # noqa : F821\n from boardfarm.lib import installers\n\n installers.install_asterisk(dev)\n","sub_path":"boardfarm/devices/debian_lan.py","file_name":"debian_lan.py","file_ext":"py","file_size_in_byte":19671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"342063989","text":"# Reverse a singly-linked list in-place\n\ndef reverse(L):\n # Here we establish two pointers.\n # One is at the head and the other prev follows one node behind it\n prev = None\n current = L.head\n\n # Next we traverse the list starting at the head\n while current is not None:\n # Identify the node that's ahead of current\n next_node = current_next\n # Point that that node behind the current\n current_next = prev\n # Move the two pointers forward\n prev = current\n current = next_node\n # Return that last node\n return prev\n\n # Runtime is going to be O(n) due to the fact we're iterating through the entire list.\n # Space is going to be O(1) since this is being done in-place\n","sub_path":"Linked-Lists/reverseList.py","file_name":"reverseList.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"517126438","text":"# This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.\n\"\"\"\nmisc server components tests\n\"\"\"\n\nfrom flask_wtf import FlaskForm\n\nfrom sner.server.forms import TextAreaListField\nfrom tests.server import DummyPostData\n\n\ndef test_textarealistfield(app): # pylint: disable=unused-argument\n \"\"\"tests TextAreaListField form field\"\"\"\n\n class Xform(FlaskForm):\n \"\"\"form test instance\"\"\"\n a = TextAreaListField()\n\n form = Xform(DummyPostData({'a': 'a\\nb'}))\n assert isinstance(form.a.data, list)\n assert len(form.a.data) == 2\n\n form = Xform(DummyPostData())\n assert isinstance(form.a.data, list)\n assert not form.a.data\n","sub_path":"tests/server/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"76"} +{"seq_id":"156124646","text":"import os\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.use('Agg')\n\nimport constants_cmap\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nfrom datasets import datasets\n\nimport matplotlib.cm as cm\nimport matplotlib.colors as ml_colors\nfrom matplotlib.lines import Line2D\nimport torch\nfrom torch import tensor\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndef knn(X_train,y_train, X_test, y_test):\n neigh = KNeighborsClassifier(n_neighbors=10)\n neigh.fit(X_train, y_train)\n score=neigh.score(X_test, y_test)\n 
print(score)\n print(neigh.score(X_train, y_train))\n\n return score\n\ndef main():\n print(\"start script...\")\n dataset_names=constants_cmap.ALL_DATASET_NAMES\n dataset= datasets.Dataset(dataset_names=dataset_names, data_type=constants_cmap.DATA_TYPE)\n dataloader_ctor= datasets.DataLoader(dataset, 0.2, 0.2)\n trainloader = dataloader_ctor.train_loader()\n testloader = dataloader_ctor.test_loader()\n # dataset_names=dataset_names_tcga+dataset_names_icgc\n\n data_train=tensor([])\n labels_train=tensor([]).long()\n for batch_idx, (data, label) in enumerate(trainloader):\n data_train=torch.cat((data_train, data), 0)\n labels_train=torch.cat((labels_train, label), 0)\n\n n_tcga_unique_labels=len(dataset_names)\n\n data_test=tensor([])\n labels_test=tensor([]).long()\n for batch_idx, (data, label) in enumerate(testloader):\n data_test=torch.cat((data_test, data), 0)\n labels_test=torch.cat((labels_test, label), 0)\n\n data_train=data_train.cpu().numpy()\n labels_train=labels_train.cpu().numpy()\n data_test=data_test.cpu().numpy()\n labels_test=labels_test.cpu().numpy()\n\n n_components=2\n print(\"start pca...\")\n pca = PCA(n_components=n_components).fit(data_train)\n X_train=pca.transform(data_train)\n X_test=pca.transform(data_test)\n print(\"start tsne...\")\n y_train=labels_train\n y_test=labels_test # [constants.ICGC_PSEUDO_LABELS[a-n_tcga_unique_labels] for a in labels_test]\n knn(X_train,y_train, X_test,y_test) # X_test, y_test)\n fig = plt.figure(1, figsize=(20, 20))\n ax = fig.add_subplot(111)\n\n X=np.vstack([X_train,X_test])\n xs=X[:, 0]\n ys=X[:, 1]\n labels=np.hstack([labels_train,labels_test])\n ax.scatter(xs, ys, c=labels)\n colormap = cm.jet\n plt.scatter(xs,ys, c=[a for a in labels], cmap=colormap) # sns.color_palette(\"Paired\", n_colors=len(constants.DATASETS_INCLUDED))[a]\n\n\n label_unique = np.arange(len(np.unique(labels)))\n colorlist_unique = [ml_colors.rgb2hex(colormap(a)) for a in\n label_unique / float(max(labels))]\n patches = [Line2D([0], [0], marker='o', color='gray', label=dataset_names[a],\n markerfacecolor=c) for a, c in zip(label_unique, colorlist_unique)]\n\n for a in label_unique:\n plt.scatter([np.median([xs[i] for i, b in enumerate(labels) if a==b])],[np.median([ys[i] for i, b in enumerate(labels) if a==b])], s=2000, c=colorlist_unique[a], cmap=colormap, alpha=0.5)\n plt.annotate(dataset_names[a],\n xy=(np.median([xs[i] for i, b in enumerate(labels) if a==b]), np.median([ys[i] for i, b in enumerate(labels) if a==b])), xytext=(-20, 20), textcoords='offset points',\n bbox=dict(boxstyle='round,pad=0.5', fc=('yellow' if a= tb_ref, 1, 0)\n\n tb_change.name = 'tb_change'\n tb_change.attrs['long_name'] = 'Tb departure from cold-season mean'\n tb_change.attrs['units'] = 'K'\n tb_change.encoding = {'_FillValue': -999,\n 'scale_factor': 0.1,\n 'dtype': 'int16'}\n\n melt.name = 'melt'\n melt.attrs['long_name'] = 'Melt'\n melt.attrs['units'] = '0 = no melt, 1 = melt'\n melt.encoding = {'_FillValue': -1,\n 'dtype': 'int8'}\n\n return melt, tb_change\n\n#------------------------------------------------------------------------------#\n\ndef calc_meltdays(startdate, enddate, platform=None):\n \"\"\"Calculate the number of melting days during a certain period\n\n Parameters\n ----------\n startdate : string or datetime-like\n Start date\n enddate : string or date-like\n End date\n platform : string, default None\n Valid choices: SMMR, F08, F11, F13, F17\n\n Returns\n -------\n xarray.DataArray\n \"\"\"\n\n start = parse(startdate)\n end = parse(enddate)\n valid_range = 
((start.month >= 4) & (end.month >= 4)) | \\\n ((start.month < 4) & (end.month < 4))\n if not valid_range:\n logger.error('Invalid date range.')\n raise SystemExit\n if start.month >= 4:\n year = start.year\n else:\n year = start.year - 1\n if platform is None:\n platform = '*'\n fname = '_'.join(['melt', platform, str(year)]) +'.nc'\n flist = glob(os.path.join(OUTPUT_NC, fname))\n if not flist:\n logger.error('Found no file matching given pattern.')\n raise SystemExit\n elif len(flist) > 1:\n logger.error('Found multiple files matching given pattern.')\n raise SystemExit\n with xr.open_dataset(flist[0]) as f:\n melt = f['melt'].loc[startdate:enddate]\n meltdays = melt.sum(dim='time', skipna=True)\n\n fout = os.path.join(OUTPUT_NC, 'meltdays.nc')\n meltdays.to_dataset(name='meltdays').to_netcdf(fout)\n logger.info('Data saved to %s', fout)\n\n return meltdays\n\n#------------------------------------------------------------------------------#\n\ndef wrapper_melt(include=None, exclude=None, platform=None,\n startdate=None, enddate=None):\n \"\"\"Wrapper function for converting Tb to melt data (2 obs/day).\"\"\"\n\n # Load file list\n if not include:\n include = '|'.join(['nsidc0071', 'nsidc0032', 'nsidc0342'])\n if not exclude:\n exclude = '|'.join([r'\\.TIM', r'\\.tim'])\n flist = list_files(rootdir=NSIDC_ES25R_BIN,\n include= include,\n exclude= exclude,\n to_dataframe=True,\n startdate=startdate,\n enddate=enddate)\n\n # Remove NRT data already in final archive\n nrtdates = flist.date[flist.dataset == 'nsidc0342']\n otherdates = flist.date[flist.dataset != 'nsidc0342']\n overlap = nrtdates[nrtdates.isin(otherdates)]\n flist.drop(overlap.index, inplace=True)\n\n # Select platform\n if platform is not None:\n flist = flist[flist.platform == platform]\n\n # Load topo data for masking (SMMR data only)\n topo = get_topo('es25r')\n\n # Extract list of platform to iterate over\n platforms = sorted(list(set(flist.platform)))\n\n for platform in platforms:\n\n logger.info('Processing platform %s.', platform)\n flist_platform = flist[flist.platform == platform]\n dates = pd.to_datetime(flist_platform.date)\n flist_platform.set_index('date', inplace=True)\n\n mindate = min(dates)\n maxdate = max(dates)\n minyear = mindate.year\n maxyear = maxdate.year\n if mindate.month < 4:\n minyear = minyear - 1\n if maxdate.month >= 4:\n maxyear = maxyear + 1\n\n for year in range(minyear, maxyear):\n\n # Year runs from Apr 1 of year N through Mar 31 of year N+1\n start = pd.Timestamp(str(year) + '-04-01 00:00:00')\n end = pd.Timestamp(str(year+1) + '-03-31 23:59:00')\n logger.info('Processing period %s to %s.',\n start.date().isoformat(), end.date().isoformat())\n\n # Select files for selected time range\n flist_period = flist_platform[start:end]\n if flist_period.empty:\n logger.info('No files for this period.')\n continue\n\n # Separate files between ascending/descending passes.\n pass_a = flist_period['pass_id'] == 'A'\n pass_d = flist_period['pass_id'] == 'D'\n flist_a = flist_period[pass_a]\n flist_d = flist_period[pass_d]\n\n # Read in brightness temperature data.\n tb_a = get_tbarr(flist_a)\n tb_d = get_tbarr(flist_d)\n\n # Intercalibrate Tb between sensors\n tb_a = calibrate(tb_a, platform)\n tb_d = calibrate(tb_d, platform)\n\n # Fill in the gaps through linear interpolation\n tb_a = interp1d(tb_a)\n tb_d = interp1d(tb_d)\n\n # Compute daily mean and max Tb.\n tb_daymean = convert_to_daily(tb_a, tb_d, func=np.nanmean)\n tb_daymax = convert_to_daily(tb_a, tb_d, func=np.nanmax)\n\n # Compute \"cold 
season\" mean Tb and its std. dev.\n tb_ref, tb_std = calc_ref_tb(tb_daymean, tb_daymax, year, platform)\n\n # Convert daily Tb to melt/no melt\n melt, tb_change = calc_melt(tb=tb_daymax, tb_cold=tb_ref)\n\n # Filter out remaining ocean points (typically with std dev > ~15 K)\n melt = melt.where(tb_std < 15)\n tb_change = tb_change.where(tb_std < 15)\n\n # Assume no melting above 2000m (only for SMMR)\n if platform == 'SMMR':\n melt[:] = np.where(topo > 2000, 0, melt)\n\n # Write to netcdf file\n ds = xr.Dataset({'melt': melt, 'tb_change': tb_change})\n fout = '_'.join(['melt', platform, str(year)]) +'.nc'\n fout = os.path.join(OUTPUT_NC, fout)\n ds.to_netcdf(fout, format='NETCDF3_CLASSIC')\n logger.info('Data saved to %s', fout)\n","sub_path":"packages/melting/melt.py","file_name":"melt.py","file_ext":"py","file_size_in_byte":7265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"504125565","text":"# import ml.py\r\nimport ml\r\n\r\n# import Flask\r\nfrom flask import Flask, send_from_directory, request, json\r\nfrom flask_cors import CORS\r\napp = Flask(__name__)\r\nCORS(app)\r\n\r\n# Send index.html\r\n@app.route('/', methods=[\"GET\"])\r\n@app.route('/index.html', methods=[\"GET\"])\r\ndef get_index():\r\n #return contents of index.html\r\n return send_from_directory('', 'index.html', mimetype='text/html')\r\n\r\n# Send main.js\r\n@app.route('/main.js', methods=[\"GET\"])\r\ndef get_main():\r\n #return contents of main.js\r\n return send_from_directory('', 'main.js', mimetype='text/javascript')\r\n\r\n# Send the result from machine learning\r\n# Endpoint is \"result\"\r\n@app.route('/result', methods=[\"GET\"])\r\ndef result():\r\n\r\n # call the prediction function in ml.py\r\n result = ml.prediction()\r\n \r\n # make a dictionary from the result\r\n resultDict = { \"model\": \"kNN\", \"result\": result }\r\n \r\n # convert dictionary to JSON string\r\n resultString = json.dumps(resultDict)\r\n\r\n return resultString\r\n\r\n# Run the server\r\nif __name__ == '__main__':\r\n \r\n # train the model\r\n ml.train()\r\n \r\n # start the server\r\n app.run(port = 8000)\r\n \r\n# Run the server\r\nif __name__ == '__main__':\r\n \r\n # start the server\r\n app.run(port = 8000)\r\n \r\n","sub_path":"ml_testing/code/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"256954058","text":"from news import *\n\nimport tweepy # python package for accessing Tweet streaming API\nfrom tweepy import API\nfrom tweepy import Stream\nimport json\nimport logging\nimport pandas as pd\nimport configparser\nimport requests\nfrom datetime import date, timedelta\nimport urllib.parse\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\naccess_token = config['twitterAuth']['access_token']\naccess_token_secret = config['twitterAuth']['access_token_secret']\nconsumer_key = config['twitterAuth']['consumer_key']\nconsumer_secret = config['twitterAuth']['consumer_secret']\n\nnews_api_key = config['newsAuth']['api_key']\n\n# instantiate News class\nnews = News(news_api_key)\n# get all news - takes about 30 seconds\nnews.get_all_news()\n\nclass Tweets():\n \n def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret, logger=logging):\n self.logger = logging.basicConfig(filename='tweets.log', filemode='w',\n format=f'%(asctime)s - %(levelname)s - %(message)s')\n self.consumer_key = consumer_key\n 
self.consumer_secret = consumer_secret\n self.access_token = access_token\n self.access_token_secret = access_token_secret\n\n def tweepy_auth(self):\n \"\"\"Authorize tweepy API\"\"\"\n\n self.auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)\n self.auth.set_access_token(self.access_token, self.access_token_secret)\n\n # create API object\n self.api = API(self.auth, wait_on_rate_limit=True, user_agent=get_random_ua('Chrome'))# wait_on_rate_limit_notify=True)\n\n try:\n self.api.verify_credentials()\n logging.info(\"Tweepy API Authenticated\")\n print('Authentication successful')\n except Exception as e:\n logging.error(f\"Error during Tweepy authentication: {e}\")\n print('Authentication error. Did you set your access tokens?')\n raise e\n return self.api\n \n def get_tweets(self, news_keywords, news_instance): # TODO add stream listening stuff to params\n searched_tweets = self.tweet_search(news_keywords)\n stream_tweets = TwitterStreamListener.on_status(listener, tweet_stream)\n\n # all_tweets = {}\n # # process tweets\n # for tweet in searched_tweets:\n # # count tweets\n # pass\n # # add count to df column?\n \n # for tweet in stream_tweets:\n # pass\n # # break tweets apart for table\n # for tweet in searched_tweets, stream_tweets:\n # all_tweets[\"tweet_id\"] = tweet['id']\n\n # # add all tweets to database! via mogrify\n\n # # put tweets in df\n # self.all_tweets_df = pd.DataFrame.from_dict(all_tweets, columns=[\n # \"tweet_id\", \"user_id\", \"location\", \"createdAt\", \"tweet_text\"])\n\n # self.all_tweets_df.set_index(\"tweet_id\")\n\n # # tweets mention count to news df column\n # news_instance.all_news_df[\"tweet_mention_count\"] = self.all_tweets_df[\"tweet_id\"].apply(\n # np.count_nonzero)\n\n # clear dataframe?\n \n def tweet_search(self, news_keywords):\n \"\"\"Search for tweets within previous 7 days.\n Inputs: \n keyword list\n Returns: \n Tweet list => JSON\n \"\"\"\n api = self.api\n # tweet_list = []\n # unpack keyword tuples\n for keys in news_keywords:\n keywords = list(keys) # TODO add itertools combinations\n for word in keywords:\n # tweets = tweepy.Cursor(self.api.search_tweets, q=str(word) + \" -filter:retweets\", lang='en').items()\n # collect tweets, filter out retweets\n try: \n print('Searching for tweets matching keywords')\n result = api.search_tweets(q=str(\n word) + \" -filter:retweets\", lang='en')\n status = result[0]\n tweets = status._json\n except (TypeError) as e:\n logging.error('Error: %s', e)\n print('Error: keyword not found in tweet search')\n continue\n # else:\n # print('Loading tweets to JSON')\n # tweets = json.loads(status._json)\n \n else:\n # write tweets to json file\n with open(\"tweets.json\", \"w\") as f:\n print('Loading tweets to JSON file')\n json.dump(tweets, f)\n # self.logging.info('Success')\n print('Success')\n\n finally:\n pass # TODO add tweet unpacking & cleaning\n\n #return result\n\n # self.tweet_search_df = pd.DataFrame.from_dict(self.tweet_search_dict, columns=[\n # \"tweet_id\", \"user_id\", \"location\", \"createdAt\", \"tweet_text\"])\n # self.tweet_search_df.set_index(\"tweet_id\")\n # return self.tweet_search_df\n\n # for status in tweets:\n\n \n # print(type(tweets))\n # for tweet in tweets:\n # print(tweet)\n\n #tweets = tweets['Status']['_json']\n\n # with open(\"tweets.json\", \"w\") as f:\n # # write tweets to json file\n # json.dump(tweets, f)\n\n # with open(\"tweets.json\", \"r\") as file:\n # # create python object from json\n # tweets_json = file.read().split(\"\\n\")\n\n # for tweet in 
tweets_json:\n # tweet_obj = json.loads(tweet)\n\n # #flatten nested fields\n # if 'quoted_status' in tweet_obj:\n # tweet_obj['quote_tweet'] = tweet_obj['quoted_status']['extended_tweet']['full_text']\n # if 'user' in tweet_obj:\n # tweet_obj['location'] = tweet_obj['user']['location']\n # if 'created_at' in tweet_obj:\n # tweet_obj['createdAt'] = tweet_obj['created_at']\n # if 'truncated' == True:\n # pass\n # if 'entities' in tweet_obj:\n # tweet_obj['hashtags'] = tweet_obj['entities']['hashtags']\n\n # tweet_list.append(tweet_obj)\n # print(tweet_list)\n\n # return tweet_list\n\n # for tweet in tweets:\n # print(tweet)\n \n # tweet_search_dict = {[tweet.id, tweet.user.id, tweet.user.location, tweet.created_at, tweet.text] for tweet in tweets}\n # print(tweet_search_dict)\n \n # self.tweet_search_df = pd.DataFrame.from_dict(tweet_search_dict, columns=[\"tweet_id\", \"user_id\", \"location\", \"createdAt\", \"tweet_text\"])\n\n #return tweets\n # self.tweet_search_df.set_index(\"tweet_id\")\n \n #return self.tweet_search_df\n #return tweet_search_dict\n \n # for status in tweets:\n\n # print(type(tweets))\n # for tweet in tweets:\n # print(tweet)\n\n #tweets = tweets['Status']['_json']\n\n # with open(\"tweets.json\", \"w\") as f:\n # # write tweets to json file\n # json.dump(tweets, f)\n\n # with open(\"tweets.json\", \"r\") as file:\n # # create python object from json\n # tweets_json = file.read().split(\"\\n\")\n\n # for tweet in tweets_json:\n # tweet_obj = json.loads(tweet)\n\n # #flatten nested fields\n # if 'quoted_status' in tweet_obj:\n # tweet_obj['quote_tweet'] = tweet_obj['quoted_status']['extended_tweet']['full_text']\n # if 'user' in tweet_obj:\n # tweet_obj['location'] = tweet_obj['user']['location']\n # if 'created_at' in tweet_obj:\n # tweet_obj['createdAt'] = tweet_obj['created_at']\n # if 'truncated' == True:\n # pass\n # if 'entities' in tweet_obj:\n # tweet_obj['hashtags'] = tweet_obj['entities']['hashtags']\n\n # tweet_list.append(tweet_obj)\n # print(tweet_list)\n\n # return tweet_list\n\n # for tweet in tweets:\n # print(tweet)\n\n # tweet_search_dict = {[tweet.id, tweet.user.id, tweet.user.location, tweet.created_at, tweet.text] for tweet in tweets}\n # print(tweet_search_dict)\n\n # self.tweet_search_df = pd.DataFrame.from_dict(tweet_search_dict, columns=[\"tweet_id\", \"user_id\", \"location\", \"createdAt\", \"tweet_text\"])\n\n #return tweets\n # self.tweet_search_df.set_index(\"tweet_id\")\n\n #return self.tweet_search_df\n #return tweet_search_dict\n\n \n def tweet_trends(self):\n # returns JSON\n # 1 refers to USA WOEID \n self.tweet_trends_list = []\n result = tweepy.Cursor(self.api.trends_place(1))\n\n for trend in tweepy.Cursor(result).items():\n self.tweet_trends_list.append(trend)\n return self.tweet_trends_list\n \n #TODO append to dataframe\n self.tweet_trends_df = pd.DataFrame(self.tweet_trends_list)\n return self.tweet_trends_df \n\n# define stream listener class\nclass TwitterStreamListener(tweepy.StreamListener):\n def __init__(self, api=None):\n super(TwitterStreamListener, self).__init__()\n self.num_tweets = 0\n # self.file = open('tweets.txt', 'w')\n # self.db = ''\n self.tweet_list = []\n # self.file = open(\"tweets.json\", \"w\")\n\n def on_status(self, status):\n tweet = status._json\n\n with open(\"tweets.json\", \"w\") as f:\n # write tweets to json file\n json.dump(tweet, f)\n \n with open(\"tweets.json\", \"r\") as file:\n # create python object from json\n tweets_json = file.read().split(\"\\n\")\n\n for tweet in tweets_json:\n 
tweet_obj = json.loads(tweet)\n\n #flatten nested fields\n if 'quoted_status' in tweet_obj:\n tweet_obj['quote_tweet'] = tweet_obj['quoted_status']['extended_tweet']['full_text']\n if 'user' in tweet_obj:\n tweet_obj['location'] = tweet_obj['user']['location']\n # if 'created_at' in tweet_obj:\n # tweet_obj['created_at'] = pd.to_datetime(tweet)\n \n\n self.tweet_list.append(status)\n self.num_tweets += 1\n\n # flatten data to dataframe\n # tweets = pd.json_normalize(self.tweet_list, record_path=['articles'])\n self.tweets_df = pd.DataFrame(self.tweet_list, columns=[\"tweet_id\", \"publishedAt\", \"userID\", \"text\", \"location\"])\n\n return self.tweets_df\n \n if self.num_tweets < 450: # whatever the max stream rate is for the twitter API Client\n return True\n else:\n return False\n\n\nkeywords = dict(news.all_news_df[\"keywords\"])\n\n#print(keywords)\nt = Tweets(consumer_key, consumer_secret, access_token, access_token_secret)\nauth = t.tweepy_auth()\n# search_df = t.tweet_search(keywords)\n\n\n","sub_path":"tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":11885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"342063989","text":"# Reverse a singly-linked list in-place\n\ndef reverse(L):\n # Here we establish two pointers.\n # One is at the head and the other prev follows one node behind it\n prev = None\n current = L.head\n\n # Next we traverse the list starting at the head\n while current is not None:\n # Identify the node that's ahead of current\n next_node = current.next\n # Point that node back at the previous one\n current.next = prev\n # Move the two pointers forward\n prev = current\n current = next_node\n # Return the last node\n return prev\n\n # Runtime is going to be O(n) due to the fact we're iterating through the entire list.\n # Space is going to be O(1) since this is being done in-place\n","sub_path":"Linked-Lists/reverseList.py","file_name":"reverseList.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"253408844","text":"#!/usr/bin/python\n\nfrom __future__ import division, print_function\n\nimport os\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.io import ascii\n\n## array[:N] gives first N values in array\n\npath = '/Users/kevinnapier/Mechanics/Lab/Lab01/data'\n\n##################### Reading in Data #########################\n\n# Wooden Ball Data\nWB11 = np.loadtxt(os.path.join(path, 'WoodenBallTrial1-1.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nWB12 = np.loadtxt(os.path.join(path, 'WoodenBallTrial1-2.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nWB21 = np.loadtxt(os.path.join(path, 'WoodenBallTrial2-1.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nWB22 = np.loadtxt(os.path.join(path, 'WoodenBallTrial2-2.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nWB31 = np.loadtxt(os.path.join(path, 'WoodenBallTrial3-1.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nWB32 = np.loadtxt(os.path.join(path, 'WoodenBallTrial3-2.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\n\n# Big Black Ball Data\nBBB11 = np.loadtxt(os.path.join(path, 'BBB_Trial_1-1.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nBBB12 = np.loadtxt(os.path.join(path, 'BBB_Trial_1-2.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nBBB21 = np.loadtxt(os.path.join(path, 'BBB_Trial_2-1.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nBBB22 = np.loadtxt(os.path.join(path, 'BBB_Trial_2-2.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nBBB31 = np.loadtxt(os.path.join(path, 'BBB_Trial_3-1.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nBBB32 = np.loadtxt(os.path.join(path, 'BBB_Trial_3-2.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\n\n# Small Black Ball Data\nSBB11 = np.loadtxt(os.path.join(path, 'SBB_Trial1-1.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nSBB12 = np.loadtxt(os.path.join(path, 'SBB_Trial1-2.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nSBB21 = np.loadtxt(os.path.join(path, 'SBB_Trial2-1.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nSBB22 = np.loadtxt(os.path.join(path, 'SBB_Trial2-2.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nSBB31 = np.loadtxt(os.path.join(path, 'SBB_Trial3-1.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\nSBB32 = np.loadtxt(os.path.join(path, 
'SBB_Trial3-2.txt'),\n skiprows=7,\n usecols = (0,2),\n unpack=True)\n\n##################### Trimming/Parsing Data #########################\n\n# Wooden Ball\nWB11 = (WB11.transpose()-WB11.transpose()[0]).transpose()\nWB12 = (WB12.transpose()-WB12.transpose()[0]).transpose()\nWB11 = WB11[:,:-1] # Trim the matrices to equal sizes\n\nWB21 = (WB21.transpose()-WB21.transpose()[0]).transpose()\nWB22 = (WB22.transpose()-WB22.transpose()[0]).transpose()\nWB22 = WB22[:,:-1]\n\nWB31 = (WB31.transpose()-WB31.transpose()[0]).transpose()\nWB32 = (WB32.transpose()-WB32.transpose()[0]).transpose()\nWB32 = WB32[:,:-2]\n\n# Small Black Ball\nSBB11 = (SBB11.transpose()-SBB11.transpose()[0]).transpose()\nSBB12 = (SBB12.transpose()-SBB12.transpose()[0]).transpose()\nSBB12 = SBB12[:,:-4]\n\nSBB21 = (SBB21.transpose()-SBB21.transpose()[0]).transpose()\nSBB22 = (SBB22.transpose()-SBB22.transpose()[0]).transpose()\nSBB22 = SBB22[:,:-1]\n\nSBB31 = (SBB31.transpose()-SBB31.transpose()[0]).transpose()\nSBB32 = (SBB32.transpose()-SBB32.transpose()[0]).transpose()\nSBB31 = SBB31[:,:-2]\n\n# Big Black Ball\nBBB11 = (BBB11.transpose()-BBB11.transpose()[0]).transpose()\nBBB12 = (BBB12.transpose()-BBB12.transpose()[0]).transpose()\nBBB11 = BBB11[:,:-1]\n\nBBB21 = (BBB21.transpose()-BBB21.transpose()[0]).transpose()\nBBB22 = (BBB22.transpose()-BBB22.transpose()[0]).transpose()\nBBB22 = BBB22[:,:-1]\n\nBBB31 = (BBB31.transpose()-BBB31.transpose()[0]).transpose()\nBBB32 = (BBB32.transpose()-BBB32.transpose()[0]).transpose()\nBBB32 = BBB32[:,:-5]\n\n##################### Averaging Data #########################\n\n\n# Individual Means\nWB1 = np.mean(np.array([WB11, WB12]), axis=0)\nSBB1 = np.mean(np.array([SBB11, SBB12]), axis=0)\nBBB1 = np.mean(np.array([BBB11, BBB12]), axis=0)\n\nWB2 = np.mean(np.array([WB21, WB22]), axis=0)\nSBB2 = np.mean(np.array([SBB21, SBB22]), axis=0)\nBBB2 = np.mean(np.array([BBB21, BBB22]), axis=0)\n\nWB3 = np.mean(np.array([WB31, WB32]), axis=0)\nSBB3 = np.mean(np.array([SBB31, SBB32]), axis=0)\nBBB3 = np.mean(np.array([BBB31, BBB32]), axis=0)\n\n# Overall Means\nBBB2 = BBB2[:,:-3]\nBBB3 = BBB3[:,:-2]\n\nSBB2 = SBB2[:,:-2]\nSBB3 = SBB3[:,:-5]\n\nWB = np.mean(np.array([WB1, WB2, WB3]), axis=0)\nBBB = np.mean(np.array([BBB1, BBB2, BBB3]), axis=0)\nSBB = np.mean(np.array([SBB1, SBB2, SBB3]), axis=0)\n\n# Generate a throretical curve with no drag\ngrav = 9.8\ntime = np.linspace(0,1.0,50)\nynodrag = -0.5 * grav * time**2\n\n# Generate a theoretical curve with linear drag\ntauthe = 0.77\nylindrag2 = grav * tauthe**2 * (1- (time / tauthe) - np.exp(-time / tauthe))\n\n##################### Calculating Error #########################\n\n# aa = (WB11-WB12)[1]\n# bb = (WB21-WB22)[1]\n# cc = (WB31-WB32)[1]\n# dd = np.mean([aa,bb,cc],axis=0)\n# #plt.errorbar(WB[0], WB[1], xerr=0, yerr=dd)\n\n# aaa = (SBB11-SBB12)[1]\n# bbb = (SBB21-SBB22)[1]\n# ccc = (SBB31-SBB32)[1]\n# ddd = np.mean([aaa,bbb,ccc],axis=0)\n# #plt.errorbar(SBB[0], SBB[1], xerr=0, yerr=ddd)\n\n# aaaa = (BBB11-BBB12)[1]\n# bbbb = (BBB21-BBB22)[1]\n# cccc = (BBB31-BBB32)[1]\n# dddd = np.mean([aaaa,bbbb,cccc],axis=0)\n# #plt.errorbar(WB[0], WB[1], xerr=0, yerr=dddd)\n\n##################### Plotting Flight Paths ##########################\n\n\nplt.figure()\nplt.plot(WB[0], WB[1], color='orange', marker='o',\n linestyle='None', label='Wooden Ball')\nplt.plot(BBB[0], BBB[1], color='brown', marker='v',\n linestyle='None', label='Big Styrofoam Ball')\nplt.plot(SBB[0], SBB[1], color='purple', marker='s',\n linestyle='None', label='Small Styrofoam 
Ball')\nplt.plot(time, ynodrag, label='No Drag')\nplt.plot(time, ylindrag2, label=r'Linear Drag, $\\tau$ = 0.77')\nplt.legend(numpoints=1, loc=3)\nplt.xlabel('Time (s)')\nplt.ylabel('Position (m)')\nplt.savefig('flightpaths.png')\n\n\n##################### chi^2 fitting #########################\n\n\nBBBmass = 7E-3 # mass of BBB\nSBBmass = 1E-3 # mass of SBB\nWBmass = 1.1E-2 # mass of WB\n\n# Find optimum drag coefficient for linear drag\n\ndef calculatechi(tt, position, mass, name, colour, lim, size):\n bdragarray = np.logspace(np.log10(1E-3), np.log10(0.003), 5000)\n chisq = np.zeros_like(bdragarray)\n for ii, bdrag in enumerate(bdragarray):\n tau = mass/bdrag\n ylindrag = grav * tau**2 * (1- (tt / tau) - np.exp(-tt / tau))\n chisq[ii] = np.log10(np.sum((position - ylindrag)**2))\n plt.plot(mass/bdragarray, chisq, color = colour,\n label=size+' Styrofoam Ball fit')\n plt.xlabel(r'$\\tau$')\n plt.ylabel('$\\log_{10}(\\chi^2)$', rotation=90)\n plt.legend(loc=2)\n plt.xlim([0.6, 0.9])\n return;\n\n##################### chi^2 plotting #########################\n\n# plt.figure(figsize=(12,5))\nplt.figure()\n#plt.subplot(1,1,1)\n#calculatechi(BBB[0], BBB[1], BBBmass, 'BBB', 'brown', 1.0, 'Big') \n# plt.subplot(1,2,2)\ncalculatechi(SBB[0], SBB[1], SBBmass, 'SBB', 'purple', 1.0, 'Small')\nplt.tight_layout()\nplt.savefig('chisq.png')\n\n# Generating LaTex Tables\n#ascii.write(WB11.transpose(), format='latex')\n\n############# Errorbars if time allows ##############\n## not necessary. Shortest route to a completed lab\n","sub_path":"repos/school/Mechanics/Lab/Lab01/lab01analysis.py","file_name":"lab01analysis.py","file_ext":"py","file_size_in_byte":8481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"37316514","text":"# import socket programming library\r\nimport socket\r\n\r\n# import thread module\r\nfrom _thread import *\r\nimport threading\r\n\r\nthread_lock = list([threading.Lock() for i in range(1000)])\r\n\r\n\r\n# thread function\r\ndef evaluation(c,i):\r\n\twhile True:\r\n\r\n\t\t# data received from client\r\n\t\tdata = c.recv(1024)\r\n\t\tif not data:\r\n\t\t\tprint('End ................')\r\n\t\t\t\r\n\t\t\tthread_lock[i].release()\r\n\t\t\t# lock released on exit\r\n\t\t\tbreak\r\n\r\n\t\ttry :\r\n\t\t\tdata = str(eval(data)).encode('ascii')\r\n\t\texcept :\r\n\t\t\tdata = str(\"Invalid Arithmetic expression\").encode('ascii')\r\n\r\n\t\t# send back reversed string to client\r\n\t\tc.send(data)\r\n\r\n\t# connection closed\r\n\tc.close()\r\n\r\n\r\ni = -1 \r\n\r\ndef Main():\r\n\thost = \"\"\r\n\t\r\n\t# reverse a port on your computer\r\n\t# in our case it is 12345 but it\r\n\t# can be anything\r\n\tport = 12345\r\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\ts.bind((host, port))\r\n\tprint(\"socket binded to port\", port)\r\n\r\n\t# put the socket into listening mode\r\n\ts.listen(5)\r\n\tprint(\"socket is listening\")\r\n\r\n\t# a forever loop until client wants to exit\r\n\twhile True:\r\n\r\n\t\t# establish connection with client\r\n\t\tc, addr = s.accept()\r\n\r\n\t\t# lock acquired by client\r\n\t\tglobal i \r\n\t\ti+=1 \r\n\t\tthread_lock[i].acquire()\r\n\t\tprint('Connected to :', addr[0], ':', addr[1])\r\n\r\n\t\t# Start a new thread and return its identifier\r\n\t\tstart_new_thread(evaluation, (c,i,))\r\n\r\n\t\t\r\n \r\n\ts.close()\r\n\r\n\r\nif __name__ == 
'__main__':\r\n\tMain()\r\n","sub_path":"server2.py","file_name":"server2.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"84072058","text":"import argparse\nimport json\n\nimport boto3\nfrom jinja2 import Environment, FileSystemLoader\n\n\n\"\"\"\nA bunch of free functions that we use in all scripts.\n\"\"\"\n\n\ndef get_jinja_env(config):\n \"\"\"\n Get a jinja2 Environment object that we can use to find templates.\n \"\"\"\n return Environment(loader=FileSystemLoader(\".\"))\n\n\ndef json_file(filename):\n with open(filename, \"r\") as f:\n return json.load(f)\n\n\ndef get_parent_parser():\n \"\"\"\n Get an argparse parser with arguments that are always needed\n \"\"\"\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument(\n \"--prod\",\n action=\"store_false\",\n dest=\"sandbox\",\n default=True,\n help=\"Whether to run on the production AMT site.\",\n )\n parser.add_argument(\"-hf\", \"--hit_ids_file\")\n parser.add_argument(\n \"-c\", \"--config\", default=\"config.json\", type=json_file\n )\n return parser\n\n\ndef get_mturk_connection_from_args(args):\n \"\"\"\n Utility method to get an MTurkConnection from argparse args.\n \"\"\"\n aws_access_key = args.config.get(\"aws_access_key\")\n aws_secret_key = args.config.get(\"aws_secret_key\")\n return get_mturk_connection(\n sandbox=args.sandbox,\n aws_access_key=aws_access_key,\n aws_secret_key=aws_secret_key,\n )\n\n\ndef get_mturk_connection(\n sandbox=True,\n aws_access_key=None,\n aws_secret_key=None,\n region_name=\"us-east-1\",\n):\n \"\"\"\n Get a boto mturk connection. This is a thin wrapper over boto3.client;\n the only difference is a boolean flag to indicate sandbox or not.\n \"\"\"\n kwargs = {}\n # boto3 client requires a region to make a connection. if you\n # have a default region in your ~/.aws/config other than us-east-1,\n # it throws an error. Since Mturk endpoint is by default only in\n # us-east-1, there is no point of asking users to provide it. See #29\n kwargs[\"region_name\"] = region_name\n if aws_access_key is not None:\n kwargs[\"aws_access_key_id\"] = aws_access_key\n if aws_secret_key is not None:\n kwargs[\"aws_secret_access_key\"] = aws_secret_key\n\n if sandbox:\n host = \"https://mturk-requester-sandbox.us-east-1.amazonaws.com\"\n else:\n host = \"https://mturk-requester.us-east-1.amazonaws.com\"\n return boto3.client(\"mturk\", endpoint_url=host, **kwargs)\n","sub_path":"simpleamt.py","file_name":"simpleamt.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"250640620","text":"#! 
/usr/bin/env python\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nsys.path.insert(0,'../../..')\n\nimport chronostar.tfgroupfitter as tfgf\nimport chronostar.transform as tf\nimport chronostar.traceback as tb\nimport chronostar.error_ellipse as ee\nfrom chronostar import utils\n\nresults = []\nfor i in range(30):\n try:\n res = np.load(\"burnin_chain{}.npy\".format(i))\n print(\"Successfully loaded chain {}\".format(i))\n results.append(res)\n except IOError:\n pass\n\nfinal = np.load(\"result.npy\")\nstar_pars = tfgf.read_stars(\"tb_data.pkl\")\n\nprint(\"Origin's score: {}\".format(tfgf.lnprobfunc(final[3], star_pars)))\nprint(\"Fitted's score: {}\".format(tfgf.lnprobfunc(final[0], star_pars)))\n\n\nnstars = star_pars['xyzuvw'].shape[0]\nstar_covs = star_pars['xyzuvw_cov'][:,0]\nstar_mns = star_pars['xyzuvw'][:,0]\nnstars = star_mns.shape[0]\n\nfitted_mean_then = final[0][:6]\nfitted_cov_then = tfgf.generate_cov(final[0])\nfitted_age = final[0][8]\n\nfitted_mean_now = tb.trace_forward(fitted_mean_then, fitted_age)\nfitted_cov_now = tf.transform_cov(\n fitted_cov_then, tb.trace_forward, fitted_mean_then, dim=6,\n args=(fitted_age,)\n)\nlnols = tfgf.get_lnoverlaps(\n fitted_cov_now, fitted_mean_now, star_covs, star_mns, nstars\n)\n\norigin_mean_then = final[3][:6]\norigin_cov_then = tfgf.generate_cov(final[3])\norigin_age = final[3][8]\n\norigin_mean_now = tb.trace_forward(origin_mean_then, origin_age)\norigin_cov_now = tf.transform_cov(\n origin_cov_then, tb.trace_forward, origin_mean_then, dim=6,\n args=(origin_age,)\n)\no_lnols = tfgf.get_lnoverlaps(\n origin_cov_now, origin_mean_now, star_covs, star_mns, nstars\n)\n\ntrue_orign_cov_then = utils.generate_cov(utils.internalise_pars(final[5]))\n","sub_path":"scripts/retired/tf_debug_script.py","file_name":"tf_debug_script.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"624555149","text":"import pandas as pd\nfrom datetime import datetime, timedelta\nfrom gmma import mixture\nfrom tqdm import tqdm\nimport numpy as np\nfrom sklearn.cluster import DBSCAN \nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom pydantic import BaseModel\nfrom datetime import datetime, timedelta\nfrom typing import List, Dict, NamedTuple, Union\nfrom fastapi import FastAPI\n\napp = FastAPI()\n\nstations = pd.read_csv(\"../../stations.csv\", delimiter=\"\\t\", index_col=\"station\")\nnum_sta = len(stations)\ndims = ['x(km)', 'y(km)', 'z(km)']\nbounds = ((-1, 111),(-1, 111),(0, 20), (None, None))\nuse_dbscan = True\nuse_amplitude = True\ndbscan_eps = 111/(6.0/1.75)/2\ndbscan_min_samples = int(16 * 0.8)\nmin_picks_per_eq = int(16 * 0.6)\noversample_factor = 5.0\nverbose = 1\n\nclass Pick(BaseModel):\n picks: List[Dict[str, Union[float, str]]]\n\n\nto_seconds = lambda t: datetime.strptime(t, \"%Y-%m-%dT%H:%M:%S.%f\").timestamp()\nfrom_seconds = lambda t: [datetime.fromtimestamp(x).strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[:-3] for x in t]\n\ndef convert_picks(picks, stations):\n data, locs, phase_type, phase_weight = ([],[],[],[])\n for pick in picks:\n data.append([to_seconds(pick[\"timestamp\"]), np.log10(pick[\"amp\"]*1e2)])\n locs.append(stations.loc[pick[\"id\"]][dims].values.astype(\"float\"))\n phase_type.append(pick[\"type\"].lower())\n phase_weight.append(pick[\"prob\"])\n data = np.array(data)\n locs = np.array(locs)\n phase_weight = np.array(phase_weight)[:,np.newaxis]\n return data, locs, phase_type, phase_weight\n\n\ndef 
association(data, locs, phase_type, phase_weight):\n \n db = DBSCAN(eps=dbscan_eps, min_samples=dbscan_min_samples).fit(data)\n labels = db.labels_\n unique_labels = set(labels)\n\n events = []\n for k in unique_labels:\n if k == -1:\n continue\n\n class_mask = (labels == k)\n data_ = data[class_mask]\n locs_ = locs[class_mask]\n phase_type_ = np.array(phase_type)[class_mask]\n phase_weight_ = phase_weight[class_mask]\n\n num_event_ = min(max(int(len(data_)/num_sta*oversample_factor), 1), len(data_))\n t0 = data_[:,0].min()\n t_range = max(data_[:,0].max() - data_[:,0].min(), 1)\n centers_init = np.vstack([np.ones(num_event_)*np.mean(stations[\"x(km)\"]),\n np.ones(num_event_)*np.mean(stations[\"y(km)\"]),\n np.zeros(num_event_),\n np.linspace(data_[:,0].min()-0.1*t_range, data_[:,0].max()+0.1*t_range, num_event_)]).T # n_eve, n_dim(x, y, z) + 1(t)\n \n\n if use_amplitude:\n covariance_prior = np.array([[1,0],[0,1]]) * 3\n else:\n covariance_prior = np.array([[1]])\n data = data[:,0:1]\n gmm = mixture.BayesianGaussianMixture(n_components=num_event_, \n weight_concentration_prior=1000/num_event_,\n mean_precision_prior = 0.3/t_range,\n covariance_prior = covariance_prior,\n init_params=\"centers\",\n centers_init=centers_init, \n station_locs=locs_, \n phase_type=phase_type_, \n phase_weight=phase_weight_,\n loss_type=\"l1\",\n bounds=bounds,\n max_covar=10.0,\n reg_covar=0.1,\n ).fit(data_) \n \n pred = gmm.predict(data_) \n prob = gmm.predict_proba(data_)\n prob_eq = prob.mean(axis=0)\n prob_data = prob[range(len(data_)), pred]\n score_data = gmm.score_samples(data_)\n\n idx = np.array([True if len(data_[pred==i, 0]) >= max(num_sta*0.6, 4) else False for i in range(len(prob_eq))]) #& (prob_eq > 1/num_event) #& (std_eq[:, 0,0] < 40)\n eq_idx = np.arange(len(idx))[idx]\n\n time = from_seconds(gmm.centers_[idx, len(dims)])\n loc = gmm.centers_[idx, :len(dims)]\n if use_amplitude:\n mag = gmm.centers_[idx, len(dims)+1]\n std_eq = gmm.covariances_[idx,...]\n\n for i in range(len(time)):\n events.append({\"time\": time[i],\n \"location\": loc[i].tolist(),\n \"magnitude\": mag[i],\n \"std\": std_eq[i].tolist()})\n\n print(events)\n \n\n return events\n\n@app.get('/predict')\ndef predict(data: Pick):\n \n picks = data.picks\n data, locs, phase_type, phase_weight = convert_picks(picks, stations)\n event_log = association(data, locs, phase_type, phase_weight)\n\n return event_log\n\n","sub_path":"gmma/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"467779047","text":"#!/usr/bin/python3\n\nimport json\nimport os\nimport sys\nimport getopt\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom surveillancestation.surveillancestation import Surveillancestation\n\n# Configuration file path\nlocation = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nconfigurationFile = os.path.join(location, 'config.json')\n\n# Configure logs\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')\n\nfile_handler = RotatingFileHandler(os.path.join(location, 'surveillance-station.log'), 'a', 1000000, 1)\nfile_handler.setLevel(logging.INFO)\nfile_handler.setFormatter(formatter)\nlogger.addHandler(file_handler)\n\nsteam_handler = logging.StreamHandler()\nsteam_handler.setLevel(logging.INFO)\nsteam_handler.setFormatter(formatter)\nlogger.addHandler(steam_handler)\n\n# Check if 
configuration file exists\nif os.path.isfile(configurationFile):\n # Import configuration file\n with open(configurationFile) as data_file:\n config = json.load(data_file)\nelse:\n logger.error('Your configuration file doesn\\'t exists')\n sys.exit('Your configuration file doesn\\'t exists')\n\n\ndef usage():\n print('execute.py -a -c ')\n\n\n# Main script\ndef main(argv):\n action = ''\n cams = ''\n\n try:\n opts, args = getopt.getopt(argv, \"h:a:c:\", [\"action=\", \"cams=\", \"help\"])\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == '-h':\n print('test.py -i -o ')\n sys.exit()\n elif opt in (\"-a\", \"--action\"):\n action = arg\n elif opt in (\"-c\", \"--cams\"):\n cams = arg.split(',')\n\n # Check options\n if not action or not cams:\n usage()\n sys.exit(2)\n\n # Check action\n if action != 'on' and action != 'off':\n usage()\n sys.exit(2)\n\n # Check cams\n cam_keys = config['cams'].keys()\n for cam in cams:\n if cam not in cam_keys:\n print('The cam [', cam, ']', 'is not in config file')\n sys.exit(2)\n\n # Convert cam name to cam idx\n cam_idxs = []\n for cam in cams:\n cam_idxs.append(config['cams'][cam])\n\n # Create API\n api = Surveillancestation(host=config['host'], user=config['login'], passwd=config['password'])\n\n # Execute action in all cam\n try:\n if action == 'on':\n api_return = api.camera.enable(cam_idxs)\n elif action == 'off':\n api_return = api.camera.disable(cam_idxs)\n\n sys.exit(0 if api_return['success'] else 1)\n except Exception:\n sys.exit(1)\n finally:\n # Don't forget to logout\n api.logout()\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"12061426","text":"#_*_coding:utf-8_*_\nimport pandas as pd\nimport numpy as np\nimport jieba\nimport math\n\n\ndef extract_words_list(text_data):\n '''\n 用jieba 的方法进行中文分词\n python使用jieba实现中文文档分词和去停用词,无用词等\n https://www.cnblogs.com/zuixime0515/p/9221156.html\n @param text_data: 初始的文本数据\n @return: 关键词\n '''\n # 1 对初始数据进行清洗\n if text_data == np.nan:\n text_data = ''\n text_data = text_data.replace('\\\\n', '').replace('\\\\t', '').replace('\\\\xa0', '').replace('\\\\r', '')\\\n .replace('\\n', '').replace('\\t', '').replace('\\r', '').replace('\\xa0', '')\n\n # 2 jieba 中文分词, 并全景模式\n words = jieba.cut(text_data, cut_all=True)\n\n # 3 对分好的词进行处理,\n # 3.1 先导入 停用词库\n stop_words_dic = open('./stop_words.txt', 'rb')\n stop_words_list = stop_words_dic.read().splitlines()\n stop_words_dic.close()\n\n # 3.2 去掉空值,去掉单字,去掉数字,去掉停用词\n words = [x for x in words if\n ((x != '') and (len(x) > 1) and (not (str(x).isdigit())) and (not (x in stop_words_list)))]\n split_words = [w for w in words]\n\n return split_words\n\n\ndef calculate_similarity(text1, text2):\n '''计算两个文本余弦相似度\n http://www.luyixian.cn/news_show_256402.aspx\n 计算两个文本的相似度,构建一个文本集合,计算词频,再计算相似度\n @param text1:\n @param text2:\n @return: 两个文本的余弦相似度\n '''\n # 1 分词\n words_list1 = extract_words_list(text1)\n words_list2 = extract_words_list(text2)\n # 1.2 列出所有的词,并组成一个集合\n words_dict = set(words_list1 + words_list2)\n\n # 3 统计词频,和构建词频向量\n word_count1, word_count2 = {}, {}\n word_count_vector1, word_count_vector2 = [], []\n # 3.2 对于词汇集合中的每一个词,统计他在原文本中出现的次数:没有就是0,有则 +1\n\n for word in words_dict:\n # 3.2.1 对文本中出现同样的词进行计数 -> 统计词频\n n1 = text1.count(word)\n n2 = text2.count(word)\n # 3.2.3 特殊处理:如果两个文本中都出现了相同的某个词,权重乘 1.5\n if (word in text1) and 
(word in text2):\n n1 *= 1.5\n n2 *= 1.5\n # 3.2.4 以 word 为键,以word出现的次数为值, 来构建键值对\n word_count1[word] = n1\n word_count2[word] = n2\n\n # 3.2.5 把词频一个个的添加到词频向量中,作为维度 -> 构建词频向量\n # [1,0,9,2,5,3,2,1,0,0,0,0,1,1,1]\n # [1,1,5,0,1,0,0,1,1,6,1,0,1,0,0]\n word_count_vector1.append(n1)\n word_count_vector2.append(n2)\n\n # 4 计算余弦相似度: 先把列表转化为 numpy的数组\n vec1 = np.array(word_count_vector1)\n vec2 = np.array(word_count_vector2)\n similar = cosine_distance(vec1, vec2)\n\n return similar\n\n\ndef cosine_distance(vector1, vector2):\n '''\n 计算两个向量的夹角的余弦值\n @param vector1:\n @param vector2:\n @return:\n '''\n # 1 长度不相等,直接返回\n if len(vector1) != len(vector2):\n return None\n\n # 2 向量点积公式,\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n\n # 上半部分是向量点积: x 点乘 y = x1y1 + x2y2 + x3y3\n # 下半部分是 向量模长的乘积结果再开根号 ||x||^2 = x1^2 + x2^2 + x3^2\n for a1, b1 in zip(vector1, vector2):\n # 2.1 上半部分就是向量各元素的对应乘积的 和\n part_up += a1 * b1\n a_sq += a1 ** 2\n b_sq += b1 ** 2\n part_down = math.sqrt(a_sq * b_sq)\n\n # 2.2 如果分母为0了,说明至少有一个是零向量,不可能发生,直接返回\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\n\ndef get_recommended_list(job_index, topN=10):\n '''\n 1 在数据库中选择指定的岗位数据,和所有的课程数据\n 2 根据岗位-课程,一对一的计算相似度后排序\n 3 进一步调整:对于相似度五名以后的推荐课程,进行他们与前五个推荐的课程进行相似度再计算,\n 并与原岗位-课程相似度按比例合并,得出最终的推荐结果\n @return: 针对此课程的推荐列表\n @param job_index: 岗位的索引号\n @param topN: 想接受推荐的课程数量,默认为10\n @return:\n '''\n # 1.1 获取岗位的相关内容\n jobs_data = pd.read_csv('./Jobs_Courses_Datas/jobs_data.csv')\n # jobs_data = jobs_data.dropna().reset_index(drop=True)\n\n # 1.1.1 获取到指定行索引的数据\n job = jobs_data.iloc[job_index]\n job_title = job['job_title']\n job_describe = job['job_responsibility']\n # 把名字和描述都加进去参与匹配,但是名字的权重要再增加一点\n job_total = job_title * 2 + job_describe\n\n # 1.2 获取课程的相关内容\n courses_data = pd.read_csv('./Jobs_Courses_Datas/courses_data.csv')\n # 1.2.1 去掉有NAN的行的数据\n courses_data = courses_data.dropna().reset_index(drop=True)\n\n # 2 构建一个列表存储:课程-岗位相似度,以及课程信息\n recommends = []\n for i in range(len(courses_data)):\n # 获取第i个元素\n c = courses_data.loc[i]\n\n # 2.1 构建一个小字典存储:课程名字,课程描述,课程-岗位相似度\n r_dict = c\n r_dict['Recommended_Course_Name'] = c[0]\n r_dict['Course_Description'] = c[1]\n r_total = r_dict['Recommended_Course_Name'] * 2 + r_dict['Course_Description']\n\n # 2.2 对 job名字,描述 和 课程名字,描述都进行匹配\n cos_similar = calculate_similarity(job_total, r_total)\n r_dict['Job_Course_Similarity'] = cos_similar\n # 2.3 把生成的字典添加到列表中取\n recommends.append(r_dict)\n\n # 3 对获得的列表按相似度为键,从高到低进行排序\n # 3.1 针对 岗位-课程获得到的排序结果,再进行 课程-课程间的相似度检验\n recommends = sorted(recommends, key=lambda z: z['Job_Course_Similarity'], reverse=True)[:(topN * 3)]\n\n # 3.1.2 以 岗位-课程 相似度的前五为基准\n for i in range(5, len(recommends)):\n temp_i = recommends[i]['Recommended_Course_Name'] + recommends[i]['Course_Description']\n sim_list_i = []\n\n # 3.1.3 对于其他的课程,则进行与前五的相似度计算\n for j in range(5):\n temp_j = recommends[j]['Recommended_Course_Name'] + recommends[j]['Course_Description']\n while i != j:\n t = calculate_similarity(temp_i, temp_j)\n sim_list_i.append(t)\n j += 1\n\n # 3.2 某课程与top5的推荐课程的相似度,求个平均数\n sim = np.mean(sim_list_i)\n # 3.3 重新反馈给相似度,并给予一定的权重分配\n recommends[i]['Job_Course_Similarity'] = recommends[i]['Job_Course_Similarity'] * 0.5 + sim * 0.5\n\n # 3.4 再次对推荐列表进行排序\n recommends = sorted(recommends, key=lambda z: z['Job_Course_Similarity'], reverse=True)\n recommends = pd.DataFrame(recommends)\n\n # 4 选取topN 课程: -> 用花式索引来获取,并打印\n print('对于工作:%s,岗位描述信息如下:\\n%s\\n' % (job_title, job_describe))\n recommended_courses_list = recommends[['Recommended_Course_Name', 
'Job_Course_Similarity']][:topN]\n    print('对于工作:%s, 我们推荐学习的课程清单如下:\n %s' % (job_title, recommended_courses_list))\n\n    return None\n\n\nif __name__=='__main__':\n    get_recommended_list(job_index=526, topN=10)\n","sub_path":"Week6_7_8_9/cosine_similarity.py","file_name":"cosine_similarity.py","file_ext":"py","file_size_in_byte":7727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"508814601","text":"\n\ndata = \"123\"\n\n# traversal method 1\nfor i in data:\n    print (i)\n\n# traversal method 2\nfor i in range(len(data)):\n    print(data[i])\n\n\n#\ni = 0\nwhile(i <= 5):\n\tprint(\"count: \", i)\n\ti += 1\nprint(\"Good bye\")\n\na = list(range(5))\nprint(a)\n\n\n#\nfor i in a:\n\tprint(i)\n\n\n#####\nfor letter in 'Python': # traversal of a string sequence\n    print ('Current Letter :', letter)\nprint()\n\n\n#####\nfruits = ['banana', 'apple', 'mango']\n\nfor fruit in fruits: # traversal of List sequence\n    print ('Current fruit :', fruit)\n\nprint (\"Good bye!\")\n\n#########\nfruits = ['banana', 'apple', 'mango']\nfor index in range(len(fruits)):\n    print ('Current fruit :', fruits[index])\n\nprint (\"Good bye!\")\n\n\n\n##############\nnumbers = [11,33,55,39,55,75,37,21,23,41,13]\n\nfor num in numbers:\n    if num%2 == 0:\n        print ('the list contains an even number')\n        break\nelse:\n    print ('the list does not contain an even number')\n\n###############\nimport sys\nfor i in range(1,11):\n    for j in range(1,11):\n        k = i*j\n        print (k, end = '\\t')\n    print()\n\n ############\nfor i in 'Python': \n    if i == 'h':\n        break\n    print ('Current Letter :', i)\n    \nvar = 10 # Second Example\nwhile var > 0: \n    print ('Current variable value :', var)\n    var = var -1\n    if var == 5:\n        break\n\nprint (\"Good bye!\")\n\n####\nnum = int(input('Enter any number: '))\nnumber = [11,33,55,39,55,75,37,21,23,41,13]\nfor i in number:\n\tif(i == num):\n\t\tprint('The entered number is in the list')\n\t\tbreak\nelse:\n\tprint('The entered number is not in the list')\n\n#####\nfor letter in 'Python': # First Example\n    if letter == 'h':\n        continue\n    print ('Current Letter :', letter)","sub_path":"Week 1 + 2/Loops.py","file_name":"Loops.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"578975806","text":"def parent_index(i):\r\n\r\n    p_index = (i - 1) // 2\r\n    return p_index if 0 < i else i\r\n\r\n\r\ndef left_index(i, n):\r\n\r\n    l_index = 2 * i + 1\r\n    return l_index if l_index < n else i\r\n\r\n\r\ndef right_index(i, n):\r\n\r\n    r_index = 2 * i + 2\r\n    return r_index if r_index < n else i\r\n\r\n\r\ndef max_heapify_up(A, n, c_index):\r\n\r\n    p_index = parent_index(c_index)\r\n\r\n    if A[p_index] < A[c_index]:\r\n        A[c_index], A[p_index] = A[p_index], A[c_index]\r\n        max_heapify_up(A, n, p_index)\r\n\r\n\r\ndef max_heapify_down(A, n, p_index):\r\n\r\n    l_index, r_index = left_index(p_index, n), right_index(p_index, n)\r\n    c_index = l_index if A[r_index] < A[l_index] else r_index\r\n\r\n    if A[p_index] < A[c_index]:\r\n        A[c_index], A[p_index] = A[p_index], A[c_index]\r\n        max_heapify_down(A, n, c_index)\r\n\r\n\r\ndef set_second_largest(A, k):\r\n\r\n    l_index, r_index = left_index(0, len(A)), right_index(0, len(A))\r\n    second_max_index = l_index if A[l_index] > A[r_index] else r_index\r\n\r\n    if k > A[second_max_index]:\r\n\r\n        A[second_max_index] = k\r\n        max_heapify_up(A, len(A), second_max_index)\r\n\r\n    else:\r\n\r\n        A[second_max_index] = k\r\n        max_heapify_down(A, len(A), second_max_index)\r\n\r\n    return A\r\n\r\n\r\nA = [11, 7, 10, 2, 6, 4, 9]\r\nprint(set_second_largest(A, 0))\r\nprint(set_second_largest(A, 22))\r\n","sub_path":"18-19F/6.006/Problem Set/3/PS3P1C.py","file_name":"PS3P1C.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"34088591","text":"import os\nfrom utils.YamlUtils import YamlReader\n\n\n# 1. Get the project's base path\ncurrent = os.path.dirname(os.path.dirname(__file__))\n\n# 2. conf directory\nconf_path = current + os.sep + 'conf'\nconf_path_yml = conf_path + os.sep + 'conf.yml'\n# print(conf_path)\n\n# 3. caps.yml\nconf_caps = conf_path + os.sep + 'caps.yml'\n# print(conf_caps)\n\n# 4. log directory\nlog_path = current + os.sep + 'logs'\n\n# 5. keywords file path\nkeywords_path = conf_path + os.sep + 'keywords.yml'\n\n# 6. data directory\ndata_path = current + os.sep + 'data'\n# 6.1 data test-case file\ntestcase_file = data_path + os.sep + 'data.xls'\n\n# 7. report\nreport_path = current + os.sep + 'report'\n\n# 8.1 read the config data via YamlReader\nconfig = YamlReader(conf_path_yml).data()\n","sub_path":"conf/Conf.py","file_name":"Conf.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"638748831","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\nfrom tensorflow.keras.layers import Dense, Activation, Flatten\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.utils import to_categorical\nimport tensorflow.keras.optimizers as opti\nimport tensorflow as tf\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\nimport numpy as np\n\nmnist = tf.keras.datasets.mnist\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\ntrain_labels = to_categorical(train_labels)\ntest_labels = to_categorical(test_labels)\n\nmodel = Sequential()\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dense(10, activation='softmax'))\n\n# optimus = opti.SGD(learning_rate=0.1, nesterov=True)\n# optimus = opti.RMSprop(learning_rate=0.001, rho=0.9)\n# optimus = opti.Adagrad(learning_rate=0.1)\n# optimus = opti.Adadelta(learning_rate=1.0, rho=0.95)\n# optimus = opti.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)\n# optimus = opti.Adamax(learning_rate=0.002, beta_1=0.9, beta_2=0.999)\noptimus = opti.Nadam(learning_rate=0.01, beta_1=0.9999, beta_2=0.9999)\n\nmodel.compile(optimizer=optimus, loss='categorical_crossentropy',\n              metrics=['accuracy'])\nmodel.fit(train_images, train_labels, epochs=5, batch_size=128)\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\nprint('test_loss: ', np.around(test_loss, decimals=5))\nprint('test_acc: {:.2%}'.format(test_acc))\n\ndef load_image(filename):\n    img = load_img(filename, color_mode='grayscale', target_size=(28, 28))\n    img = img_to_array(img)\n    img = img.reshape(1, 28, 28, 1)\n    img = img.astype('float32') / 255.0\n    return img\n\ndef pred_img(model, filename):\n    img = load_image(filename)\n    digit = np.argmax(model.predict(img), axis=-1)\n    print(digit[0])\n\nwhile(True):\n    print('type img name to pred; 0 to exit')\n    filename = input()\n    if filename != '0':\n        pred_img(model, filename)\n    else:\n        break\n","sub_path":"8383/Kireev/lb/4/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
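# --- Editor's aside (illustrative sketch, not a dataset record) ---
# The task.py sample above compares Keras optimizers by hand-editing which
# `opti.*` constructor is uncommented. A minimal, self-contained version of
# that experiment is sketched below; the helper name `compare_optimizers`
# and the toy data are assumptions for illustration, not from the dataset.
import numpy as np
import tensorflow as tf

def compare_optimizers():
    # Toy binary-classification data, just to exercise the training loop.
    rng = np.random.default_rng(0)
    x = rng.normal(size=(256, 8)).astype("float32")
    y = (x.sum(axis=1) > 0).astype("float32")
    for opt in (tf.keras.optimizers.SGD(learning_rate=0.1),
                tf.keras.optimizers.Adam(learning_rate=1e-3)):
        # Rebuild the model each time so every optimizer starts fresh.
        model = tf.keras.Sequential([
            tf.keras.layers.Dense(16, activation="relu"),
            tf.keras.layers.Dense(1, activation="sigmoid"),
        ])
        model.compile(optimizer=opt, loss="binary_crossentropy",
                      metrics=["accuracy"])
        model.fit(x, y, epochs=5, batch_size=32, verbose=0)
        loss, acc = model.evaluate(x, y, verbose=0)
        print(type(opt).__name__, "loss %.4f acc %.2f%%" % (loss, acc * 100))

# compare_optimizers()  # uncomment to run the comparison
# --- End of editor's aside ---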
+{"seq_id":"18973717","text":"import numpy\n\n\nclass WareHouse:\n def __init__(self, x, y, stock):\n self.x = x\n self.y = y\n self.stock = stock\n return\n\n def distance_to(self, target):\n if isinstance(target, WareHouse):\n return numpy.math.sqrt((int(self.x) - int(target.x)) ** 2 + (int(self.y) - int(target.y)) ** 2)\n else:\n raise ValueError('Target was not a warehouse instance')\n","sub_path":"warehouse.py","file_name":"warehouse.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"203127272","text":"import torch\nfrom torch import nn as nn\nimport torch.nn.functional as F\nimport inspect\n\nfrom model_utils import name_to_kwargs\nfrom module.losses import l2_loss\nimport config\n\n\nclass BaseModel(nn.Module):\n\n def __init__(self, **kwargs):\n super().__init__()\n self.reg_tensors = []\n self.reg_values = []\n\n @property\n def model_type(self):\n return name_to_kwargs(self.name)[0]\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n def add_activity_loss(self, tensor, l, loss=l2_loss):\n self.reg_tensors.append(tensor)\n self.reg_values.append((loss, l))\n\n def regularize(self, weight=None):\n reg_loss = next(self.parameters()).new_zeros([])\n for t, v in zip(self.reg_tensors, self.reg_values):\n l_func, l_val = v\n reg_loss += l_func(t, l_val, weight=weight)\n self.reg_tensors = []\n self.reg_values = []\n return reg_loss\n\n def constraint(self, **kwargs):\n pass\n\n def predict(self, *args, **kwargs):\n raise NotImplementedError\n\n def predict_topk(self, users, users_items, k, candidates=None, invalid=None, only_new=True, **predict_kwargs):\n if 'users_items' in inspect.signature(self.predict).parameters:\n predict_kwargs['users_items'] = users_items\n if 'users' in inspect.signature(self.predict).parameters:\n predict_kwargs['users'] = users\n predict_kwargs['candidates'] = candidates\n r = self.predict(**predict_kwargs)\n if candidates is None:\n if only_new:\n r = r.scatter_(1, users_items, -float('inf'))\n if invalid is not None:\n r[:, invalid] = -float('inf')\n topk_items = r.topk(k, -1)[1]\n if candidates is not None:\n topk_items = candidates.gather(1, topk_items)\n return topk_items\n\n\nclass RNNBase(BaseModel):\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n def predict(self, *args, **kwargs):\n raise NotImplementedError\n\n def init_hidden(self, batch_size):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return (weight.new_zeros(self.n_layers, batch_size, self.n_hidden),\n weight.new_zeros(self.n_layers, batch_size, self.n_hidden))\n else:\n return weight.new_zeros(self.n_layers, batch_size, self.n_hidden)\n","sub_path":"src/model/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"564524040","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom copy import deepcopy\n\nfrom django.test import TestCase\n\nfrom ralph_scrooge.models import (\n Environment,\n OwnershipType,\n ProfitCenter,\n Service,\n)\nfrom ralph_scrooge.plugins.collect.ralph3_service_environment import (\n update_service,\n update_environment,\n)\nfrom ralph_scrooge.tests.utils.factory import (\n ProfitCenterFactory,\n Ralph3OwnerFactory,\n ServiceFactory,\n)\nfrom 
ralph_scrooge.tests.plugins.collect.samples.ralph3_service_environment import ( # noqa\n SAMPLE_ENVIRONMENTS,\n SAMPLE_SERVICES,\n)\n\n\nclass TestServiceEnvironmentCollectPlugin(TestCase):\n def setUp(self):\n self.data = deepcopy(SAMPLE_SERVICES[0])\n self.default_profit_center = ProfitCenter(pk=1)\n ProfitCenterFactory.reset_sequence()\n self.profit_centers = ProfitCenterFactory.create_batch(2)\n Ralph3OwnerFactory.reset_sequence()\n # Don't create more than 6 owners (see remark in Ralph3UserFactory for\n # explaination).\n self.owners = Ralph3OwnerFactory.create_batch(6)\n\n def _create_and_test_service(self, data):\n \"\"\"\n General method to check if created/updated service match passed data\n \"\"\"\n created, saved_service = update_service(\n data, self.default_profit_center\n )\n\n self.assertEquals(saved_service.name, data['name'])\n self.assertEquals(saved_service.symbol, data['uid'])\n\n # ownership\n self.assertEquals(\n saved_service.serviceownership_set.count(),\n len(data['business_owners']) + len(data['technical_owners'])\n )\n self.assertEquals(\n set(\n saved_service.serviceownership_set.filter(\n type=OwnershipType.business\n ).values_list('owner__user__username', flat=True)\n ),\n set([o['username'] for o in data['business_owners']])\n )\n self.assertEquals(\n set(\n saved_service.serviceownership_set.filter(\n type=OwnershipType.technical\n ).values_list('owner__user__username', flat=True)\n ),\n set([o['username'] for o in data['technical_owners']])\n )\n return created, saved_service\n\n def test_new_service(self):\n \"\"\"\n Basic test for new service\n \"\"\"\n self.assertEquals(Service.objects.count(), 0)\n created, service = self._create_and_test_service(self.data)\n self.assertTrue(created)\n self.assertEquals(Service.objects.count(), 1)\n self.assertIn(service.profit_center, self.profit_centers)\n\n def test_new_service_without_profit_center(self):\n \"\"\"\n Basic test for new service without profit center\n \"\"\"\n self.data['profit_center'] = None\n self.assertEquals(Service.objects.count(), 0)\n created, service = self._create_and_test_service(self.data)\n self.assertTrue(created)\n self.assertEquals(Service.objects.count(), 1)\n self.assertEquals(service.profit_center, self.default_profit_center)\n\n def test_service_update(self):\n \"\"\"\n Check update of service data\n \"\"\"\n created, service = self._create_and_test_service(self.data)\n self.assertTrue(created)\n service = ServiceFactory.build()\n self.data['name'] = service.name\n created, service = self._create_and_test_service(self.data)\n self.assertFalse(created)\n self.assertEquals(Service.objects.count(), 1)\n\n def test_owners_delete(self):\n \"\"\"\n Checks if owners are correctly deleted\n \"\"\"\n created, service = self._create_and_test_service(self.data)\n self.assertTrue(created)\n self.assertEquals(Service.objects.count(), 1)\n # remove one owner from technical and business\n self.data['technical_owners'].pop()\n del self.data['business_owners'][0]\n created, service = self._create_and_test_service(self.data)\n self.assertFalse(created)\n self.assertEquals(Service.objects.count(), 1)\n\n def test_owners_change(self):\n \"\"\"\n Checks if owners are correctly added\n \"\"\"\n created, service = self._create_and_test_service(self.data)\n self.assertTrue(created)\n self.assertEquals(Service.objects.count(), 1)\n # move one owner from technical to business\n self.data['business_owners'].append(\n self.data['technical_owners'].pop()\n )\n # add new technical owner\n 
self.data['technical_owners'].append({\n 'username': self.owners[5].user.username,\n })\n created, service = self._create_and_test_service(self.data)\n self.assertFalse(created)\n self.assertEquals(Service.objects.count(), 1)\n\n def _compare_environments(self, environment, sample_data):\n self.assertEquals(environment.name, sample_data['name'])\n self.assertEquals(environment.ralph3_id, sample_data['id'])\n\n def test_add_environment(self):\n sample_data = SAMPLE_ENVIRONMENTS[0]\n self.assertTrue(update_environment(sample_data))\n environment = Environment.objects.get(ralph3_id=sample_data['id'])\n self._compare_environments(environment, sample_data)\n\n def test_update_environment(self):\n sample_data = SAMPLE_ENVIRONMENTS[0]\n self.assertTrue(update_environment(sample_data))\n environment = Environment.objects.get(ralph3_id=sample_data['id'])\n self._compare_environments(environment, sample_data)\n\n sample_data2 = SAMPLE_ENVIRONMENTS[1]\n sample_data2['id'] = sample_data['id']\n self.assertFalse(update_environment(sample_data2))\n environment = Environment.objects.get(\n ralph3_id=sample_data2['id']\n )\n self._compare_environments(environment, sample_data2)\n\n # TODO(xor-xor): Consider re-adding 'test_batch_update' test which has been\n # removed due to 'service' and 'environment' plugins being merged into one.\n\n # TODO(xor-xor): What about adding test for\n # SYNC_SERVICES_ONLY_CALCULATED_IN_SCROOGE..?\n","sub_path":"src/ralph_scrooge/tests/plugins/collect/test_ralph3_service_environment.py","file_name":"test_ralph3_service_environment.py","file_ext":"py","file_size_in_byte":6297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"466552483","text":"# Robot Test Script for OSU Mercury Robotics\n#\n# File: robot.py\n# Author: Matthew Spinks\n# \n#\n# Description:\n# A test script that combines elements from my Adafruit servo script with\n# the socket library. I want to try and recieve some a byte array from a\n# socket and use this to set the pwm. \n\nimport math\nimport socket\nimport sys\n\nimport RPi.GPIO as GPIO\nfrom Adafruit_PWM_Servo_Driver import PWM\n\n# Print the packet in it's string form when it's received\nDEBUG = False\n\n# Show the whole packet in an easy to read format. This is only useful for\n# sending one packet. Otherwise, the screen scrolls too fast and you can't\n# read it. This will probably be deleted later.\nPACKET_INFO = False\n\n# ----- Functions --------------------------------------------------------------\n\n\ndef shutdown_motors():\n # Stop the motors. Don't forget active low!\n print(\"Shutting down motors.\")\n GPIO.output(L_DIR_A, GPIO.HIGH)\n GPIO.output(L_DIR_B, GPIO.HIGH)\n GPIO.output(R_DIR_A, GPIO.HIGH)\n GPIO.output(R_DIR_B, GPIO.HIGH)\n pwm_ada.setPWM(l_motor_channel, 0, 0)\n pwm_ada.setPWM(r_motor_channel, 0, 0)\n\n# function to re-map one range of numbers to another. I'm going to use it to\n# map the joystick floating values to duty cycle values. 
The RPIO library\n# duty cycle arguments are 0 to 100.\ndef remap(x, old_min, old_max, new_min, new_max):\n return ((((x - old_min) * (new_max - new_min)) / (old_max - old_min))\n + new_min)\n\n# A function to compute a servo pulse for the I2C servo driver\n# The timers on the servo driver are 12-bit.\n# The input is given in microseconds (1000-2000us)\ndef setServoPulse(channel, pulse):\n pulseLength = 1000000 / frequency # pulse length in microseconds\n #print(\"%d us per period\" % pulseLength)\n pulseLength /= 4096 # 12 bits of resolution\n #print(\"%d us per bit\" % pulseLength)\n #pulse *= 1000 # convert from ms\n pulse = int(round(pulse / pulseLength))\n\n # for 60Hz\n # 16666.667 us per timer period\n # 16666.667 / 4096 = 4.069\n\n # max servo pulse input value is 2000\n # timer value is \n # 2000 / 4.096 = 491.52\n\n #print(repr(pulse))\n pwm_ada.setPWM(channel, 0, pulse)\n\n\n# ***** Main *******************************************************************\n\n\n# ----- Setup GPIO -------------------------------------------------------------\n\n\n# Use pin numbers written on cobbler on breakout board\nGPIO.setmode(GPIO.BCM)\n\n# The motor driver board uses four direction pins. More on that below.\nL_DIR_A = 18\nL_DIR_B = 23\nR_DIR_A = 24\nR_DIR_B = 25\n\n# In case I want to change the channels on the servo board for whatever reason\nservo_0_channel = 0\nservo_1_channel = 1\nservo_2_channel = 2\nservo_3_channel = 3\n\nl_motor_channel = 14\nr_motor_channel = 15\n\n# Turn off warnings\nGPIO.setwarnings(False)\n\n# Set as outputs\nGPIO.setup(L_DIR_A, GPIO.OUT)\nGPIO.setup(L_DIR_B, GPIO.OUT)\nGPIO.setup(R_DIR_A, GPIO.OUT)\nGPIO.setup(R_DIR_B, GPIO.OUT)\n\n\n# ----- Adafruit Servo Board ---------------------------------------------------\n\n\n# Experiment with Adafruit's 16 channel servo driver\n\n# Initialize driver board.\npwm_ada = PWM(0x40, debug=True)\n\n# Set to 60Hz\nfrequency = 60\npwm_ada.setPWMFreq(frequency)\n\n\n# ----- Initialize Socket -------------------------------------------------------------\n\n\nHOST = '' # Symbolic name meaning all available interfaces\nPORT = 50007 # Arbitrary non-privileged port\n\nbuf = bytearray()\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\nprint(\"Listening on port {p}...\".format(p=PORT))\n\nwhile True:\n s.listen(1)\n client, addr = s.accept()\n print('Connected by', addr)\n client.sendall(b'Hi!') # send something back to the client\n\n\n# ----- Main Loop --------------------------------------------------------------\n\n\n while True:\n try:\n data = client.recv(1024)\n if not data:\n print(\"Listening on port {p}...\".format(p=PORT))\n break\n\n # Show packet received\n if DEBUG: print(repr(data))\n\n\n# ----- Get Direction Bits -------------------------------------------------------------\n\n\n # The first byte I send will probably be the one that contains the direction\n # pins for the motors.\n # 00 brake to ground\n # 01 direction 1\n # 10 direction 2\n # 11 brake to Vcc\n\n # Direction Byte:\n # x x x x 0 0 0 0\n # | | | |\n # La Lb Ra Rb\n\n # Byte arrays are actually ints with values from 0-256. However when I\n # sliced out the part I needed, I think it actually made it a byte. BUT,\n # it wouldn't let me use shift operators, because in order to do bit\n # manipulation you have to use ints. So, I converted it to an int using\n # the ord() function. 
Don't worry I'm just as confused as you are right\n # now.\n\n # Have a look at this: http://docs.python.org/2/library/functions.html\n\n # Get the first \"byte\"\n direction_byte = ord(data[0:1])\n\n\n# ----- Get Left Motor Speed ---------------------------------------------------\n\n\n # The next two bytes are going to be the left motor speed. I decided to use\n # the full 12 bits for the pwm chip, because Jason wanted more precision.\n # So the values are from 0-4096\n\n # In Python 3.2 there is a very nice \"from_bytes\" function that can do\n # this part. However, smbus does not work in Python 3.2, so I have to \n # do it this way instead:\n\n # Don't leave out the first set of parenthesis. Order of operations is\n # extremely important here!\n l_motor_speed = (ord(data[1:2]) << 8) + ord(data[2:3])\n\n\n# ----- Get Right Motor Speed ---------------------------------------------------\n\n\n # The next two bytes will be the right motor speed. Same as above.\n r_motor_speed = (ord(data[3:4]) << 8) + ord(data[4:5])\n\n\n# ----- Get Servo 0 ------------------------------------------------------------\n\n\n # The next two bytes are the servo speed\n servo_0 = (ord(data[5:6]) << 8) + ord(data[6:7])\n\n\n# ----- Get Servo 1 ------------------------------------------------------------\n\n\n # Servo values should be in microseconds (from 1000 to 2000 us)\n servo_1 = (ord(data[7:8]) << 8) + ord(data[8:9])\n\n\n# ----- Get Servo 1 ------------------------------------------------------------\n\n\n # Servo values should be in microseconds (from 1000 to 2000 us)\n servo_2 = (ord(data[9:10]) << 8) + ord(data[10:11])\n\n\n# ----- Get Servo 1 ------------------------------------------------------------\n\n\n # Servo values should be in microseconds (from 1000 to 2000 us)\n servo_3 = (ord(data[11:12]) << 8) + ord(data[12:13])\n\n\n# ----- Update PWM and GPIO ------------------------------------------------------\n\n # The pins on the RPi are just a bit too weak to drive the motor driver\n # input pins high. It requires 3.25 V, so sometimes it would work and\n # sometimes it wouldn't. I have placed open collector npn transistors on all\n # the outputs to fix this problem. SO NOW THE OUTPUT IS INVERTED.\n\n # Update direction pins\n if not (direction_byte & (1 << 3)):\n GPIO.output(L_DIR_A, GPIO.HIGH)\n #if PACKET_INFO: print('La: High', end='') # no newline character\n else:\n GPIO.output(L_DIR_A, GPIO.LOW)\n #if PACKET_INFO: print('La: Low', end='')\n # Left Motor pin B\n if not (direction_byte & (1 << 2)):\n GPIO.output(L_DIR_B, GPIO.HIGH)\n #if PACKET_INFO: print(' Lb: High')\n else:\n GPIO.output(L_DIR_B, GPIO.LOW)\n #if PACKET_INFO: print(' Lb: Low')\n # Right Motor pin A\n if not (direction_byte & (1 << 1)):\n GPIO.output(R_DIR_A, GPIO.HIGH)\n #if PACKET_INFO: print('Ra: High', end='')\n else:\n GPIO.output(R_DIR_A, GPIO.LOW)\n #if PACKET_INFO: print('Ra: Low', end='')\n # Right Motor pin B\n if not (direction_byte & 1):\n GPIO.output(R_DIR_B, GPIO.HIGH)\n #if PACKET_INFO: print(' Rb: High')\n else:\n GPIO.output(R_DIR_B, GPIO.LOW)\n #if PACKET_INFO: print(' Rb: Low')\n\n if PACKET_INFO:\n print('Left Motor Speed: ', l_motor_speed)\n print('Right Motor Speed:', r_motor_speed)\n print('Servo 0: ', servo_0)\n print('Servo 1: ', servo_1)\n\n # Update pwm values for motors\n # First number is the channel on the I2C board.\n # Second number is the start (on) value. 
It will almost always be zero\n # Third value is the stop value (off)\n pwm_ada.setPWM(l_motor_channel, 0, l_motor_speed)\n pwm_ada.setPWM(r_motor_channel, 0, r_motor_speed)\n\n # Update servo values.\n # First argument is the channel number on the servo board\n setServoPulse(servo_0_channel, servo_0)\n setServoPulse(servo_1_channel, servo_1)\n setServoPulse(servo_2_channel, servo_2)\n setServoPulse(servo_3_channel, servo_3)\n\n client.sendall(b'Hi')\n except socket.error as msg:\n print('Socket Error!')\n print(msg)\n shutdown_motors()\n\n except KeyboardInterrupt:\n print('Stopping')\n shutdown_motors()\n sys.exit()\n\n# ----- End of loop ------------------------------------------------------------\n\nGPIO.cleanup() # Shutdown all PWM and DMA activity\nclient.close() # Close socket\n\n\n\n","sub_path":"python/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":9593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"369401676","text":"import logging\nimport os\nimport re\nfrom logging.config import dictConfig\n\nfrom flask import Flask\n\nfrom train_schedule.db import create_engine\nfrom train_schedule.handlers.train_schedule import schedule\nfrom train_schedule.config import Config\n\n\ndef setup_logging(path, logfilename):\n try:\n logging.config.fileConfig(\n os.path.join(path, \"logging.conf\"),\n defaults={\"logfilename\": os.path.join(path, logfilename)}\n )\n except FileNotFoundError as e:\n logging.warning(e)\n logging.warning(\"Error in logging configuration. Using default\")\n logging.basicConfig(level=logging.INFO)\n\n\ndef create_app(database=None):\n app = Flask(__name__)\n app.config.from_object(Config)\n if database:\n print(database)\n app.db = create_engine(database)\n else:\n app.db = create_engine(app.config[\"DATABASE_URL\"])\n app.register_blueprint(schedule, url_prefix=\"/api\")\n setup_logging(app.config[\"BASEDIR\"], app.config[\"LOG\"])\n return app\n\n\nif __name__ == \"__main__\":\n app = create_app()\n logger = logging.getLogger(\"schedule\")\n logger.info(\n \"DATABASE_URL %s\",\n re.sub(r\"(\\/\\/).*@\", \"//user:password@\", app.config[\"DATABASE_URL\"]),\n )\n logger.info(\"Running on {}:{}\".format(app.config[\"HOST\"], app.config[\"PORT\"]))\n app.run(host=app.config[\"HOST\"], port=app.config[\"PORT\"])\n","sub_path":"train_schedule/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"369768105","text":"#build max heap\n\nfrom heapify import max_heapify\n\ndef build_heap(v, n):\n i=n//2-1\n while i>= 0:\n max_heapify(v, i, n)\n i-=1\n\n\nv = [4, 1, 3, 2, 16, 9, 10, 14, 8, 7]\nprint('The original list is:')\nprint(v)\nprint('The list after building heap is:')\nn=len(v)\nbuild_heap(v, n)\nprint(v)","sub_path":"Python/Data Structures/Heaps/build_heap.py","file_name":"build_heap.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"179432806","text":"from flask import Flask, Blueprint, render_template, url_for, request, redirect, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_heroku import Heroku\nfrom sqlalchemy import func\n\nfrom forms import ArticleForm, EditArticleForm, RemoveArticleForm\n\nimport sys\nfrom copy import copy\nimport json\nimport datetime\n\napp = Flask(__name__, static_url_path=\"/static\")\napp.secret_key = \"lkj()984kljl;:LKJF?.a 
form.id.data).order_by(Article.id.asc())\n for item in decrements:\n item.id -= 1\n db.session.commit()\n except Exception as e:\n print(\"\\n FAILED entry: {}\\n\".format(json.dumps(form.data)))\n print(e)\n sys.stdout.flush()\n return redirect(url_for(\"manager.errors\", errors=\"\\n FAILED entry: {}\\n\".format(json.dumps(form.data))))\n return redirect(url_for(\"manager.success\"))\n return render_template(\"/remove_article.html\", form=form)\n\n@manager.route(\"/success/\", methods=[\"GET\"])\ndef success():\n return render_template(\"/success.html\")\n\n@manager.route(\"/errors/\", methods=[\"GET\"])\ndef errors():\n problems = request.args.get(\"errors\")\n return render_template(\"/errors.html\", errors=problems)\n\napp.register_blueprint(manager)\n\n#------------------------------------------------------------------------------#\n#Data fetching\nfetcher = Blueprint(\"fetcher\", __name__, template_folder='templates')\n\n@fetcher.route(\"/view_articles/\")\ndef view_articles():\n data = Article.query.order_by(Article.id).all()\n return render_template(\"view_articles.html\", articles=data)\n\n# routes to get shit\n@fetcher.route(\"/get_article//\", methods=[\"GET\"])\ndef get_article_data(id):\n result = {\n \"status\": None,\n \"data\": None\n }\n try:\n num = float(id)\n article = Article.query.get(id)\n result[\"status\"] = \"SUCCESS\"\n result[\"data\"] = {\n \"id\": article.id,\n \"title\": article.title,\n \"date\": article.date.strftime(\"%B %d, %Y\"),\n \"author\": article.author,\n \"image\": article.image,\n \"caption\": article.caption,\n \"location\": article.location,\n \"article\": article.article,\n \"category\": article.category,\n \"scope\": article.scope\n }\n return jsonify(result)\n except Exception as e:\n result[\"status\"] = \"ERROR: can't get article data for id \" + id\n result[\"data\"] = repr(e)\n return jsonify(result)\n\ndef get_articles_data(type, value):\n articles = None\n if type == \"id\":\n articles = Article.query.order_by(Article.id.desc()).all()\n elif type == \"time\":\n today = datetime.date.today()\n if value == \"weekly\":\n cutoff = today - datetime.timedelta(days=7)\n articles = Article.query.filter(Article.date >= str(cutoff)).order_by(Article.date.desc()).all()\n elif value == \"monthly\":\n cutoff = today.replace(day=1)\n articles = Article.query.filter(Article.date >= str(cutoff)).order_by(Article.date.desc()).all()\n elif value == \"yearly\":\n cutoff = today - datetime.timedelta(days=365)\n articles = Article.query.filter(Article.date >= str(cutoff)).order_by(Article.date.desc()).all()\n else:\n articles = Article.query.order_by(Article.date.desc()).all()\n elif type == \"author\":\n articles = Article.query.filter(func.lower(Article.author) == value).order_by(Article.id.desc()).all()\n elif type == \"category\":\n articles = Article.query.filter(func.lower(Article.category) == value).order_by(Article.id.desc()).all()\n elif type == \"scope\":\n articles = Article.query.filter(func.lower(Article.scope) == value).order_by(Article.id.desc()).all()\n data = []\n for article in articles:\n itemdata = {\n \"id\": article.id,\n \"title\": article.title,\n \"date\": article.date.strftime(\"%B %d, %Y\"),\n \"author\": article.author,\n \"image\": article.image,\n \"caption\": article.caption,\n \"location\": article.location,\n \"article\": article.article,\n \"category\": article.category,\n \"scope\": article.scope\n }\n data.append(itemdata)\n return data\n\n# route to get articles\n@fetcher.route(\"/get_articles///\", 
methods=[\"GET\"])\ndef get_articles(type, value):\n #some standardization to input\n type = type.lower()\n value = value.lower()\n result = {\n \"status\": None,\n \"data\": None\n }\n if type not in [\"time\", \"author\", \"category\", \"scope\", \"id\"]:\n result[\"status\"] = \"ERROR: '\" + type + \"' is not a valid query type\"\n result[\"data\"] = None\n return jsonify(result)\n try:\n result[\"data\"] = get_articles_data(type, value)\n result[\"status\"] = \"SUCCESS\"\n return jsonify(result)\n except Exception as e:\n result[\"status\"] = \"ERROR: can't get articles of type '\" + type + \"' and value '\" + value + \"'\"\n result[\"data\"] = repr(e)\n return jsonify(result)\n\ndef get_options_data(type):\n options = []\n if type == \"time\":\n options = [\"Weekly\", \"Monthly\", \"Yearly\", \"All Time\"]\n elif type == \"author\":\n for option in Article.query.distinct(Article.author):\n options.append(option.author)\n elif type == \"category\":\n for option in Article.query.distinct(Article.category):\n options.append(option.category)\n elif type == \"scope\":\n for option in Article.query.distinct(Article.scope):\n options.append(option.scope)\n return options\n\n#route to get options for a browse search\n@fetcher.route(\"/get_options//\", methods=[\"GET\"])\ndef get_options(type):\n result = {\n \"status\": None,\n \"data\": None\n }\n if type not in [\"time\", \"author\", \"category\", \"scope\"]:\n result[\"status\"] = \"ERROR: '\" + type + \"' is not a valid option type\"\n result[\"data\"] = None\n return result\n try:\n options = get_options_data(type)\n result[\"status\"] = \"SUCCESS\"\n result[\"data\"] = options\n return jsonify(result)\n except Exception as e:\n result[\"status\"] = \"ERROR: can't get options of type '\" + type + \"'\"\n result[\"data\"] = repr(e)\n return jsonify(result)\n\ndef get_ordered_titles_data():\n data = []\n for article in Article.query.order_by(Article.title).all():\n itemdata = {\n \"id\": article.id,\n \"title\": article.title,\n \"date\": article.date.strftime(\"%B %d, %Y\"),\n \"author\": article.author,\n \"image\": article.image,\n \"caption\": article.caption,\n \"location\": article.location,\n \"article\": article.article,\n \"category\": article.category,\n \"scope\": article.scope\n }\n data.append(itemdata)\n return data\n\n#route to get articles sorted by title\n@fetcher.route(\"/get_ordered_titles/\", methods=[\"GET\"])\ndef get_ordered_titles():\n result = {\n \"status\": None,\n \"data\": None\n }\n try:\n result[\"data\"] = get_ordered_titles_data()\n result[\"status\"] = \"SUCCESS\"\n return jsonify(result)\n except Exception as e:\n result[\"status\"] = \"ERROR: can't get articles sorted by title\"\n result[\"data\"] = repr(e)\n return jsonify(result)\n\n#the \"clutch\" call\n@fetcher.route(\"/load_app/\", methods=[\"GET\"])\ndef load_app():\n result = {\n \"status\": None,\n \"data\": None\n }\n try:\n result[\"data\"] = {\n \"articles\": [],\n \"titles\": [],\n \"options\": {}\n }\n result[\"data\"][\"articles\"] = get_articles_data(\"id\", \"null\")\n result[\"data\"][\"titles\"] = get_ordered_titles_data()\n for option in [\"time\", \"author\", \"category\", \"scope\"]:\n result[\"data\"][\"options\"][option] = get_options_data(option)\n result[\"status\"] = \"SUCCESS\"\n return jsonify(result)\n except Exception as e:\n result[\"status\"] = \"ERROR: can't load app\"\n result[\"data\"] = repr(e)\n return jsonify(result)\n\napp.register_blueprint(fetcher)\n\nif __name__ == '__main__':\n app.debug = True\n 
app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"17176503","text":"import random\r\n\r\n\r\n\r\ndef run():\r\n numero_aleatorio = random.randint(1, 100)\r\n numero_elegido = int(input(\"Elige un numero del 1 al 100: \"))\r\n while numero_aleatorio != numero_elegido:\r\n if numero_aleatorio < numero_elegido:\r\n print(\"Busca un numero mas pequeño\")\r\n else:\r\n print(\"Busca un numero mas grande\")\r\n numero_elegido = int(input(\"Elige otro numero: \"))\r\n print(\"¡ganaste!\")\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run()","sub_path":"introduccion python/adivina_el_numero.py","file_name":"adivina_el_numero.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"45589114","text":"from typing import List\nfrom argparse import Namespace\nfrom gooey import GooeyParser\n\nimport numpy as np\n\nfrom model.Mask_RCNN.mrcnn.config import Config\n# from .fix_validator import fix_validator\n\n\ndef image_preprocess_parser(\n parser: GooeyParser = GooeyParser(),\n title='Image Preprocess',\n config: Config = Config(),\n ) -> GooeyParser:\n\n image_preprocess_parser = parser.add_argument_group(title=title)\n\n image_resize_help = \\\n \"none: No resizing or padding. Return the image unchanged.\\n\"\\\n \"square: Resize and pad with zeros to get a square image of size\"\\\n \"[max_dim, max_dim].\\n\"\\\n \"pad64: Pads width and height with zeros to make them multiples\"\\\n \"of 64.\"\\\n \"If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, \"\\\n \"then it scales up before padding. \"\\\n \"IMAGE_MAX_DIM is ignored in this mode. \"\\\n \"The multiple of 64 is needed to ensure smooth scaling of \"\\\n \"feature maps up and down the 6 levels of the FPN pyramid \"\\\n \"(2**6=64).\\n\"\\\n \"crop: Picks random crops from the image. \"\\\n \"First, scales the image based on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, \"\\\n \"then picks a random crop of size IMAGE_MIN_DIM x IMAGE_MIN_DIM. \"\\\n \"Can be used in training only. 
\"\\\n \"IMAGE_MAX_DIM is not used in this mode.\"\n image_preprocess_parser.add_argument(\n '--image_resize_mode',\n metavar=\"Image Resize Mode\",\n help=image_resize_help,\n choices=['none', 'square', 'pad64', 'crop'],\n default=config.IMAGE_RESIZE_MODE,\n )\n\n image_preprocess_parser.add_argument(\n '--image_min_dim', type=int,\n metavar=\"Image minimum dimension\",\n default=config.IMAGE_MIN_DIM,\n )\n\n image_preprocess_parser.add_argument(\n '--image_max_dim', type=int,\n metavar=\"Image maximum dimension\",\n help=\"Ignored in pad64 mode\",\n default=config.IMAGE_MAX_DIM,\n )\n\n min_scale_help = \\\n \"Minimum scaling ratio.\\n \"\\\n \"Checked after MIN_IMAGE_DIM and can force further up scaling.\\n\"\\\n \"For example, if set to 2 then images are scaled up to double \"\\\n \"the width and height, or more, even if \"\\\n \"MIN_IMAGE_DIM doesn't require it.\\n\"\\\n \"However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM.\"\n image_preprocess_parser.add_argument(\n '--image_min_scale', type=float,\n metavar=\"Image min scale\",\n help=min_scale_help,\n default=str(config.IMAGE_MIN_SCALE),\n )\n\n image_preprocess_parser.add_argument(\n '--mean_pixel', type=eval,\n metavar=\"Image Mean (RGB)\",\n default=config.MEAN_PIXEL.tolist(),\n )\n return parser\n\n\ndef image_preprocess(args: Namespace) -> Config:\n class GeneratorConfig(Config):\n IMAGE_RESIZE_MODE = args.image_resize_mode\n\n IMAGE_MIN_DIM = args.image_min_dim\n IMAGE_MAX_DIM = args.image_max_dim\n IMAGE_MIN_SCALE = args.image_min_scale\n\n MEAN_PIXEL = np.array(args.mean_pixel)\n\n return GeneratorConfig()\n","sub_path":"generator/image_preprocess/image_preprocess_mask_rcnn.py","file_name":"image_preprocess_mask_rcnn.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"124220426","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time\t\t: 2018/1/14 13:17\n# @Author\t: chang.feng\n# @File\t\t: merge_pdf_files.py\n\nimport os\nimport glob\n\nimport PyPDF2\n\n\n'''思路:\n1、找到工作目录下所有的pdf文档\n2、为了有序添加pdf文件,按文件名进行排序\n3、除了第一页,将每个pdf的所有页都添加到PdfFileMerger对象中\n4、将PdfFileMerger对象存盘\n'''\n\n\ndef get_all_pdf_files(pdf_dirs):\n all_pdfs = glob.glob(\"{path}/*.pdf\".format(path=pdf_dirs))\n all_pdfs.sort(key=str.lower)\n return all_pdfs\n\n\ndef main():\n pdf_dirs = r'd:\\pdfs\\test_merge'\n all_pdfs = get_all_pdf_files(pdf_dirs)\n\n if not all_pdfs:\n raise SystemExit('No pdf file found.')\n\n merger = PyPDF2.PdfFileMerger()\n with open(all_pdfs[0], 'rb') as first_obj:\n merger.append(first_obj)\n\n for pdf in all_pdfs[1:]:\n with open(pdf, 'rb') as obj:\n reader = PyPDF2.PdfFileReader(obj)\n # 从第一页开始,不合并第一页(封面)\n merger.append(fileobj=obj, pages=(1, reader.getNumPages()))\n\n with open('mergerd-pdfs.pdf', 'wb') as f:\n merger.write(f)\n\n\nif __name__ == '__main__':\n main()","sub_path":"PFLSAAO/chapter_7_文档与报告/src/merge_pdf_files.py","file_name":"merge_pdf_files.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"225224194","text":"import requests\n\n_api_key = 'c7c02e97f617c43fe537a11c49b9b9da'\ndef get_names(url):\n result = requests.get(url)\n if result.status_code == 200:\n return result.json()\n else:\n print('Something wrong')\n\nif __name__ == \"__main__\":\n data = get_names(\"http://api.data.mos.ru/v1/datasets/2009/rows?api_key=%s\" % _api_key)\n print(data)\n\n with open('names.txt', 'w', newline='', 
encoding='utf-8') as f:\n        for user in data:\n            f.write(str(user['Cells']) + '\\n')","sub_path":"names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"285538657","text":"import random\n\n\ndef get_random_photo(seed=None):\n    url = \"https://picsum.photos/400/400?random=\"\n    if seed is None or not isinstance(seed, int):\n        url = url + str(random.randint(0, 31337))\n    else:\n        url = url + str(seed)\n    return url","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"534587073","text":"########################################################################################################\n# Define a SpyNet-like model\n########################################################################################################\n\nimport numpy as np\nimport torch, torch.nn as nn, torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.nn.functional import grid_sample # in (N x C x IH x IW), out (N x OH x OW x 2)\n\n\n\nclass DownConv(nn.Module):\n    def __init__(self, k=5, f=nn.ReLU()):\n        super(DownConv, self).__init__()\n        p = (k-1) // 2\n\n# some custom thing eric did\nclass DConv(nn.Module):\n    def __init__(self, infm, outfm, k, padding, dilation=1, groups=1, f=nn.ReLU(inplace=True)):\n        assert infm == outfm\n\n        super(DConv, self).__init__()\n        self.f = f\n        self.conv = nn.Conv2d(infm, outfm, k, padding=padding, groups=groups, dilation=dilation)\n        weights = torch.zeros((outfm, infm, k, k)).normal_(0, 0.01)\n        for i in range(infm):\n            weights[i,i,k//2,k//2] = 1\n        self.conv.weight = nn.Parameter(weights)\n        self.conv.bias.data /= 10\n\n    def forward(self, x):\n        return self.f(self.conv(x))\n\n# dilation level\nclass DG(nn.Module):\n    def __init__(self, k=5, f=nn.ReLU(), t=1): # t = ntargets\n        super(DG, self).__init__()\n        print('building DG with %dx%d kernels and %d targets' % (k, k, t))\n        p = (k-1) // 2; d = (k+1) // 2\n        self.f = f\n        fm = 32 * (t+1)\n        self.conv1 = nn.Conv2d(t+1, fm, k, padding=p, groups=t+1)\n        self.conv2 = nn.Conv2d(fm, fm, k, padding=p)\n        self.conv3 = DConv(fm, fm, k, padding=p*d, dilation=d)\n        self.conv4 = DConv(fm, fm, k, padding=p*d*2, dilation=d*2)\n        self.conv5 = DConv(fm, fm, k, padding=p*d*4, dilation=d*4)\n        self.conv6 = DConv(fm, fm, k, padding=p*d*8, dilation=d*8)\n        self.conv7 = nn.Conv2d(fm, 16, 3, padding=1)\n        self.conv8 = nn.Conv2d(16, 2, 3, padding=1)\n        self.conv8.weight.data /= 10\n        self.conv8.bias.data /= 10\n\n    def forward(self, x):\n        out = self.f(self.conv1(x))\n        out = self.f(self.conv2(out))\n        out = self.f(self.conv3(out))\n        out = self.f(self.conv4(out))\n        out = self.f(self.conv5(out))\n        out = self.f(self.conv6(out))\n        out = self.f(self.conv7(out))\n        out = self.f(self.conv8(out))\n        return out.permute(0,2,3,1)\n\n\n# pyramid \nclass Pyramid(nn.Module):\n    def get_identity_grid(self, dim):\n        gx, gy = np.linspace(-1, 1, dim), np.linspace(-1, 1, dim)\n        I = np.stack(np.meshgrid(gx, gy)) # (2, dim, dim)\n        I = np.expand_dims(I, 0) # (1, 2, dim, dim)\n        I = Variable(torch.Tensor(I), requires_grad=False).cuda()\n        I = I.permute(0,2,3,1) # (1, dim, dim, 2)\n        return I\n\n    def __init__(self, nlevels):\n        super(Pyramid, self).__init__()\n        print('--- Building PyramidNet with %d levels' % nlevels)\n        self.nlevels = nlevels\n\n        #self.mlist = nn.ModuleList([G() for level in xrange(nlevels)])\n        self.mlist = nn.ModuleList([DG() for level in 
xrange(nlevels)])\n\n self.f_up = lambda x: nn.Upsample(scale_factor=x, mode='bilinear')\n self.up = self.f_up(2)\n self.down = nn.AvgPool2d(2, 2)\n\n self.I_initialized = False\n\n def forward(self, stack, idx, lastlevel): # stack: B x 2 x _ x _\n if not self.I_initialized: # I do this here so we don't have to specify dim\n _, _, w, _ = stack.size()\n self.I = self.get_identity_grid(w / 2**self.nlevels)\n self.I_initialized = True\n\n # top level: return identity\n if idx == self.nlevels:\n I = self.I.repeat(stack.size()[0], 1, 1, 1) # B x 2 x _ x _\n return I, [ I ]\n # non-top level: run levels above\n frame, target = stack[:,0:1,:,:], stack[:,1:2,:,:]\n field_so_far, residuals_so_far = self.forward(self.down(stack), idx+1, lastlevel) # B x _ x _ x 2\n field_so_far = self.up(field_so_far.permute(0,3,1,2)).permute(0,2,3,1) # B x _ x _ x 2\n\n # included level: do work\n if idx >= lastlevel:\n updated_frame = grid_sample(frame, field_so_far) \n new_stack = torch.cat((updated_frame, target), 1)\n residual = self.mlist[idx](new_stack) # B x W x W x 2\n # excluded level: pass it on\n else:\n residual = Variable(torch.zeros(field_so_far.size()), requires_grad=False).cuda().detach()\n\n residuals_so_far.insert(0, residual)\n return residual + field_so_far, residuals_so_far\n\n\n# wrapper \nclass PyramidTransformer(nn.Module):\n def __init__(self, nlevels=5):\n super(PyramidTransformer, self).__init__()\n self.pyramid = Pyramid(nlevels)\n\n def forward(self, x, lastlevel=2):\n field, residuals = self.pyramid.forward(x, idx=0, lastlevel=lastlevel)\n pred = grid_sample(x[:,0:1,:,:], field).squeeze() # sample frame with field\n return pred, field, residuals\n\n def select_module(self, idx):\n for g in self.pyramid.mlist:\n g.requires_grad = False\n self.pyramid.mlist[idx].requires_grad = True\n\n def select_all(self):\n for g in self.pyramid.mlist:\n g.requires_grad = True\n \n \n","sub_path":"main/models/irrelevant/embedding_pyramid.py","file_name":"embedding_pyramid.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"96788277","text":"\"\"\"subClient URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom priClient import views\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls), # the default admin page\n url(r'^home/$', views.home), # the home page, display some basic\n url(r'^$', views.home), # the home page, display some basic\n url(r'^settings/$', views.show_settings), # in this page, change the settings\n url(r'^change_settings/$', views.change_settings), # in this page, change the settings\n url(r'^users/(\\d+)/$', views.show_users), # in this page, change the settings\n url(r'^users/$', views.show_users), # in this page, change the settings\n url(r'^sendHeart/$', views.send_heart), # in this page, change the settings\n url(r'^iviaddress/$', views.ivi_address), # in this page, change the settings\n url(r'^change_ivi/$', views.change_ivi), # in this page, change the settings\n # the auto refresh page for priClient itself\n url(r'^auto_refresh/$', views.refresh_client_info),\n # the heart beat page for subClient\n url(r'^heart/$', views.receive_heart_beat),\n url(r'^prefix/$', views.receive_prefix_request),\n url(r'^info/$', views.about),\n]\n","sub_path":"priClient/priClient/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"272310399","text":"\n\nfrom xai.brain.wordbase.nouns._trope import _TROPE\n\n#calss header\nclass _TROPES(_TROPE, ):\n\tdef __init__(self,): \n\t\t_TROPE.__init__(self)\n\t\tself.name = \"TROPES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"trope\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_tropes.py","file_name":"_tropes.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"136666302","text":"# Example of displaying a Streptococcal protein and showing peptide coverage\nimport pymol\nimport pyopenms\nprint(\"Starting pymol\")\npymol.finish_launching()\npepxml_file = \"data/4D8B.pepXML\"\n\ndef get_peptides_protein_seq():\n\n # Read in PepXML\n protein_ids = []\n peptide_ids = []\n pyopenms.PepXMLFile().load(pepxml_file, protein_ids, peptide_ids)\n peptides = [pid.getHits()[0].getSequence().toString() for pid in peptide_ids]\n\n # Sequence could be from FASTA file (or just provided here)\n sequence = \"\".join(\"\"\"\n MNKKKLGIRLLSLLALGGFVLANPVFADQNFARNEKEAKDSAITFIQKSAAIKAGARSAE\n DIKLDKVNLGGELSGSNMYVYNISTGGFVIVSGDKRSPEILGYSTSGSFDANGKENIASF\n MESYVEQIKENKKLDTTYAGTAEIKQPVVKSLLNSKGIHYNQGNPYNLLTPVIEKVKPGE\n QSFVGQHAATGCVATATAQIMKYHNYPNKGLKDYTYTLSSNNPYFNHPKNLFAAISTRQY\n NWNNILPTYSGRESNVQKMAISELMADVGISVDMDYGPSSGSAGSSRVQRALKENFGYNQ\n SVHQINRSDFSKQDWEAQIDKELSQNQPVYYQGVGKVGGHAFVIDGADGRNFYHVNWGWG\n GVSDGFFRLDALNPSALGTGGGAGGFNGYQSAVVGIKP\n \"\"\".split())\n return peptides, sequence\n\nprint(\"Plotting Protein 4D8B\")\npymol.cmd.window(\"show\")\npymol.cmd.fetch(\"4D8B\")\npymol.cmd.hide(\"everything\")\npymol.cmd.show(\"cartoon\")\n\npeptides, protein_sequence = get_peptides_protein_seq()\n# select which residues to color (set of peptides, on protein sequence)\nres_str = \"\"\nfor peptide in peptides:\n found_at = protein_sequence.find(peptide)\n res_str += \" res %s-%s\" % (found_at, found_at + len(peptide))\n\nprint(\"Superimposing MS-identified peptides .. 
please wait\")\npymol.cmd.color(\"bluewhite\")\npymol.cmd.bg_color(\"white\")\npymol.cmd.color(\"wheat\", res_str)\n\n# Do raytracing \npymol.cmd.set(\"orthoscopic\")\npymol.cmd.set(\"ray_trace_mode\", 1)\npymol.cmd.ray(1920,1440)\n\nprint(\"Printing output image\")\npymol.cmd.png(\"4D8B_coverage_white_5_ray_ortho_mode1.png\", dpi=600)\n\n","sub_path":"src/examples/pymol_example.py","file_name":"pymol_example.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"458547575","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.optimizers import SGD, adam, RMSprop\r\nfrom keras.layers import Dense, Dropout, Activation, Flatten\r\n\r\nfrom sklearn import datasets\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.neural_network import MLPClassifier\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import precision_score\r\nfrom sklearn.metrics import recall_score\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import roc_curve, auc\r\n\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\nans1 = open(\"sentence_train_ans.txt\", 'r')\r\nans1Handled = open(\"sentence_train_ans_handled.txt\", 'w')\r\nans2 = open(\"sentence_test_ans.txt\", 'r')\r\nans2Handled = open(\"sentence_test_ans_handled.txt\", 'w')\r\nwhile True:\r\n line = ans1.readline()\r\n if not line:\r\n break;\r\n if \"c\" in line:\r\n ans1Handled.write(\"1\\n\")\r\n else:\r\n ans1Handled.write(\"0\\n\")\r\nwhile True:\r\n line = ans2.readline()\r\n if not line:\r\n break;\r\n if \"c\" in line:\r\n ans2Handled.write(\"1\\n\")\r\n else:\r\n ans2Handled.write(\"0\\n\")\r\nans1.close()\r\nans2.close()\r\nans1Handled.close()\r\nans2Handled.close()\r\n\r\ntrainX = np.loadtxt(\"all_sentence_results_train.txt\")\r\ntrainX = trainX[:,1:]\r\ntrainY = np.loadtxt(\"sentence_train_ans_handled.txt\")\r\n\r\n#print(trainX)\r\n#print(trainY)\r\n\r\nlogRes = LogisticRegression()\r\nlogRes.fit(trainX, trainY)\r\n\r\ntestX = np.loadtxt(\"all_sentence_results_test.txt\")\r\ntestX = testX[:,1:]\r\ntestY = np.loadtxt(\"sentence_test_ans_handled.txt\")\r\n\r\n#random result\r\nimport random\r\n\r\nprint(\"*****results for Random Baseline*****\")\r\ntag = [0, 1]\r\nrandom_predict = []\r\nfor t_line in testX:\r\n ans = random.choice(tag)\r\n random_predict.append(ans)\r\npredict_all = np.array(random_predict)\r\n\r\nall_accuracy = accuracy_score(testY, predict_all)\r\nall_precision = precision_score(testY, predict_all, average=None)\r\nall_recall = recall_score(testY, predict_all, average=None)\r\nall_f1 = f1_score(testY, predict_all, average=None)\r\n\r\nprint(\"Accuracy: %0.9f\" % all_accuracy)\r\n\r\nprint(\"Fact Precision: %0.9f\" % all_precision[0])\r\nprint(\"Fact Recall: %0.9f\" % all_recall[0])\r\nprint(\"Fact F1: %0.9f\" % all_f1[0])\r\n\r\nprint(\"Question Precision: %0.9f\" % all_precision[1])\r\nprint(\"Question Recall: %0.9f\" % all_recall[1])\r\nprint(\"Question F1: %0.9f\" % all_f1[1])\r\n\r\nprint('\\nTesting ------------')\r\ntestRes = logRes.predict(testX)\r\nprint(\"accuracy: \" + str(accuracy_score(testY, testRes)))\r\nprint(\"precision: \" + str(precision_score(testY, testRes)))\r\nprint(\"recall: \" + str(recall_score(testY, testRes)))\r\nprint(\"f1: \" + str(f1_score(testY, testRes)))\r\n'''\r\ncount = 0\r\nLength = len(testY)\r\nfor i in range(Length):\r\n if 
logRes.predict(testX)[i] != testY[i]: # 预测测试样本\r\n count += 1\r\n print(testY[i])\r\n\r\nprint(count)\r\n'''\r\n'''\r\nscores = np.zeros(1024)\r\nswitchs = np.zeros((1024, 10))\r\nfor i in range(1024):\r\n icopy = i\r\n switch = np.zeros((10,))\r\n for j in range(10):\r\n (icopy, switch[j]) = np.divmod(icopy,2)\r\n length = 0\r\n newTrainX = np.ones((len(trainY), 1))\r\n newTestX = np.ones((len(testY), 1))\r\n for j in range(10):\r\n if switch[j] == 1:\r\n length = length + 1\r\n newTrainX = np.c_[newTrainX, trainX[:,j]]\r\n newTestX = np.c_[newTestX, testX[:,j]]\r\n\r\n logRes = LogisticRegression()\r\n #logRes.fit(newTrainX, trainY)\r\n #testRes = logRes.predict(newTestX)\r\n #score = precision_score(testY, testRes)\r\n scores[i] = np.mean(cross_val_score(logRes, newTrainX, trainY, cv=10, scoring='f1_macro'))\r\n switchs[i] = switch\r\n #print(switch)\r\n #print(newTrainX.shape[1])\r\n #print(score)\r\n\r\nprint(\"best cases\")\r\nmaxposi = scores.argsort()\r\nfor i in range(10):\r\n print(switchs[maxposi[1023-i]])\r\n print(scores[maxposi[1023-i]])\r\n'''\r\nprint(\"sentence level final: \")\r\nsswitchs = np.ones((10,10))\r\nsswitchs[0] = [1, 1, 1, 0, 1, 1, 1, 0, 1, 0]\r\nsswitchs[1] = [1, 1, 1, 0, 1, 1, 1, 1, 1, 1]\r\nsswitchs[2] = [1, 1, 1, 1, 1, 1, 1, 0, 1, 0]\r\nsswitchs[3] = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]\r\nsswitchs[4] = [1, 1, 1, 0, 1, 1, 1, 0, 1, 1]\r\nsswitchs[5] = [1, 1, 1, 0, 1, 1, 1, 1, 0, 0]\r\nsswitchs[6] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\r\nsswitchs[7] = [1, 1, 1, 0, 1, 1, 1, 1, 1, 0]\r\nsswitchs[8] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 0]\r\nsswitchs[9] = [1, 1, 0, 0, 1, 1, 1, 1, 1, 1]\r\ntswitchs = np.ones((11,10))\r\ntswitchs[0] = [1, 1, 0, 1, 1, 0, 0, 0, 1, 0]\r\ntswitchs[1] = [1, 1, 0, 1, 1, 0, 0, 0, 1, 1]\r\ntswitchs[2] = [1, 1, 1, 0, 0, 0, 1, 0, 0, 1]\r\ntswitchs[3] = [1, 1, 0, 0, 0, 1, 1, 0, 0, 1]\r\ntswitchs[4] = [1, 1, 0, 0, 0, 0, 1, 0, 0, 1]\r\ntswitchs[5] = [1, 1, 1, 0, 0, 0, 1, 0, 0, 0]\r\ntswitchs[6] = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]\r\ntswitchs[7] = [1, 1, 0, 0, 0, 0, 1, 0, 1, 1]\r\ntswitchs[8] = [1, 1, 1, 0, 0, 0, 1, 0, 1, 1]\r\ntswitchs[9] = [1, 1, 1, 1, 0, 1, 1, 1, 0, 0]\r\ntswitchs[10]= [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\r\n\r\nfor i in range(10):\r\n print(str(i)+\"'th best f1 result: \")\r\n newTrainX = np.ones((len(trainY), 1))\r\n newTestX = np.ones((len(testY), 1))\r\n for j in range(10):\r\n # tswitch should be changed to sswitch when change to sen level\r\n if sswitchs[i][j] == 1:\r\n newTrainX = np.c_[newTrainX, trainX[:, j]]\r\n newTestX = np.c_[newTestX, testX[:, j]]\r\n logRes = LogisticRegression(solver=\"lbfgs\", max_iter=100000)\r\n logRes.fit(newTrainX, trainY)\r\n testRes = logRes.predict(newTestX)\r\n print(logRes.coef_)\r\n print(\"accuracy: \" + str(accuracy_score(testY, testRes)))\r\n print(\"precision: \" + str(precision_score(testY, testRes)))\r\n print(\"recall: \" + str(recall_score(testY, testRes)))\r\n print(\"f1: \" + str(f1_score(testY, testRes))+\"\\n\")\r\n '''\r\n for j in range(len(testY)):\r\n # false positive\r\n if testRes[j] == 1 and testY[j] == 0:\r\n print(\"false positive: \" + str(j))\r\n\r\n for j in range(len(testY)):\r\n # false negative\r\n if testRes[j] == 0 and testY[j] == 1:\r\n print(\"false negative: \" + str(j))\r\n '''\r\n\r\n probs = logRes.predict_proba(newTestX)[:, 1]\r\n fpr, tpr, threshold = roc_curve(testY, probs)\r\n roc_auc = auc(fpr, tpr)\r\n print(roc_auc)\r\n\r\n plt.figure()\r\n lw = 2\r\n plt.plot(fpr, tpr, color='red', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)\r\n plt.plot([0, 1], [0, 1], 
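# --- illustrative aside (editor's sketch, not part of the original records) ---
# The commented-out search above enumerates all 2**10 feature subsets by
# decoding an integer into a 0/1 mask with divmod and scoring each subset with
# cross-validated f1_macro. A compact equivalent using itertools; X and y here
# are random placeholders, not the script's sentence features.
import numpy as np
from itertools import product
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

X = np.random.rand(100, 10)
y = np.random.randint(0, 2, 100)
best_score, best_mask = -np.inf, None
for mask in product([0, 1], repeat=10):          # all 1024 subsets
    cols = [j for j, m in enumerate(mask) if m]
    if not cols:
        continue
    score = cross_val_score(LogisticRegression(max_iter=1000),
                            X[:, cols], y, cv=5, scoring='f1_macro').mean()
    if score > best_score:
        best_score, best_mask = score, mask
# --- end aside ---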
color='navy', lw=lw, linestyle='--')\r\n plt.legend(loc=\"lower right\")\r\n plt.xlim([0, 1])\r\n plt.ylim([0, 1.05])\r\n plt.ylabel('True Positive Rate')\r\n plt.xlabel('False Positive Rate')\r\n plt.savefig(\"roc_curve_sentence_level.png\")\r\n plt.show()\r\n","sub_path":"ML-ensemble/KerasLRTest.py","file_name":"KerasLRTest.py","file_ext":"py","file_size_in_byte":6657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"545144697","text":"import curses\nstdscr = curses.initscr()\n\n# Inorder to be able to read keys and echo them in only special situations\ncurses.noecho()\n\n#Applications will need to react to Keys instantly, without pressing the Enter Key ie the CBREAK Mode\ncurses.cbreak()\n\n# Terminals many a times return SPECIAL KEYs and sometimes their combinations\nstdscr.keypad (True)\n\n\n# Terminating the curses application\ndef exit ():\n curses.nocbreak()\n stdscr.keypad (False);\n curses.echo()\n\n # Return the terminal to its original working mode\n curses.endwin()\n","sub_path":"python/Curses_Programming/Basics.py","file_name":"Basics.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"348741624","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n=========================================================================\n DIAlignPy -- Alignment of Targeted Mass Spectrometry Runs\n=========================================================================\n\n\nCopyright (C) 2020 Shubham Gupta\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
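# --- illustrative aside (editor's sketch, not part of the original records) ---
# The manual curses init/teardown in the record above is error-prone: if the
# program raises before exit() runs, the terminal is left in cbreak mode.
# curses.wrapper() performs the same setup (noecho, cbreak, keypad) and always
# restores the terminal, even on exceptions. A minimal sketch:
import curses

def main(stdscr):
    stdscr.addstr(0, 0, "Press any key to quit")
    stdscr.getkey()

if __name__ == '__main__':
    curses.wrapper(main)  # init, call main(stdscr), then restore the terminal
# --- end aside ---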
If not, see <http://www.gnu.org/licenses/>.\n\n--------------------------------------------------------------------------\n$Maintainer: Shubham Gupta$\n$Authors: Shubham Gupta$\n--------------------------------------------------------------------------\n\"\"\"\n\nclass referenceForPrecursor():\n \"\"\"\n Calculates reference run for each precursor.\n Returns a dictionary with Precursor_id as key and Run as value.\n refType must be either best_run, multipeptide_specific or precursor_specific.\n \"\"\"\n def __init__(self, refType=\"best_run\", run = None, alignment_fdr_threshold = 0.05):\n self.referenceType = refType\n self.alignment_fdr_threshold = alignment_fdr_threshold\n self.best_run = run\n\n def get_reference_for_precursors(self, multipeptides):\n if (self.referenceType == \"best_run\"):\n if not self.best_run:\n raise Exception(\"run should be provided with initialization if refType is best_run.\")\n else:\n return self._get_reference_run(multipeptides)\n elif (self.referenceType == \"precursor_specific\"):\n return self._get_precursor_reference_run(multipeptides)\n elif (self.referenceType == \"multipeptide_specific\"):\n return self._get_multipeptide_reference_run(multipeptides)\n else:\n raise Exception(\"refType must be either best_run, multipeptide_specific or precursor_specific.\")\n\n # Get a single reference run\n def _get_reference_run(self, multipeptides):\n \"\"\"\n Returns a dictionary with precursor id as key and run as value.\n Single reference run for each precursor.\n \"\"\"\n reference_run = {}\n run_id = self.best_run.get_id()\n precursor_ids = set()\n for i in range(len(multipeptides)):\n # Get all precursors from each multipeptide\n precs = multipeptides[i].getAllPeptides()\n for prec in precs:\n # Add precursor ID if it is from best_run.\n if prec.getRunId() == run_id:\n precursor_ids.add(prec.get_id())\n # Get a sorted list\n precursor_ids = sorted(precursor_ids)\n # Assign best_run as reference run for each precursor_id\n reference_run = dict.fromkeys(precursor_ids , self.best_run)\n return reference_run\n\n # Get a reference run for each precursor\n def _get_precursor_reference_run(self, multipeptides):\n \"\"\"\n Returns a dictionary with precursor id as key and run as value.\n Precursors may have different reference runs based on FDR score of associated best peak group.\n \"\"\"\n reference_run = {}\n for i in range(len(multipeptides)):\n # Get precursor groups from each multipeptide\n prec_groups = multipeptides[i].getPrecursorGroups()\n for prec_group in prec_groups:\n # Get precursors from a precursor group\n precs = prec_group.getAllPrecursors()\n max_fdr = 1.0\n for prec in precs:\n # Get all precursor ids\n prec_id = prec.get_id()\n # Get best FDR value for the precursor\n cur_fdr = prec.get_best_peakgroup().get_fdr_score()\n if cur_fdr <= self.alignment_fdr_threshold and cur_fdr < max_fdr:\n max_fdr = cur_fdr\n # Make the run of the current precursor the reference run\n reference_run[prec_id] = prec.getRun()\n if prec_id not in reference_run:\n # No peak group has FDR lower than alignment_fdr_threshold\n reference_run[prec_id] = None\n return reference_run\n
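# --- illustrative aside (editor's sketch, not part of the original records) ---
# The selection logic in _get_precursor_reference_run above keeps, per
# precursor, the run whose best peak group has the lowest FDR below the
# threshold. Stripped of the OpenSWATH object model, the core reduces to an
# argmin with a cutoff; the dict layout here is a hypothetical simplification.
def pick_reference(fdr_by_run, threshold=0.05):
    """fdr_by_run: dict run_id -> best peak-group FDR for one precursor."""
    candidates = {r: f for r, f in fdr_by_run.items() if f <= threshold}
    if not candidates:
        return None  # no run passes the threshold
    return min(candidates, key=candidates.get)

# pick_reference({'run1': 0.01, 'run2': 0.2}) -> 'run1'
# --- end aside ---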
 # Get a reference run for each precursor-group\n def _get_multipeptide_reference_run(self, multipeptides):\n \"\"\"\n Returns a dictionary with precursor id as key and run as value.\n Precursors may have different reference runs based on FDR score of associated multipeptide's best peak group.\n \"\"\"\n reference_run = {}\n for i in range(len(multipeptides)):\n # Get precursor groups from each multipeptide\n prec_groups = multipeptides[i].getPrecursorGroups()\n max_fdr = 1.0\n refRun = None\n for prec_group in prec_groups:\n precs = prec_group.getAllPrecursors()\n cur_fdr = prec_group.getOverallBestPeakgroup().get_fdr_score()\n if cur_fdr <= self.alignment_fdr_threshold and cur_fdr < max_fdr:\n max_fdr = cur_fdr\n refRun = prec_group.run_\n for prec in precs:\n # Get all precursor ids\n prec_id = prec.get_id()\n # Assign the selected reference run to the current precursor\n reference_run[prec_id] = refRun\n return reference_run\n","sub_path":"analysis/alignment/reference_run_selection.py","file_name":"reference_run_selection.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"105990726","text":"import os\nfrom nsml import DATASET_PATH\n\n\ndef feed_infer(output_file, infer_func):\n \"\"\"\n infer_func(function): the user's inference function\n output_file(str): path of the file where the inference results are saved\n (results must be saved at this location so that evaluation.py receives the correct arguments.)\n \"\"\"\n root_path = os.path.join(DATASET_PATH, 'test')\n top1_reference_ids = infer_func(root_path)\n top1_reference_ids_str = [' '.join(l) for l in top1_reference_ids]\n print('write output')\n with open(output_file, 'w') as file_writer:\n file_writer.write(\"\\n\".join(top1_reference_ids_str))\n\n if os.stat(output_file).st_size == 0:\n raise AssertionError('output result of inference is nothing')\n\n\ndef test_data_loader(root_path):\n return root_path\n","sub_path":"9_iret_car/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"389365858","text":"import os\nimport pickle\nimport logging\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\nimport warnings\n\ntry:\n from constant import MB_PATH, TR_PATH, ROUTE_TRANS_PATH\nexcept:\n from .constant import MB_PATH, TR_PATH, ROUTE_TRANS_PATH\n\nwarnings.filterwarnings(\"ignore\")\n\npd.set_option('display.max_columns', 35) # fixed: option_context is only effective inside a with-block\n\nlogger = logging.getLogger(__name__)\n\nMB_model = pickle.load(open(MB_PATH, 'rb'))\nTR_model = pickle.load(open(TR_PATH, 'rb'))\n\n# print('\\n\\n\\nFLEET DATAFRAME:', fleet) # jinghan\n# print('\\n\\n\\nPRICE DATAFRAME:', price) # jinghan\n# print('\\n\\n\\nCONTRACT DATAFRAME:', share) # jinghan\n# print('\\n\\n\\nFLEET DATAFRAME:', flight) # jinghan\n\n#\n###############################################################\n################# Load Model #########################\n###############################################################\n\n\n# Load Model from pickle file\n###############################################################\n############## Load Data #########################\n###############################################################\n\ndef loadcsv(f_path,fname):\n df = pd.read_csv(f_path.format(fname))\n return df\n\nf_data = r'./data/{}.csv'\n\n######### Load region translation table #######################\ndf_region_trans = pd.read_csv(ROUTE_TRANS_PATH)\n\n\n########### Sample Generator for Flight Info ###############\n\ndef SampleInput(df, TestSize=0.2, RandomState=23):\n x = df.drop(['Airline'], axis=1)\n\n # stratified sampling by Category\n y_ss = x.pop('Category')\n x_train, x_test, y_train, y_test = train_test_split(x, y_ss,\\\n test_size=TestSize,\\\n stratify=y_ss,\\\n random_state=RandomState)\n\n # Generate flight group id\n x_test.reset_index(inplace=True, drop=True)\n 
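# --- illustrative aside (editor's sketch, not part of the original records) ---
# SampleInput above relies on train_test_split(..., stratify=y) to keep the
# Category distribution identical in the train and test splits. A
# self-contained sketch with placeholder data:
import numpy as np
from sklearn.model_selection import train_test_split

y = np.array([0] * 80 + [1] * 20)                # imbalanced labels
X = np.arange(100).reshape(-1, 1)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2,
                                          stratify=y, random_state=23)
assert y_te.mean() == y.mean() == 0.2            # class ratio preserved in the test split
# --- end aside ---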
x_test.reset_index(inplace=True)\n x_test.rename(columns={'index':'Flight_Group'}, inplace=True)\n\n # Generate flight percentage\n size = x_test.shape[0]\n flight_pct = np.random.dirichlet(np.ones(size), size=1)\n flight_pct = flight_pct[0] * 100\n x_test['Flight_Pct'] = np.asarray(flight_pct)\n\n print('STRATIFIED DATA:')\n print('training size:{}'.format(x_train.shape))\n print('test size:{}'.format(x_test.shape))\n print(x_test.head())\n\n return x_test\n#print(MB_model)\n#print(TR_model)\n\n\n###############################################################\n############### UI Input Translation ####################\n###############################################################\n\n\n\n\ndef ui_translation(df_flight, df_price, df_fleet, df_contract):\n\n # convert data type for four input dataframes\n df_fleet.drop(['AC_Type'], axis=1, inplace=True)\n df_fleet = df_fleet.astype('float64')\n\n num_col_flight = df_flight.columns.difference(['Orig_Region', 'Dest_Region'])\n df_flight[num_col_flight] = df_flight[num_col_flight].astype('float64')\n\n num_col_price = df_price.columns.difference(['unit_text', 'unit_browse', 'unit_stream'])\n df_price[num_col_price] = df_price[num_col_price].astype('float64')\n\n num_col_contract = df_contract.columns.difference(['airline_region'])\n df_contract[num_col_contract] = df_contract[num_col_contract].astype('float64')\n\n\n\n # Calculated weighted average fleet info for both short-haul and long-haul fleets\n\n def fleet_weighted(df_fleet):\n df_fleet['flight_count_weighted'] = df_fleet['AC_Count'] * df_fleet['Flight_Per_AC']\n df_fleet['weight'] = df_fleet['flight_count_weighted']/(df_fleet['flight_count_weighted'].sum())\n df_fleet['Seat_Count_weighted'] = df_fleet['Seat_Count'] * df_fleet['weight']\n df_fleet['Eco_Count_weighted'] = df_fleet['Eco_Count'] * df_fleet['weight']\n df_fleet['Flight_Duration_weighted'] = df_fleet['Flight_Duration'] * df_fleet['weight']\n df_fleet['IFE_weighted'] = df_fleet['IFE'] * df_fleet['weight']\n df_fleet['TV_weighted'] = df_fleet['TV'] * df_fleet['weight']\n df_fleet['Phone_weighted'] = df_fleet['Phone'] * df_fleet['weight']\n df_fleet['OneMedia_weighted'] = df_fleet['OneMedia'] * df_fleet['weight']\n col = df_fleet.columns.str.contains('weighted')\n weighted_fleet = df_fleet[df_fleet.columns[col]]\n weighted_fleet = weighted_fleet.sum().to_frame().transpose()\n weighted_fleet['Flight_Type'] = df_fleet.Fleet_Type.max()\n weighted_fleet.rename(columns={'flight_count_weighted': 'flight_count_pre',\n 'Seat_Count_weighted': 'Seat_Count',\n 'Eco_Count_weighted': 'Economy_Seat_Count',\n 'Flight_Duration_weighted': 'Flight_Duration',\n 'IFE_weighted': 'IFE',\n 'TV_weighted': 'TV',\n 'Phone_weighted': 'Phone',\n 'OneMedia_weighted': 'OneMedia'}, inplace=True)\n return weighted_fleet\n\n\n sh_fleet = fleet_weighted(df_fleet[df_fleet.Fleet_Type == 0])\n lh_fleet = fleet_weighted(df_fleet[df_fleet.Fleet_Type == 1])\n # print('^'*50)\n # print('THIS IS HOW weighted_fleet SH looks like')\n # print(sh_fleet)\n # print('^'*50)\n # print('THIS IS HOW weighted_fleet LH looks like')\n # print(lh_fleet)\n fleet_new = pd.concat([sh_fleet, lh_fleet], axis=0, ignore_index=True)\n # print('^'*50)\n # print('THIS IS HOW fleet_new looks like')\n # print(fleet_new)\n\n\n # Append new fleet info to flight info\n df_input = df_flight.merge(fleet_new, on='Flight_Type', how='left')\n\n\n # Convert Seat Count to Total Passenger Count\n df_input['TotalPassengers'] = df_input['Seat_Count'] * df_input['Load_Factor']/100\n\n # Convert 
Economy_Seat_Count to LnBusPassPercent\n df_input['BusPassPercent'] = 100 - 0.85 * (df_input['Economy_Seat_Count']/df_input['Seat_Count'])\n df_input.drop(['Economy_Seat_Count', 'Seat_Count'], axis=1, inplace=True)\n\n\n # Calculate true flight count and true flight percentage for each flight group\n\n df_input['total_flight_count'] = df_input['flight_count_pre'] * df_input['per_Total_Flight'] / 100\n\n # print('-' * 30, 'original df_input', '-' * 30)\n # print(df_input)\n\n # prepare night flights\n df_night = df_input.copy()\n df_night['Red_Eye'] = 1\n df_night['flight_count'] = df_night['total_flight_count'] * df_night['per_Night_Flight'] / 100\n # print('-' * 30, 'table df_night', '-' * 30)\n # print(df_night)\n\n # prepare day flights\n df_day = df_input.copy()\n df_day['Red_Eye'] = 0\n df_day['flight_count'] = df_day['total_flight_count'] * (100 - df_day['per_Night_Flight']) / 100\n # print('-' * 30, 'table df_day', '-' * 30)\n # print(df_night)\n\n # concat day flights and night flights together and then calculate true flight percentage\n df_input = pd.concat([df_night, df_day], axis=0, ignore_index=True)\n df_input['Flight_Pct'] = df_input['flight_count']/(df_input['flight_count'].sum()) * 100\n\n # clean columns\n df_input = df_input.reset_index()\n df_input.rename(columns={'index': 'Flight_Group'}, inplace=True)\n df_input.drop([#'Flight_ID',\n 'per_Total_Flight',\n 'Load_Factor',\n 'per_Night_Flight',\n 'total_flight_count',\n 'flight_count_pre'],\n axis=1,\n inplace=True)\n # print('^-^' * 30, 'df_input before df_factor', '^-^' * 30)\n # print(df_input)\n\n\n\n\n # extract base table for revenue calculation\n df_factor = df_input[['Flight_Group', 'Flight_Type', 'Flight_ID', 'TotalPassengers', 'Flight_Pct', 'flight_count', 'Flight_Duration']]\n df_factor['price_text'] = df_price.loc[0, 'text']\n df_factor['price_browse'] = df_price.loc[0, 'browse']\n df_factor['price_stream'] = df_price.loc[0, 'stream']\n df_factor['text_factor'] = df_factor['TotalPassengers'] * df_factor['flight_count'] * df_factor['price_text']\n df_factor['browse_factor'] = df_factor['TotalPassengers'] * df_factor['flight_count'] * df_factor['price_browse']\n df_factor['stream_factor'] = df_factor['TotalPassengers'] * df_factor['flight_count'] * df_factor['price_stream']\n df_factor = df_factor[['Flight_Group','Flight_Type', 'Flight_ID','Flight_Pct', 'flight_count', \\\n 'text_factor', 'browse_factor', 'stream_factor', \\\n 'Flight_Duration']]\n\n\n\n # Translate Region level to IATA level\n df_input['RouteRegion'] = df_input['Orig_Region'] + '-' + df_input['Dest_Region']\n df_input = df_input.merge(df_region_trans, on='RouteRegion', how='left')\n df_input = df_input.drop(['RouteRegion', 'Orig_Region', 'Dest_Region'], axis=1)\n\n\n\n # set variable order in line with model\n ordered_col = ['Flight_Group', 'Category',\\\n 'MB', 'Hrs', 'Flight',\\\n 'Orig_Country_frq', 'Dest_Country_frq', 'RouteCountry_frq',\\\n 'OriginIATA_frq', 'DestinationIATA_frq', 'RouteIATA_frq',\\\n 'RouteRegion_frq',\\\n #'Orig_Region_Americas', 'Orig_Region_Asia', 'Orig_Region_Europe','Orig_Region_Middle East', \\\n #'Dest_Region_Americas', 'Dest_Region_Asia', 'Dest_Region_Europe', 'Dest_Region_Middle East',\\\n 'Flight_Duration',\\\n 'IFE', 'OneMedia', 'Phone', 'TV', \\\n 'Red_Eye',\\\n 'TotalPassengers', 'BusPassPercent',\\\n 'lnPrice_text', 'lnPrice_browse', 'lnPrice_stream']\n\n\n return df_input, df_factor, df_contract, ordered_col\n\n\ndef df_category(df_input, df_price, unit, num, category, ordered_col):\n\n df_input_category = 
df_input.copy()\n df_input_category.loc[:, 'Category'] = category\n\n # Append Price info and convert to LnPriceUSD\n df_input_category.loc[:, 'lnPrice_text'] = np.log1p(df_price.loc[0, 'text'])\n df_input_category.loc[:, 'lnPrice_browse'] = np.log1p(df_price.loc[0, 'browse'])\n df_input_category.loc[:, 'lnPrice_stream'] = np.log1p(df_price.loc[0, 'stream'])\n\n\n unit_cat = df_price.loc[0, unit]\n num_cat = df_price.loc[0, num]\n\n # parse the ProductName info for model\n if unit_cat =='mb':\n df_input_category.loc[:, 'MB'] = num_cat\n df_input_category.loc[:, 'Hrs'] = -1\n df_input_category.loc[:, 'Flight'] = 0\n elif unit_cat =='time':\n df_input_category.loc[:, 'Hrs'] = num_cat\n df_input_category.loc[:, 'MB'] = -1\n df_input_category.loc[:, 'Flight'] = 0\n elif unit_cat == 'flight':\n df_input_category.loc[:, 'Flight'] = 1\n df_input_category.loc[:, 'Hrs'] = -1\n df_input_category.loc[:, 'MB'] = -1\n else:\n print('Errors in Paring Price Info')\n\n # keep the feature order in line with model inputs\n df_input_category = df_input_category[ordered_col]\n\n\n return df_input_category\n\n\n\n\n###############################################################\n############# Run Model to Get Outputs #################\n###############################################################\n\ndef RunModel(df_data,mb_model=MB_model, tr_model=TR_model):\n\n df_result = df_data[['Flight_Group']]\n df_data.drop(['Flight_Group'], axis=1,\n inplace=True)\n MB = mb_model.predict(df_data)\n TR = tr_model.predict(df_data)\n df_result.loc[:, 'TotalUsageMB'] = MB\n df_result.loc[:, 'TakeRate'] = TR\n\n # print(df_result.describe())\n\n return df_result\n\n\n\n\n\nif __name__ == '__main__':\n ######## Load Sample Input for fleet and price ###############\n df_fleet = pd.read_csv(os.path.join('.','data','sample_fleet.csv'))\n df_price = pd.read_csv(os.path.join('.','data','sample_price.csv'))\n df_flight = pd.read_csv(os.path.join('.','data','sample_flight.csv'))\n df_contract = pd.read_csv(os.path.join('.','data','sample_contract.csv'))\n print(df_fleet)\n print(df_price)\n print(df_flight)\n print(df_contract)\n\n df_input, df_factor, df_contract, col_order = ui_translation(df_flight, df_price, df_fleet, df_contract)\n\n print('-'*20, 'Input table', '-'*20)\n print(df_input.describe())\n print(df_input.head())\n print(df_input.isnull().sum())\n print('-' * 20, 'factor table', '-'*20)\n print(df_factor)\n print('-' * 20, 'contract', '-'*20)\n print(df_contract)\n print(col_order)\n print('-' * 20)\n\n df_input_text = df_category(df_input, df_price, \\\n unit='unit_text', num='num_text', \\\n category=1, ordered_col=col_order)\n df_input_browse = df_category(df_input, df_price, \\\n unit='unit_browse', num='num_browse', \\\n category=2, ordered_col=col_order)\n df_input_stream = df_category(df_input, df_price, \\\n unit='unit_stream', num='num_stream', \\\n category=3, ordered_col=col_order)\n\n # Print translation result\n # print(df_input_text)\n # print(df_input_browse)\n # print(df_input_stream)\n\n result_text = RunModel(df_input_text)\n result_browse = RunModel(df_input_browse)\n result_stream = RunModel(df_input_stream)\n\n\n print(result_text.groupby('Flight_Group').describe())\n print(result_browse.groupby('Flight_Group').describe())\n print(result_stream.groupby('Flight_Group').describe())\n","sub_path":"InputTranslation.py","file_name":"InputTranslation.py","file_ext":"py","file_size_in_byte":13535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} 
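# --- illustrative aside (editor's sketch, not part of the original records) ---
# fleet_weighted() in the record above is a weighted mean: each fleet row is
# weighted by its share of total flights (AC_Count * Flight_Per_AC). The same
# result can be cross-checked with numpy.average; the toy numbers here are
# invented, only the column names follow the script.
import numpy as np
import pandas as pd

df = pd.DataFrame({'AC_Count': [10, 5], 'Flight_Per_AC': [3.0, 6.0],
                   'Seat_Count': [180, 300]})
w = df['AC_Count'] * df['Flight_Per_AC']          # 30 and 30 flights
seat_weighted = np.average(df['Seat_Count'], weights=w)
assert seat_weighted == 240.0                     # equal weights -> plain mean
# --- end aside ---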
+{"seq_id":"61155343","text":"\n## @package data_preparation\n# Module de la data préparation.\n# @author Ilyas\n# @version 1.1.0\n# @date 29 septembre 2017\n\nimport pandas as pd\nimport numpy as np\nimport glob, os.path\nfrom math import *\nfrom sklearn.linear_model import LinearRegression\nfrom scipy.stats import spearmanr\nimport correlation as corr\nimport re\nlr = LinearRegression() \n\n\n## Chargement des données\n# @brief Cette fonction permet de charger les données explicatives\n# @date 17 Octobre 2017\n# @details Elle supprime les colonnes ayant les mêmes valeurs sur chaque ligne, reindexe les données\n# @param fichier : Pnom ou chemin du fichier csv contenant les données explicatives\n# @param taille_ech : nombre de lignes à charger (par défaut charge toutes les lignes)\n# @param drop : liste de colonnes à supprimer\n# @param index_col : nom de la colonne à utiliser pour indexer les données\n# @return T : Dataframe des données chargées\n#\n# __________________________________________________________________________________________________\ndef charge_expl(fichier = 'matrice_ilb_201603.csv', taille_ech = None, drop = [], index_col = None):\n \n # Charge données\n #T = pd.read_csv(fichier, nrows = taille_ech, sep=';')\n T = fichier\n # Supprime les colonnes non-voulues\n T.drop(drop, axis = 1, inplace = True)\n # Reindexe\n if index_col != None:\n T.index = T[index_col]\n T.drop(index_col, axis = 1, inplace = True)\n # Supprime les colonnes inutiles, i.e. ayant la meme valeur sur toutes les\n # lignes\n for col in T.columns:\n if len(T[col].unique()) == 1:\n if sum(T[col].isnull()) == 0:\n T.drop(col, axis = 1, inplace = True)\n return T\n\n\n## Charge les données à expliquer\n# @brief Cette fonction permet de charger les données à expliquer\n# @date 17 Octobre 2017\n# @details Elle permet de charger les données à expliquer\n# @param T : dataframe des varaibles explicatives\n# @param fichier_sortie: nom ou chemin du fichier csv contenant la variable à expliquer\n# @param input_cal: nom de la colonne contenant la variable de sortie \n# @param index_col: nom de la colonne à utiliser pour indexer les données\n# @return T : serie de la variable à expliquer\n#\n# __________________________________________________________________________________________________\ndef charge_sortie(T, fichier_sortie, input_col, index_col = None):\n\n # Charge données\n #temp = pd.read_csv(fichier_sortie, sep=';', encoding='utf-8')\n temp = fichier_sortie\n # Reindexe\n if index_col != None:\n temp.index = temp[index_col]\n temp.drop(index_col, axis = 1, inplace = True)\n #enlever element aberrante\n temp = temp[temp[input_col] != 1]\n \n Y = pd.DataFrame(index = T.index)\n Y.loc[temp.index.intersection(Y.index),'sortie'] = temp.loc[temp.index.intersection(Y.index), input_col]\n return Y['sortie']\n\n## Sélectionne les colonnes discrètes et quantitatives\n# @brief Cette fonction de selectionner les colonnes discrètes et les colonnes quantitatives\n# @date 17 Octobre 2017\n# @details Elle crée deux listes : l'une contenant les noms des colonnes discrètes et l'autre les noms des colonnes continues\n# @param T : dataframe des varaibles explicatives\n# @return discrete_column: liste des colonnes discretes\n# @return quantitatives_column: liste des colonnes quantitatives\n#\n# __________________________________________________________________________________________________\n\ndef get_column_type(T, seuil = 12):\n\n discrete_column = []\n quantitative_column = []\n for col in T.columns:\n if 
len(T[col].unique()) < seuil or T.dtypes[col] == \"object\":\n discrete_column.append(col)\n else:\n quantitative_column.append(col)\n return discrete_column, quantitative_column\n\n\n# =============================================================================\n# Traite colonnes discretes\n# =============================================================================\n\n## Traitement des colonnes discrètes\n# @brief Cette fonction traite les colonnes discrètes en identifiant les variables assez corrélés avec le Y à retenir dans la suite de l'algorithme\n# @details Elle crée deux sortie : Un dataframe transformé et un dictionnaire regroupant les corellation des variables\n# @param T : dataframe : Dataframe des variables explicatives discretes\n# @param Y : dataframe : Dataframe de la sortie\n# @param R_dico : dictionnaire: Dictionnaire regroupant les R des variables explicatives\n# @param verbose : boolean : Afficher ou pas l'avancee de la fonction\n# @param method : string : Méthode utilisée pour calculer la corrélation entre la variable explivative discrète et la VAE. Prend que les valeurs suivantes :\n# - regression si la variable explicative est continue\n# - Cramer si la variable explicative est discrète\n# @param R_min : float : Valeur minimale de la corellation\n# @return F, R_dico: tuple : Renvoie un tuple contenant les objets suivants :\n# - dataframe transformé\n# - dictionnaire regroupant les R des variables explicatives\n# @date 29 septembre 2017\n# ____________________________________________________________________________________________________________________________________________________\n\ndef treat_discrete_columns(T, Y, R_dico, dic, method = 'regression', R_min = 0.1, verbose = False):\n\n if verbose == True:\n def vprint(*args):\n # Print each argument separately so caller doesn't need to\n # stuff everything to be printed into a single string\n for arg in args:\n print(arg,)\n print\n else: \n vprint = lambda *a: None # do-nothing function \n \n # Replace nan\n for col in T.columns:\n if T.dtypes[col]=='object' or T.dtypes[col]=='O':\n T.loc[T[col] == \".\",col] = 'Na'\n T.loc[T[col].isnull(),col] = float('NaN')\n \n F = pd.DataFrame(index = T.index)\n drop_tb = pd.DataFrame(columns=['col_name','R2'])\n drop_index = 0\n keep_tb = pd.DataFrame(columns=['col_name','R2'])\n keep_index = 0\n keep_tb_1 = pd.DataFrame(columns=['col_name','R2'])\n keep_index_1 = 0\n \n if method == 'regression':\n for col in T.columns:\n if (col in dic)== True:\n # Construit TDC\n T[col] = T[col].astype(str)\n T[col] = T[col].astype('category')\n tdc = pd.DataFrame(pd.get_dummies(T[col]))\n new_col_name =[]\n for i in range(0,len(tdc.columns)):\n new_col_name.append(col + '_' + str(tdc.columns[i]))\n tdc.columns = new_col_name\n # Garde ou pas les variables du TDC\n index = ~Y.isnull()\n for i in tdc.columns:\n if len(tdc.loc[index,i].unique()) == 2:\n R=dic[col][1]\n vprint('keep ' + i + ' with R2 = ' + str(round(R,3)))\n keep_tb_1.loc[keep_index_1] = [i,R]\n keep_index_1 = keep_index_1 + 1 \n \n else: \n # Construit TDC\n T[col] = T[col].astype(str)\n T[col] = T[col].astype('category')\n tdc = pd.DataFrame(pd.get_dummies(T[col]))\n new_col_name =[]\n for i in range(0,len(tdc.columns)):\n new_col_name.append(col + '_' + str(tdc.columns[i]))\n tdc.columns = new_col_name\n # Garde ou pas les variables du TDC\n index = ~Y.isnull()\n for i in tdc.columns:\n R, bool_R = corr.get_correlation(tdc.loc[index,i], Y.loc[index],\n seuil_cramer = 1, seuil_corr = 1)\n if 
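# --- illustrative aside (editor's sketch, not part of the original records) ---
# treat_discrete_columns above expands each categorical column into 0/1 dummy
# columns and keeps only the dummies sufficiently correlated with the target.
# Reduced to pandas primitives (corr.get_correlation is replaced here by a
# plain squared correlation via corrwith; data and threshold are invented):
import pandas as pd

T = pd.DataFrame({'color': ['red', 'blue', 'red', 'green']})
y = pd.Series([1.0, 0.0, 1.0, 0.0])
tdc = pd.get_dummies(T['color'], prefix='color').astype(float)  # one column per category
r2 = tdc.corrwith(y) ** 2                                       # squared correlation per dummy
kept = tdc.loc[:, r2 > 0.1]                                     # drop weakly related dummies
# --- end aside ---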
len(tdc.loc[index,i].unique()) == 2:\n R, bool_R = corr.get_correlation(tdc.loc[index,i], Y.loc[index],\n seuil_cramer = 1, seuil_corr = 1) \n if R < R_min:\n vprint('vire ' + i + ' with R2 = ' + str(round(R,3)))\n tdc.drop(i, axis = 1, inplace = True)\n drop_tb.loc[drop_index] = [i,R]\n drop_index = drop_index + 1\n else:\n vprint('keep ' + i + ' with R2 = ' + str(round(R,3)))\n keep_tb.loc[keep_index] = [i,R]\n keep_index = keep_index + 1\n else: \n vprint('vire ' + i + ' with R2 = ' + str(round(R,3)))\n tdc.drop(i, axis = 1, inplace = True)\n drop_tb.loc[drop_index] = [i,R]\n drop_index = drop_index + 1\n # Add tdc to F\n F = pd.concat([F,tdc], axis = 1)\n del(tdc)\n \n drop_tb.sort_values(by = 'R2', inplace = True, ascending = False)\n keep_tb.sort_values(by = 'R2', inplace = True, ascending = False)\n R_dico['variables'] = pd.concat([R_dico['variables'],keep_tb_1], axis = 0)\n R_dico['variables discretes gardees'] = pd.concat([R_dico['variables discretes gardees'],keep_tb], axis = 0)\n R_dico['variables discretes jetees'] = pd.concat([R_dico['variables discretes jetees'],drop_tb], axis = 0)\n return F, R_dico\n \n elif method == 'Cramer':\n for col in T.columns:\n index = T[col].apply(np.isreal)\n index = index & ~T[col].isnull()\n T[col][index] = T[col][index].astype(int)\n T[col] = T[col].astype(str) \n # Cramer\n index = ~Y.isnull()\n if len(T.loc[index,col].unique()) > 1:\n R, bool_R = corr.get_correlation(T.loc[index,col], Y.loc[index], 1, 1)\n if (col in dic)== True:\n R=dic[col][1]\n vprint('garde ' + col + ' avec R2 = ' + str(round(R,3)))\n keep_tb_1.loc[keep_index_1] = [col,R]\n keep_index_1 = keep_index_1 + 1\n replace_dico = {}\n for value in T[col].unique():\n index = (T[col] == value)\n replace_dico[value] = round(Y.loc[index].mean(),0)\n F = pd.concat([F,T[col].replace(replace_dico)], axis =1)\n elif R < R_min:\n vprint('vire ' + col + ' avec R2 = ' + str(round(R,3)))\n drop_tb.loc[drop_index] = [col,R]\n drop_index = drop_index + 1\n else:\n vprint('garde ' + col + ' avec R2 = ' + str(round(R,3)))\n keep_tb.loc[keep_index] = [col,R]\n keep_index = keep_index + 1\n replace_dico = {}\n for value in T[col].unique():\n index = (T[col] == value)\n replace_dico[value] = round(Y.loc[index].mean(),0)\n F = pd.concat([F,T[col].replace(replace_dico)], axis =1)\n \n else:\n vprint('vire ' + col + ' car valeurs constantes')\n drop_tb.loc[drop_index] = [col,0]\n drop_index = drop_index + 1\n \n drop_tb.sort_values(by = 'R2', inplace = True, ascending = False)\n keep_tb.sort_values(by = 'R2', inplace = True, ascending = False)\n R_dico['variables'] = pd.concat([R_dico['variables'],keep_tb_1], axis = 0)\n R_dico['variables discretes gardees'] = pd.concat([R_dico['variables discretes gardees'],keep_tb], axis = 0)\n R_dico['variables discretes jetees'] = pd.concat([R_dico['variables discretes jetees'],drop_tb], axis = 0)\n return F, R_dico\n \n else:\n raise ValueError('methode non reconnue')\n\n\n# =============================================================================\n# Traite colonnes continues\n# =============================================================================\n\n## Traitement des colonnes continues\n# @brief Cette fonction traite les colonnes continues en identifiant les variables assez corrélées avec le Y à retenir dans la suite de l'algorithme.\n# @details Elle crée deux sorties : Un dataframe transformé et un dictionnaire regroupant les corellations des variables\n# @param E: dataframe : dataframe des variables explicatives quantitatives\n# @param Y: 
dataframe : dataframe de la sortie\n# @param R_dico: dictionnaire: dictionnaire regroupant les R des variables explicatives\n# @param verbose: boolean : afficher ou pas l'avancee de la fonction\n# @param method : string : Méthode utilisée pour calculer la corrélation entre la variable explivative discrète et la VAE. Prend que les valeurs suivantes :\n# - regression si la variable explicative est continue\n# - Cramer si la variable explicative est discrète\n# @param R_min : float : seuil minimun à partir desquels les variables sont conservées\n# @param R_cont_y: float : R minimum pour les variables continues\n# @param R_Cramer_y: float : R minimum pour les variables discretes\n# @return E: dataframe : dataframe transformé\n# @return R_dico: dictionnaire: dictionnaire regroupant les R des variables explicatives\n# @date 29 septembre 2017\n# __________________________________________________________________________________________________\n\n\ndef treat_continuous_columns(E, Y, R_dico, dic, method = 'regression',R_min = 0.1, R_cont_y = 0.3,\n R_Cramer_y = 0.25, verbose = False):\n\n if verbose == True:\n def vprint(*args):\n # Print each argument separately so caller doesn't need to\n # stuff everything to be printed into a single string\n for arg in args:\n print(arg,)\n print\n else: \n vprint = lambda *a: None # do-nothing function \n \n G = pd.DataFrame(index = E.index) \n drop_tb = pd.DataFrame(columns=['col_name','R2'])\n drop_index = 0\n keep_tb = pd.DataFrame(columns=['col_name','R2'])\n keep_index = 0\n #\n drop_tb_q = pd.DataFrame(columns=['col_name','R2'])\n drop_q_index = 0\n keep_tb_d = pd.DataFrame(columns=['col_name','R2'])\n keep_d_index = 0\n keep_tb_q = pd.DataFrame(columns=['col_name','R2'])\n keep_q_index = 0\n #\n keep_tb_d_1 = pd.DataFrame(columns=['col_name','R2'])\n keep_d_index_1 = 0\n keep_tb_q_1 = pd.DataFrame(columns=['col_name','R2'])\n keep_q_index_1 = 0\n \n if method == 'regression':\n for col in E.columns:\n if (col in dic) == True:\n # Add a log variable\n index = (E[col] < (E[col].mean() - 4*E[col].std())) | \\\n (E[col] > (E[col].mean() + 4*E[col].std()))\n if sum(index) > 0:\n sgn = (E[col] - E[col].mean()) / abs(E[col] - E[col].mean())\n v = pd.DataFrame(data = 0, columns = ['log_' + col] , index = E.index)\n v.loc[index] = sgn.loc[index] * np.log(sgn.loc[index] * (E.loc[index, col] - E.loc[:,col].mean()) \\\n / (4 *E.loc[:,col].std()))\n # Garde ou vire la log variable\n R = dic[col][1]\n vprint('garde log_' + col + ' avec R2 = ' + str(round(R,3)))\n keep_tb_q_1.loc[keep_q_index_1] = ['log_' + col,R]\n keep_q_index_1 = keep_q_index_1 + 1\n E = pd.concat([E,v], axis = 1)\n \n # Add a nan variable \n index = E[col].isnull()\n if sum(index) > 0: \n u = pd.DataFrame(data = 0, columns = ['nan_'+col] , index = E.index)\n u.loc[index] = 1\n # Garde ou pas la variable nan\n index_ = ~Y.isnull()\n R = dic[col][1]\n vprint('garde nan_' + col + ' avec R2 = ' + str(round(R,3)))\n keep_tb_d_1.loc[keep_d_index_1] = ['nan_' + col,R]\n keep_d_index_1 = keep_d_index_1 + 1\n E = pd.concat([E,u], axis = 1)\n \n # Garde ou vire la variable\n R = dic[col][1]\n vprint('garde ' + col + ' with R2 = ' + str(round(R,3)))\n keep_tb_q_1.loc[keep_q_index_1] = [col,R]\n keep_q_index_1 = keep_q_index_1 + 1\n else: \n # Add a log variable\n index = (E[col] < (E[col].mean() - 4*E[col].std())) | \\\n (E[col] > (E[col].mean() + 4*E[col].std()))\n if sum(index) > 0:\n sgn = (E[col] - E[col].mean()) / abs(E[col] - E[col].mean())\n v = pd.DataFrame(data = 0, columns = ['log_' + col] , index 
= E.index)\n v.loc[index] = sgn.loc[index] * np.log(sgn.loc[index] * (E.loc[index, col] - E.loc[:,col].mean()) \\\n / (4 *E.loc[:,col].std()))\n # Garde ou vire la log variable\n R = corr.get_R_continuous(v.iloc[:,0],Y, m = 3)\n if R > R_cont_y:\n vprint('garde log_' + col + ' avec R2 = ' + str(round(R,3)))\n keep_tb_q.loc[keep_q_index] = ['log_' + col,R]\n keep_q_index = keep_q_index + 1\n E = pd.concat([E,v], axis = 1)\n \n # Add a nan variable \n index = E[col].isnull()\n if sum(index) > 0: \n u = pd.DataFrame(data = 0, columns = ['nan_'+col] , index = E.index)\n u.loc[index] = 1\n # Garde ou pas la variable nan\n index_ = ~Y.isnull()\n R, bool_R = corr.get_correlation(u.loc[index_,'nan_'+col], Y.loc[index_],\n seuil_cramer = 1, seuil_corr = 1)\n if R > R_Cramer_y:\n vprint('garde nan_' + col + ' avec R2 = ' + str(round(R,3)))\n keep_tb_d.loc[keep_d_index] = ['nan_' + col,R]\n keep_d_index = keep_d_index + 1\n E = pd.concat([E,u], axis = 1)\n \n # Garde ou vire la variable\n R = corr.get_R_continuous(E[col],Y,3)\n if R > R_cont_y:\n vprint('garde ' + col + ' with R2 = ' + str(round(R,3)))\n keep_tb_q.loc[keep_q_index] = [col,R]\n keep_q_index = keep_q_index + 1\n else:\n vprint('vire ' + col + ' with R2 = ' + str(round(R,3)))\n E.drop(col, axis = 1, inplace = True)\n drop_tb_q.loc[drop_q_index] = [col,R]\n drop_q_index = drop_q_index + 1\n R_dico['variables'] = pd.concat([R_dico['variables'],keep_tb_q_1], axis = 0)\n R_dico['variables'] = pd.concat([R_dico['variables'],keep_tb_d_1], axis = 0)\n R_dico['variables continues gardees'] = pd.concat([R_dico['variables continues gardees'],keep_tb_q], axis = 0)\n R_dico['variables continues jetees'] = pd.concat([R_dico['variables continues jetees'],drop_tb_q], axis = 0) \n R_dico['variables discretes gardees'] = pd.concat([R_dico['variables discretes gardees'],keep_tb_d], axis = 0)\n return E, R_dico\n \n \n elif method == 'Cramer':\n for col in E.columns:\n # Convert to string\n #♀corr.quantify_col(E[col], treat_na_as_zero = False)\n \n index = E[col].apply(np.isreal)\n index = index & ~E[col].isnull()\n E[col][index] = E[col][index].astype(int)\n E[col] = E[col].astype(str)\n \n # Cramer\n index = ~Y.isnull()\n if len(E.loc[index,col].unique()) > 1:\n R, bool_R = corr.get_correlation(E.loc[index,col], Y.loc[index], 1, 1) \n if (col in dic)== True:\n R=dic[col][1]\n vprint('garde ' + col + ' avec R2 = ' + str(round(R,3)))\n keep_tb.loc[keep_index] = [col,R]\n keep_index = keep_index + 1\n replace_dico = {}\n for value in E[col].unique():\n index = (E[col] == value)\n replace_dico[value] = round(Y.loc[index].mean(),0)\n G = pd.concat([G,E[col].replace(replace_dico)], axis =1) \n elif R < R_min:\n vprint('vire ' + col + ' avec R2 = ' + str(round(R,3)))\n drop_tb.loc[drop_index] = [col,R]\n drop_index = drop_index + 1\n else:\n vprint('garde ' + col + ' avec R2 = ' + str(round(R,3)))\n keep_tb.loc[keep_index] = [col,R]\n keep_index = keep_index + 1\n replace_dico = {}\n for value in E[col].unique():\n index = (E[col] == value)\n replace_dico[value] = round(Y.loc[index].mean(),0)\n G = pd.concat([G,E[col].replace(replace_dico)], axis =1)\n \n else:\n vprint('vire ' + col + ' car valeurs constantes')\n #tdc.drop(i, axis = 1, inplace = True)\n drop_tb.loc[drop_index] = [col,0]\n drop_index = drop_index + 1\n \n drop_tb.sort_values(by = 'R2', inplace = True, ascending = False)\n keep_tb.sort_values(by = 'R2', inplace = True, ascending = False)\n R_dico['variables continues gardees'] = pd.concat([R_dico['variables continues gardees'],keep_tb], axis = 
0)\n R_dico['variables continues jetees'] = pd.concat([R_dico['variables continues jetees'],drop_tb], axis = 0)\n return G, R_dico \n \n \n else:\n raise ValueError('methode non reconnue')\n\n# =============================================================================\n# Correlation\n# =============================================================================\n\n\n## Calcul des groupes de variables correlées entre elles\n# @brief Calcule la correlation entre les variables explicatives\n# @details Elle crée tableau des groupes de variables correllées entre elles\n# @param T: dataframe : tableau des variables explicatives\n# @param R_dico: dictionnaire: dictionnaire regroupant les R des variables explicatives\n# @param seuil_cramer: float : seuil à partir du quel des variables discretes sont considérées comme corrélées\n# @param seuil_corr: float : seuil à partir du quel des variables continues sont considérées comme corrélées\n# @return correlation_tb: dataframe : tableau des groupes de variables correllées\n# @date 29 septembre 2017\n# __________________________________________________________________________________________________\n\n\ndef correlation(T, R_dico, seuil_cramer, seuil_corr):\n corr_matrix, corr_matrix_bool = corr.correlation_matrix(T, seuil_cramer, seuil_corr)\n \n column_name = corr_matrix_bool.columns.values\n corel_arr = corr_matrix_bool.values\n \n dico = {}\n colonne_parcourue_arr = []\n for i in range(0, len(column_name)):\n colonne_parcourue_arr.append(False)\n dico = corr.fill_dico_by_corr(colonne_parcourue_arr, corel_arr, dico, column_name)\n \n correlation_tb = pd.DataFrame()\n for key in dico.keys():\n list_r = []\n for i in dico[key]:\n if len(R_dico['variables continues gardees'].loc[R_dico['variables continues gardees'] \n ['col_name'] == i, 'R2'].values) == 1:\n list_r.append(R_dico['variables continues gardees'].loc[R_dico['variables continues gardees']\n ['col_name'] == i, 'R2'].values[0])\n elif len(R_dico['variables discretes gardees'].loc[R_dico['variables discretes gardees']\n ['col_name'] == i, 'R2'].values) == 1:\n list_r.append(R_dico['variables discretes gardees'].loc[R_dico['variables discretes gardees']\n ['col_name'] == i, 'R2'].values[0])\n else:\n raise ValueError('Cannot find R2 for: ' + i)\n a = pd.DataFrame({'groupe_variables_correlees': [dico[key]], 'variable_representante':\n dico[key][np.argmax(list_r)],'R2': list_r[np.argmax(list_r)]})\n correlation_tb = pd.concat([correlation_tb,a], axis = 0)\n \n return correlation_tb\n\n\n# =============================================================================\n# Normalise\n# =============================================================================\n\n\n## Normalisation des variables continues \n# @brief Les variables continues sont normalisées et pondérées par leur coefficient de corellation\n# @details \n# @param E: dataframe : dataframe des colonnes à normaliser\n# @param R_dico: dictionnaire: dictionnaire regroupant les R des variables explicatives\n# @return E: dataframe : dataframe normalisé\n# @date 29 septembre 2017\n# __________________________________________________________________________________________________\n\ndef normalize_(E, R_dico):\n for col in E.columns:\n if sum(R_dico['variables continues gardees']['col_name'] == col) == 1:\n index = (R_dico['variables continues gardees']['col_name'] == col)\n R = R_dico['variables continues gardees'].loc[index,'R2'].values[0]\n \n elif sum(R_dico['variables discretes gardees']['col_name'] == col) == 1:\n index = 
(R_dico['variables discretes gardees']['col_name'] == col)\n R = R_dico['variables discretes gardees'].loc[index,'R2'].values[0]\n elif sum(R_dico['variables']['col_name'] == col) == 1:\n index = (R_dico['variables']['col_name'] == col)\n R = R_dico['variables'].loc[index,'R2'].values[0]\n else:\n raise ValueError('ne trouve pas le R2 correspondant à la colonne')\n \n E[col] = (E[col] - E[col].mean()) / E[col].std() * R\n return E\n\n# =============================================================================\n# Main\n# =============================================================================\n\n## Fonction main de la datapréparation\n# @brief Supprime tous les fichiers dans result & Charge les données, crée de nouvelles variables, séléctionne les variables et les normalise.\n# @details \n# @param fichier: fichier csv contenant les variables explicatives\n# @param R_dico: dictionnaire regroupant les R des variables explicatives\n# @param fichier_sortie: fichier csv contenant la variable expliquee\n# @param taille_ech: nombre de lignes du fichier de variables explicatives\n# @param R_cont_y: seuil à partir du quel nous considérons qu'une variable explicative continue est corrélée avec la sortie\n# @param R_Cramer_y: seuil à partir du quel nous considérons qu'une variable explicative discrete est corrélée avec la sortie\n# @param R_cont_x: seuil à partir du quel nous considérons que deux variables explicatives continues sont correllées\n# @param R_Cramer_x: seuil à partir du quel nous considérons que deux variables explicatives discretes sont correllées\n# @param verbose: afficher l'avancee de la fonction ou non\n# @return T: dataframe des variables explicatives transformées\n# @return Y: sortie\n# @return R_dico: dictionnaire regroupant les R des variables explicatives\n# @date 17 Octobre 2017\n# __________________________________________________________________________________________________\n\ndef main(fichier = 'variables_explicatives.csv',fichier_sortie = 'zz.csv', method_disc='regression',method_continuous='regression',\n taille_ech = 50000,R_cont_y = 0.3, R_Cramer_y = 0.25, R_cont_x = 0.8, R_Cramer_x = 0.7,dic={},\n normalize = True, verbose = True, path_rslt='chemin_vers_donnees', suffix_table='suffix_table'):\n\n ######################################\n #Delete all file in the folder result#\n ######################################\n print('Do not delete all file in the folder result! No need for the moment! 
')\n #path_del=path_rslt + \"*\"\n #r = glob.glob(path_del)\n #for i in r:\n # os.remove(i)\n \n ############################\n # Définition des paramètres\n ############################\n print('Charge données.................')\n T = charge_expl(fichier = fichier, taille_ech = taille_ech,\n drop = [], index_col = 'IDCLI_CALCULE') #\n Y = charge_sortie(T = T, fichier_sortie = fichier_sortie,\n input_col = 'revenu', index_col = 'IDCLI_CALCULE')\n \n #####################################\n #Détecte variables quali et quanti ## \n ##################################### \n discrete_column, quantitative_column = get_column_type(T, seuil = 12)\n \n R_dico = { 'variables discretes jetees': None,\n 'variables continues jetees': None,\n 'variables discretes gardees': None,\n 'variables continues gardees': None,\n 'variables': None \n }\n\n ###################################################\n #Préparation et selection des variables discretes #\n ###################################################\n print('Preparation et selection des variables discretes...')\n F, R_dico = treat_discrete_columns(T[discrete_column], Y,\n R_dico,dic, method = method_disc, R_min = R_Cramer_y, verbose = verbose)\n T.drop(discrete_column, axis = 1, inplace = True)\n \n #######################################################\n #Préparation et selection des variables quantitatives #\n #######################################################\n print('Preparation et selection des variables continues...')\n E, R_dico = treat_continuous_columns(T[quantitative_column], Y,\n R_dico,dic, method = method_continuous, R_min = R_Cramer_y, R_cont_y = R_cont_y,\n R_Cramer_y = R_Cramer_y, verbose = verbose)\n T.drop(quantitative_column, axis = 1, inplace = True)\n \n #################################\n # Merge variables and R2 tables #\n #################################\n T = pd.concat([E,F], axis = 1)\n #del E, F\n \n T.drop([col for col in R_dico['variables']['col_name'] if col in T], axis=1, inplace=True)\n ###############\n # Correlation #\n ###############\n print('Calcul des correlations...')\n correlation_tb = correlation(T, R_dico, seuil_cramer = R_Cramer_x, seuil_corr = R_cont_x)\n R_dico['groupe variables'] = correlation_tb\n A = T[correlation_tb['variable_representante']]\n\n T = pd.concat([E,F], axis = 1)\n del E, F\n \n for col in R_dico['variables']['col_name']:\n if (col in A.columns)==False:\n A=pd.concat([A,T[col]],axis=1)\n \n \n# for col in dic.keys():\n# if (col in T.columns)==True and (col in A.columns)==False:\n# A=pd.concat([A,T[col]],axis=1)\n# if ('log_' + col in T.columns)==True and ('log_' + col in A.columns)==False:\n# A=pd.concat([A,T['log_' + col]],axis=1)\n# if ('nan_' + col in T.columns)==True and ('nan_' + col in A.columns)==False:\n# A=pd.concat([A,T['nan_' + col]],axis=1)\n ########################\n # Normalize and fill na#\n ########################\n if normalize == True:\n A = normalize_(A,R_dico)\n A.fillna(0, inplace = True)\n \n return A, Y, R_dico\n\n","sub_path":"Part_2/data_preparation.py","file_name":"data_preparation.py","file_ext":"py","file_size_in_byte":32106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"516480946","text":"import socket\nimport threading\nimport time\nfrom PIL import Image\nimport pyscreenshot as ImageGrab\nimport zlib\nfrom pymouse import PyMouse\n\nIMAGE_SIZE = 1366, 768\nPORT_NUMBER = 6666\n\ndef capscreen():\n\t#return zlib.compress(ImageGrab.grab().resize(IMAGE_SIZE).tobytes())\n\treturn 
ImageGrab.grab().resize(IMAGE_SIZE).tobytes()\n\nclass Server():\n\tdef __init__(self):\n\t\tself.running = True\n\t\tself.socket = None\n\t\tself.client_socket = None\n\t\tself.client_address = None\n\t\tself.mouse = PyMouse()\n\n\tdef run(self):\n\t\tself.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.socket.bind(('localhost', PORT_NUMBER))\n\t\tself.socket.listen(1) # max 1 connection for now\n\t\tprint('Waiting for connections...')\n\t\tself.client_socket, self.client_address = self.socket.accept()\n\t\tprint('connected to ' + str(self.client_address))\n\t\tthreading.Thread(target=self.receive).start()\n\t\t#self.client_socket.send(str(IMAGE_SIZE).encode()) #sending dimensions\n\t\twhile self.running:\n\t\t\tself.sendcap()\n\n\tdef sendcap(self):\n\t\tss = capscreen()\n\t\tself.client_socket.send(ss)\n\n\tdef receive(self):\n\t\twhile self.running:\n\t\t\tmouse_location = (self.client_socket.recv(1024).decode())\n\t\t\tprint(mouse_location)\n\t\t\tif ',' in mouse_location:\n\t\t\t\tx, y = map(float, mouse_location.split(','))\n\t\t\t\tself.mouse.move(int(x), int(y))\n\n\tdef kill(self):\n\t\tself.running = False\n\t\tself.client_socket.close()\n\nif __name__ == '__main__':\n\tserver = Server()\n\tserver.run()\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"317862478","text":"#!/usr/bin/env python3\n\n'''\nConventions:\n\n Name Object Type\n -------------------------------------------------------\n N scalar int\n dataset 1D data vector array\n\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass profit:\n\n def pnl (entryPrice, exitPrice, size, side):\n\n '''\n Profit and Loss computation for futures, derivatives or options trading.\n \n Arguments:\n entryPrice/\n exitPrice prices in collateral currency\n side \"buy\"/\"sell\"\n size total contract size (leveraged) in collateral \n currency (usually dollars)\n \n Returns the absolute profit in collateral currency. \n '''\n\n if side == 'buy':\n PnL = (exitPrice-entryPrice) * size\n elif side == 'sell':\n PnL = (1/exitPrice-1/entryPrice) * size\n else:\n raise ValueError(f'Provided side must be \"buy\" or \"sell\", not {side}')\n return PnL\n\n def roi (entryPrice, exitPrice):\n\n '''\n Return on Invest.\n Returns percentage ROI by the formula\n (gain from investment − cost of investment) / cost of investment\n https://en.wikipedia.org/wiki/Return_on_investment\n '''\n\n return (exitPrice-entryPrice)/entryPrice\n\n\nclass regression:\n\n def chi2(arg, dataSet, model):\n '''\n Sum over all square elements is distributed according to \n the chi-squared distribution with k degrees of freedom.\n\n Returns a scalar (float).\n '''\n out = []\n for i in range(len(dataSet)):\n out.append( (dataSet[i]-model(arg, i))**2 )\n return sum(out)\n\n def linear (dataset, extrapolate=0):\n '''\n Improved linear regression fit.\n 1. The linear fit should intersect the dataSet's mean which lies at the x range median -> anchor point\n 2. Rotate the linear model around the anchor to minimize the chi squared. 
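# --- illustrative aside (editor's sketch, not part of the original records) ---
# Note on the socket server above: socket.send() may transmit only part of a
# buffer, and the receiver gets no frame boundaries for the raw image bytes.
# A common remedy is a 4-byte length prefix plus sendall(); this sketch elides
# error handling (e.g. a peer closing mid-frame).
import struct

def send_frame(sock, payload: bytes):
    sock.sendall(struct.pack('!I', len(payload)) + payload)  # length header, then body

def recv_frame(sock):
    header = sock.recv(4)
    (length,) = struct.unpack('!I', header)
    chunks, received = [], 0
    while received < length:                     # loop until the whole frame arrived
        chunk = sock.recv(min(4096, length - received))
        chunks.append(chunk)
        received += len(chunk)
    return b''.join(chunks)
# --- end aside ---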
{"seq_id":"317862478","text":"#!/usr/bin/env python3\n\n'''\nConventions:\n\n    Name         Object Type\n    -------------------------------------------------------\n    N            scalar int\n    dataset      1D data vector array\n\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass profit:\n\n    def pnl (entryPrice, exitPrice, size, side):\n\n        '''\n        Profit and Loss computation for futures, derivatives or options trading.\n        \n        Arguments:\n            entryPrice/\n            exitPrice       prices in collateral currency\n            side            \"buy\"/\"sell\"\n            size            total contract size (leveraged) in collateral \n                            currency (usually dollars)\n        \n        Returns the absolute profit in collateral currency. \n        '''\n\n        if side == 'buy':\n            PnL = (exitPrice-entryPrice) * size\n        elif side == 'sell':\n            PnL = (1/exitPrice-1/entryPrice) * size\n        else:\n            raise ValueError(f'Provided side must be \"buy\" or \"sell\", not {side}')\n        return PnL\n\n    def roi (entryPrice, exitPrice):\n\n        '''\n        Return on Investment.\n        Returns percentage ROI by the formula\n        (gain from investment − cost of investment) / cost of investment\n        https://en.wikipedia.org/wiki/Return_on_investment\n        '''\n\n        return (exitPrice-entryPrice)/entryPrice\n\n\nclass regression:\n\n    def chi2(arg, dataSet, model):\n        '''\n        Sum over all square elements is distributed according to \n        the chi-squared distribution with k degrees of freedom.\n\n        Returns a scalar (float).\n        '''\n        out = []\n        for i in range(len(dataSet)):\n            out.append( (dataSet[i]-model(arg, i))**2 )\n        return sum(out)\n\n    def linear (dataset, extrapolate=0):\n        '''\n        Improved linear regression fit.\n        1. The linear fit should intersect the dataset's mean, which lies at the x-range median -> anchor point\n        2. Rotate the linear model around the anchor to minimize the chi squared. This saves iterating over a whole parameter.\n        '''\n        # define ranges\n        if type(dataset) is np.ndarray:\n            l = dataset.shape[0]\n        else:\n            l = len(dataset)\n        m = np.mean(dataset) # from montecarlo\n        Raise = ( dataset[-1] - dataset[0] ) / l # crude but fast estimate\n        a_range = ( Raise*0.8, Raise*1.2) # +/-20% window\n        a_step = ( a_range[1] - a_range[0] ) / 1000\n\n        # anchor and rotation notation of a linear function\n        def model(alpha, x):\n            return alpha * (x-l/2) + m\n\n        alpha = a_range[0]\n        alpha_best = alpha\n        c = regression.chi2(alpha, dataset, model)\n        while alpha < a_range[1]:\n            alpha += a_step\n            c_new = regression.chi2(alpha, dataset, model)\n            if c_new < c:\n                c = c_new\n                alpha_best = alpha\n        \n        \n        return [model(alpha_best, x) for x in range(l+extrapolate)]\n\n\nclass statistics:\n\n    def drift (dataset):\n\n        '''\n        Percentage Drift Implementation.\n        Computes the expectation value of the drift from the logarithmic\n        returns array.\n\n        '''\n\n        return statistics.mean(statistics.logReturns(dataset))\n\n    def mean (dataset, *args, **kwargs):\n        '''\n        Numpy mean alias.\n        '''\n        return np.mean(dataset, *args, **kwargs)\n\n    def meanSquaredDistance (dataset1, dataset2):\n        '''\n        Sum of elementwise squared distances between two equally sized datasets.\n\n        Returns a scalar (float).\n        '''\n        if dataset1.shape[0] != dataset2.shape[0]:\n            raise ValueError(f'Provided datasets need to have the same size, but {dataset1.shape[0]} and {dataset2.shape[0]} were provided!')\n        out = []\n        for i in range(len(dataset1)):\n            out.append( (dataset1[i]-dataset2[i])**2 )\n        return sum(out)\n\n    def logReturns (dataset):\n\n        '''\n        Computes the logarithmic returns.\n        Returns a 1D array\n        numpy.ndarray([ log(d[1]/d[0]), log(d[2]/d[1]), ... ])\n        '''\n\n        out = []\n        for i in range(len(dataset)-1):\n            out.append(np.log(dataset[i+1]/dataset[i]))\n        return np.array(out)\n
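\n    # --- Editor's sketch, not part of the original file: the two moments above in use ---\n    # Assuming a plain 1D price series, drift/volatility estimate the per-step mean and\n    # spread of the log-returns, e.g.:\n    #   >>> prices = np.array([100.0, 101.0, 99.5, 103.2, 104.0])\n    #   >>> statistics.drift(prices)       # mean log-return per step\n    #   >>> statistics.volatility(prices)  # std of log-returns (defined below)\n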
    def sampleVariance (dataset, correction=1):\n        '''\n        Sample variance.\n        The correction factor refers to Bessel's correction 1/(n-1) (default).\n        https://en.wikipedia.org/wiki/Variance#Sample_variance\n        '''\n        return np.std(dataset, ddof=correction)\n\n    def standardDeviation (dataset):\n        '''\n        Return the standard deviation obtained from sample variance.\n        '''\n\n        return np.std(dataset)\n    \n    def volatility(dataset):\n\n        '''\n        Computes the expected/mean percentage volatility per element.\n        '''\n\n        return statistics.standardDeviation(statistics.logReturns(dataset))\n\n\nclass indicators:\n\n    def ema (dataset, window):\n\n        '''\n        Exponential Moving Average.\n        Returns 1D array.\n        '''\n\n        # seed with the first sample, then smooth the rest\n        a, out = 2/(window+1), [dataset[0]]\n        for d in dataset[1:]: out.append(a*d+(1-a)*out[-1])\n        return np.array(out)\n    \n    def klinger(dataset, fastPeriod=34, slowPeriod=55, signalPeriod=13):\n        \n        '''\n        Klinger Volume Oscillator Implementation.\n        https://en.wikipedia.org/wiki/Volume_analysis\n\n        Return\n        [\n            scalar value of recent difference (signal - slow),\n            signal oscillator array,\n            slow oscillator array\n        ]\n        '''\n        \n        osc, lastTrend, lastCm, lastDm, weightFast, weightSlow = [], 0, 0, 0, 2/(fastPeriod+1), 2/(slowPeriod+1)\n        maFast, maSlow = dataset[1]['y'][-1], dataset[1]['y'][-1] # volumes\n        for i in range(2,dataset.shape[0]):\n            c = dataset[i]['y'] # ohlc candle\n            v = dataset[i]['v']\n            dm = c[1] - c[2]\n            currentTrend = c[1] + c[2] + c[3] - dataset[i-1]['y'][1] - dataset[i-1]['y'][2] - dataset[i-1]['y'][3]\n            if currentTrend <= 0:\n                TREND = -1\n            else:\n                TREND = 1\n            # compare trend directions (signs), not the raw float sums\n            if lastTrend == TREND:\n                CM = lastCm + dm\n            else:\n                CM = lastDm + dm\n            lastTrend = TREND\n            lastCm = CM\n            lastDm = dm\n            if CM == 0:\n                temp = -2\n            else:\n                temp = abs(2*(dm/CM-1))\n            VF = v*temp*TREND*100\n            maSlow = VF*weightSlow + maSlow*(1-weightSlow)\n            maFast = VF*weightFast + maFast*(1-weightFast)\n            osc.append(maFast-maSlow)\n        oscSlow = indicators.ema(osc, signalPeriod)\n        \n        return [osc[-1]-oscSlow[-1], osc, oscSlow]\n\n    def macd (dataset, signalPeriod=12, slowPeriod=26, macdPeriod=9):\n        '''\n        MACD implementation with typical period parameters.\n        The most commonly used periods (default) are 12, 26, 9, respectively.\n        Returns 1D array of difference between signal and slow oscillator.\n        '''\n        signal, slow = np.array(indicators.ema(dataset, signalPeriod)), np.array(indicators.ema(dataset, slowPeriod))\n        macd_sig = signal - slow\n        macd_slow = np.array(indicators.ema(macd_sig, macdPeriod))\n        return macd_sig - macd_slow\n\n    def rsi (dataset, window=14):\n        '''\n        Relative Strength Index Implementation.\n        Returns time-resolved 1D array with RSI values between 0 and 100.\n        '''\n        u,d,rsi=[],[],[]\n        for i in range(1,len(dataset)):\n            c = dataset[i] - dataset[i-1]\n            if c > 0:\n                u.append(c)\n                d.append(0)\n            elif c < 0:\n                u.append(0)\n                d.append(-c)\n            else:\n                u.append(0)\n                d.append(0)\n        u, d = indicators.ema(u,window), indicators.ema(d,window)\n        for i in range(len(d)):\n            if d[i]==0: # avoid zero division\n                rsi.append(100)\n            else:\n                rsi.append(100*(1-1/(1+u[i]/d[i])))\n        return np.array(rsi)\n\n    def sma (dataset, window):\n\n        '''\n        Simple moving average implementation.\n        '''\n        \n        out = []\n        for i in range(window, dataset.shape[0]):\n            out.append(np.mean(dataset[i-window:i]))\n        return out\n\n
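\n# --- Editor's sketch, not part of the original file: the indicators above in use ---\n# Assumes only numpy and a synthetic random-walk close series; names are illustrative.\ndef _example_indicators(n=200, seed=0):\n    rng = np.random.default_rng(seed)\n    closes = 100 + np.cumsum(rng.normal(0, 1, n))\n    smoothed = indicators.ema(closes, 12)   # same length as input\n    strength = indicators.rsi(closes)       # values in [0, 100]\n    trend = indicators.sma(closes, 20)      # len(closes) - 20 points\n    return smoothed, strength, trend\n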
\nclass plot:\n\n    def chart (dataset, name='dataset', overlays={}, color='b', indicatorSets={}, predictionSets={}, \n               timeset=None, savePath=None, title='Chart', renderLegend=True):\n\n        # position is split at the end of dataset, where the prediction begins\n        L = dataset.shape[0]\n\n        # determine the max prediction length\n        maxPredictionLength = 0\n        for pred in predictionSets.values():\n            if type(pred) is np.ndarray and pred.shape[0] > maxPredictionLength:\n                maxPredictionLength = pred.shape[0]\n            elif len(pred) > maxPredictionLength:\n                maxPredictionLength = len(pred)\n        \n        # define all arrays (explicit None check: truth value of an ndarray is ambiguous)\n        if timeset is not None:\n            if timeset.shape[0] != L:\n                raise ValueError(f'dataset and timeset have to match in length, provided {L} and {timeset.shape[0]} do not match!')\n            x_array = timeset\n            x_extra = np.array([str(i+1) for i in range(maxPredictionLength)])\n        else:\n            x_array = np.arange(stop=L)\n            x_extra = np.arange(start=L, stop=L+maxPredictionLength)\n        \n        x_array = np.concatenate((x_array, x_extra))\n        y_array = dataset\n        \n\n        # create pyplot figure and format it\n        fig = plt.figure(dpi=150, facecolor='black', edgecolor='white')\n        ax = fig.add_subplot(1, 1, 1)\n        ax.set_title(title, c='white')\n        ax.set_facecolor('black')\n        # ax.set_xticks([])\n        # ax.set_xlabel('')\n        ax.yaxis.tick_right()\n        ax.spines['right'].set_color('white')\n        ax.xaxis.label.set_color('white')\n        ax.tick_params(axis='y', colors='white')\n\n\n        # add main dataset to chart\n        ax.plot(x_array[:L], y_array, label=name, c=color)\n\n        # add overlay datasets (use a local length so L keeps marking the prediction split)\n        for name, overlay in overlays.items():\n            if type(overlay) is list:\n                Lov = len(overlay)\n            else:\n                Lov = overlay.shape[0]\n            ax.plot(x_array[:Lov], overlay, label=name)\n\n        # add all predictions accordingly\n        for name, pred in predictionSets.items():\n            ax.plot(x_array[L:L+pred.shape[0]], pred, label=name)\n        \n\n        # finally add the legend\n        if renderLegend:\n            plt.legend(facecolor='#383838', edgecolor='#ddd', labelcolor='linecolor')\n\n        # save if enabled\n        if savePath:\n            fig.savefig(savePath)\n        else:\n            plt.show()\n        \n        plt.close(fig)","sub_path":"ChartPeer-SDK/chartpeer/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":10983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
{"seq_id":"495727472","text":"from django.shortcuts import render\r\nfrom django.http import QueryDict, HttpRequest\r\nimport sqlite3\r\nimport requests\r\nimport json\r\n\r\n\r\ndef index(request, *a, **kw):\r\n\tif request.is_ajax():\r\n\t\tname = request.GET.get(\"name\", None)\r\n\t\tsrName = request.GET.get(\"srName\", None)\r\n\t\temail = request.GET.get(\"email\", None)\r\n\t\tphone = request.GET.get(\"phone\", None)\r\n\t\tsave_data(name, srName, email, phone)\r\n\r\n\treturn render(request, 'mainApp/index.html')\r\n\r\n\r\n#_______________------_______________#\r\n\r\n\r\ndef save_data(name, srName, email, phone):\r\n\t\r\n\tconn = sqlite3.connect('vk.db')\r\n\tc = conn.cursor()\r\n\t#c.execute('''create table data (name str, srName str, email str, phone str)''')\r\n\t# parameterized query instead of string interpolation (avoids SQL injection)\r\n\tc.execute(\"insert into data (name, srName, email, phone) values (?, ?, ?, ?)\", (name, srName, email, phone))\r\n\tconn.commit()\r\n\tc.execute('select * from data')\r\n\tfor row in c:\r\n\t\tprint(row)\r\n","sub_path":"hr/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
{"seq_id":"641808354","text":"from bokeh.plotting import figure, show, output_file\nfrom bokeh.models import ColumnDataSource, ColorBar, HoverTool, VBar, HBar, Quad\nfrom bokeh.models.transforms import CustomJSTransform\nfrom 
bokeh.models.mappers import LinearColorMapper\nfrom bokeh.models.widgets.tables import ScientificFormatter, DataTable\nfrom bokeh.transform import *\nfrom jinja2.defaults import VARIABLE_END_STRING\nfrom RootInteractive.Tools.aliTreePlayer import *\nfrom bokeh.layouts import *\nfrom bokeh.palettes import *\nfrom bokeh.io import push_notebook, curdoc\nimport logging\nimport pyparsing\nfrom IPython import get_ipython\nfrom bokeh.models.widgets import *\nfrom bokeh.models import CustomJS, ColumnDataSource\nfrom RootInteractive.Tools.pandaTools import pandaGetOrMakeColumn\nfrom RootInteractive.InteractiveDrawing.bokeh.bokehVisJS3DGraph import BokehVisJSGraph3D\nfrom RootInteractive.InteractiveDrawing.bokeh.HistogramCDS import HistogramCDS\nfrom RootInteractive.InteractiveDrawing.bokeh.HistoNdCDS import HistoNdCDS\nimport copy\nfrom RootInteractive.Tools.compressArray import compressCDSPipe\nfrom RootInteractive.InteractiveDrawing.bokeh.CDSCompress import CDSCompress\nfrom RootInteractive.InteractiveDrawing.bokeh.HistoStatsCDS import HistoStatsCDS\nfrom RootInteractive.InteractiveDrawing.bokeh.HistoNdProfile import HistoNdProfile\nfrom RootInteractive.InteractiveDrawing.bokeh.DownsamplerCDS import DownsamplerCDS\nfrom RootInteractive.InteractiveDrawing.bokeh.CDSAlias import CDSAlias\nfrom RootInteractive.InteractiveDrawing.bokeh.CustomJSNAryFunction import CustomJSNAryFunction\nfrom RootInteractive.InteractiveDrawing.bokeh.CDSJoin import CDSJoin\nfrom bokeh.transform import transform as bokehTransform\nimport re\n\n\n# tuple of Bokeh markers\nbokehMarkers = [\"square\", \"circle\", \"triangle\", \"diamond\", \"square_cross\", \"circle_cross\", \"diamond_cross\", \"cross\",\n \"dash\", \"hex\", \"invertedtriangle\", \"asterisk\", \"square_x\", \"x\"]\n\n# default tooltips for 1D and 2D histograms\ndefaultHistoTooltips = [\n (\"range\", \"[@{bin_left}, @{bin_right}]\"),\n (\"count\", \"@bin_count\")\n]\n\ndefaultHisto2DTooltips = [\n (\"range X\", \"[@{bin_bottom_0}, @{bin_top_0}]\"),\n (\"range Y\", \"[@{bin_bottom_1}, @{bin_top_1}]\"),\n (\"count\", \"@bin_count\")\n]\n\nBOKEH_DRAW_ARRAY_VAR_NAMES = [\"X\", \"Y\", \"varZ\", \"colorZvar\", \"marker_field\", \"legend_field\"]\n\ndef makeJScallbackOptimized(widgetDict, cdsOrig, cdsSel, **kwargs):\n options = {\n \"verbose\": 0,\n \"nPointRender\": 10000,\n \"cmapDict\": None,\n \"histogramList\": []\n }\n options.update(kwargs)\n\n code = \\\n \"\"\"\n const t0 = performance.now();\n const dataOrig = cdsOrig.data;\n const nPointRender = options.nPointRender;\n let nSelected=0;\n const precision = 0.000001;\n const size = cdsOrig.length;\n let isSelected = new Array(size);\n for(let i=0; i= widgetValue-0.5*widgetStep);\n isSelected[i] &= (col[i] <= widgetValue+0.5*widgetStep);\n }\n }\n if(widgetType == \"RangeSlider\"){\n const col = dataOrig[key];\n const low = widget.value[0];\n const high = widget.value[1];\n for(let i=0; i= low);\n isSelected[i] &= (col[i] <= high);\n }\n }\n if(widgetType == \"Select\"){\n const col = dataOrig[key];\n let widgetValue = widget.value;\n widgetValue = widgetValue === \"True\" ? true : widgetValue;\n widgetValue = widgetValue === \"False\" ? 
false : widgetValue;\n for(let i=0; i{\n if(val === \"True\") return true;\n if(val === \"False\") return false;\n if(!isNaN(val)) return Number(val);\n return val;\n });\n for(let i=0; iacc|Math.abs(cur-col[i])acc|cur===col[i],0);\n }\n isSelected[i] &= isOK;\n }\n }\n if(widgetType == \"CheckboxGroup\"){\n const col = dataOrig[key];\n const widgetValue = widget.value;\n for(let i=0; i 1) {\n let queryString='';\n let varString='';\n eval(varString+ 'var result = ('+ queryText+')');\n for(let i=0; i 0 && cdsSel != null){\n cdsSel.booleans = isSelected\n cdsSel.update()\n const t3 = performance.now();\n console.log(`Updating cds took ${t3 - t2} milliseconds.`);\n }\n if(options.cdsHistoSummary !== null){\n options.cdsHistoSummary.update();\n }\n console.log(\\\"nSelected:%d\\\",nSelected);\n \"\"\"\n if options[\"verbose\"] > 0:\n logging.info(\"makeJScallback:\\n\", code)\n # print(code)\n callback = CustomJS(args={'widgetDict': widgetDict, 'cdsOrig': cdsOrig, 'cdsSel': cdsSel, 'options': options},\n code=code)\n return callback\n\n\ndef __processBokehLayoutRow(layoutRow, figureList, layoutList, optionsMother, verbose=0):\n \"\"\"\n :param layoutRow:\n :param figureList:\n :param layoutList:\n :param optionsMother:\n :param verbose:\n :return:\n \"\"\"\n if verbose > 0: logging.info(\"Raw\", layoutRow)\n array = []\n layoutList.append(array)\n option = __processBokehLayoutOption(layoutRow)\n if verbose > 0: logging.info(\"Option\", option)\n for key in optionsMother:\n if not (key in option):\n option[key] = optionsMother[key]\n for idx, y in enumerate(layoutRow):\n if not y.isdigit(): continue\n try:\n fig = figureList[int(y)]\n except:\n logging.error(\"out of range index\", y)\n array.append(fig)\n if type(fig).__name__ == 'DataTable':\n continue\n if 'commonY' in option:\n if option[\"commonY\"] >= 0:\n try:\n fig.y_range = figureList[int(option[\"commonY\"])].y_range\n except ValueError:\n logging.info('Failed: to process option ' + option[\"commonY\"])\n continue\n except AttributeError:\n logging.info('Failed: to process option ' + option[\"commonY\"])\n continue\n if 'commonX' in option:\n if option[\"commonX\"] >= 0:\n try:\n fig.x_range = figureList[int(option[\"commonX\"])].x_range\n except ValueError:\n if verbose > 0: logging.info('Failed: to process option ' + option[\"commonX\"])\n continue\n except AttributeError:\n logging.info('Failed: to process option ')\n continue\n\n if (idx > 0) & ('y_visible' in option):\n fig.yaxis.visible = bool(option[\"y_visible\"]==1)\n if (idx == 0) & ('y_visible' in option):\n fig.yaxis.visible = bool(option[\"y_visible\"]!=0)\n if 'x_visible' in option:\n fig.xaxis.visible = bool(option[\"x_visible\"]==1)\n nCols = len(array)\n for fig in array:\n if type(fig).__name__ == 'Figure':\n if 'plot_width' in option:\n fig.plot_width = int(option[\"plot_width\"] / nCols)\n if 'plot_height' in option:\n fig.plot_height = int(option[\"plot_height\"])\n if type(fig).__name__ == 'DataTable':\n if 'plot_width' in option:\n fig.width = int(option[\"plot_width\"] / nCols)\n if 'plot_height' in option:\n fig.height = int(option[\"plot_height\"])\n if type(fig).__name__ == 'BokehVisJSGraph3D':\n if 'plot_width' in option:\n fig.width = int(option[\"plot_width\"] / nCols)\n if 'plot_height' in option:\n fig.height = int(option[\"plot_height\"])\n\n\ndef __processBokehLayoutOption(layoutOptions):\n \"\"\"\n :param layoutOptions:\n :return:\n \"\"\"\n # https://stackoverflow.com/questions/9305387/string-of-kwargs-to-kwargs\n options = {}\n for x in 
layoutOptions:\n if not (type(x) == str): continue\n if \"=\" in str(x): # one of the way to see if it's list\n try:\n k, v = x.split(\"=\")\n except ValueError:\n continue\n options[k] = v\n if v.isdigit():\n options[k] = int(v)\n else:\n try:\n options[k] = float(v)\n except ValueError:\n options[k] = v\n return options\n\n\ndef processBokehLayoutArray(widgetLayoutDesc, widgetArray):\n \"\"\"\n apply layout on plain array of bokeh figures, resp. interactive widgets\n :param widgetLayoutDesc: array desciption of layout\n :param widgetArray: input plain array of widgets/figures\n :return: combined figure\n Example: in tutorial/bokehDraw/makePandaWidgets.ipynb\n widgetLayoutDesc=[\n [0,1,2],\n [3,4,5],\n [6,7],\n {'width':10,'sizing_mode':'scale_width'}\n ]\n figureLayoutDesc=[\n [0,1,2, {'commonX':1,'y_visible':2, 'plot_height':300}],\n [2, {'commonX':1, 'y_visible':0}],\n {'width':10,'plot_height':200, 'sizing_mode':'scale_width'}\n ]\n \"\"\"\n if isinstance(widgetLayoutDesc, dict):\n tabs = []\n for i, iPanel in widgetLayoutDesc.items():\n tabs.append(Panel(child=processBokehLayoutArray(iPanel, widgetArray), title=i))\n return Tabs(tabs=tabs)\n options = {\n 'commonX': -1, 'commonY': -1,\n 'x_visible': 1, 'y_visible': 1,\n 'plot_width': -1, 'plot_height': -1,\n 'sizing_mode': 'scale_width',\n 'legend_visible': True\n }\n\n widgetRows = []\n nRows = len(widgetArray)\n # get/apply global options if exist\n if isinstance(widgetLayoutDesc[-1], dict):\n nRows -= 1\n options.update(widgetLayoutDesc[-1])\n widgetLayoutDesc = widgetLayoutDesc[0:-1]\n\n for rowWidget in widgetLayoutDesc:\n rowOptions = {}\n rowOptions.update(options)\n # patch local option\n if isinstance(rowWidget[-1], dict):\n rowOptions.update(rowWidget[-1])\n rowWidget = rowWidget[0:-1]\n rowWidgetArray0 = []\n for i, iWidget in enumerate(rowWidget):\n figure = widgetArray[iWidget]\n rowWidgetArray0.append(figure)\n if hasattr(figure, 'x_range'):\n if rowOptions['commonX'] >= 0:\n figure.x_range = widgetArray[int(rowOptions[\"commonX\"])].x_range\n if rowOptions['commonY'] >= 0:\n figure.y_range = widgetArray[int(rowOptions[\"commonY\"])].y_range\n if rowOptions['x_visible'] == 0:\n figure.xaxis.visible = False\n else:\n figure.xaxis.visible = True\n #figure.xaxis.visible = bool(rowOptions[\"x_visible\"])\n if rowOptions['y_visible'] == 0:\n figure.yaxis.visible = False\n if rowOptions['y_visible'] == 2:\n if i > 0: figure.yaxis.visible = False\n if hasattr(figure, 'plot_width'):\n if rowOptions[\"plot_width\"] > 0:\n plot_width = int(rowOptions[\"plot_width\"] / len(rowWidget))\n figure.plot_width = plot_width\n if rowOptions[\"plot_height\"] > 0:\n figure.plot_height = rowOptions[\"plot_height\"]\n if figure.legend:\n figure.legend.visible = rowOptions[\"legend_visible\"]\n if type(figure).__name__ == \"DataTable\":\n figure.height = int(rowOptions[\"plot_height\"])\n if type(figure).__name__ == \"BokehVisJSGraph3D\":\n if rowOptions[\"plot_width\"] > 0:\n plot_width = int(rowOptions[\"plot_width\"] / len(rowWidget))\n figure.width = plot_width\n if rowOptions[\"plot_height\"] > 0:\n figure.height = rowOptions[\"plot_height\"]\n\n rowWidgetArray = row(rowWidgetArray0, sizing_mode=rowOptions['sizing_mode'])\n widgetRows.append(rowWidgetArray)\n return column(widgetRows, sizing_mode=options['sizing_mode'])\n\n\ndef gridplotRow(figList0, **options):\n \"\"\"\n Make gridplot -resizing properly rows\n\n :param figList0: input array of figures\n :param options:\n :return:\n \"\"\"\n figList = []\n for frow in figList0:\n 
figList.append([row(frow)])\n pAll = gridplot(figList, **options)\n return pAll\n\n\ndef makeBokehDataTable(dataFrame, source, include, exclude, **kwargs):\n \"\"\"\n Create widget for datatable\n\n :param dataFrame:\n input data frame\n :param source:\n :return:\n \"\"\"\n columns = []\n for col in dataFrame.columns.values:\n isOK = True\n if hasattr(dataFrame, \"meta\"):\n title = dataFrame.meta.metaData.get(col + \".OrigName\", col);\n else:\n title = col\n if include:\n isOK = False\n if re.match(include, col):\n isOK = True\n if exclude:\n if re.match(exclude, col):\n isOK = False\n if isOK:\n columns.append(TableColumn(field=col, title=title))\n data_table = DataTable(source=source, columns=columns, **kwargs)\n return data_table\n\n\ndef makeBokehHistoTable(histoDict, rowwise=False, **kwargs):\n histo_names = []\n histo_columns = []\n bin_centers = []\n edges_left = []\n edges_right = []\n sources = []\n quantiles = []\n compute_quantile = []\n sum_range = []\n\n if \"formatter\" in kwargs:\n formatter = kwargs[\"formatter\"]\n else:\n formatter = ScientificFormatter(precision=3)\n\n for iHisto in histoDict:\n if histoDict[iHisto][\"type\"] == \"histogram\":\n histo_names.append(histoDict[iHisto][\"name\"])\n histo_columns.append(\"bin_count\")\n bin_centers.append(\"bin_center\")\n edges_left.append(\"bin_bottom\")\n edges_right.append(\"bin_top\")\n sources.append(histoDict[iHisto][\"cds\"])\n compute_quantile.append(True)\n if \"quantiles\" in histoDict[iHisto]:\n quantiles += histoDict[iHisto][\"quantiles\"]\n if \"sum_range\" in histoDict[iHisto]:\n sum_range += histoDict[iHisto][\"sum_range\"]\n elif histoDict[iHisto][\"type\"] in [\"histo2d\", \"histoNd\"]:\n for i in range(len(histoDict[iHisto][\"variables\"])):\n histo_names.append(histoDict[iHisto][\"name\"]+\"_\"+str(i))\n histo_columns.append(\"bin_count\")\n bin_centers.append(\"bin_center_\"+str(i))\n edges_left.append(\"bin_bottom_\"+str(i))\n edges_right.append(\"bin_top_\"+str(i))\n sources.append(histoDict[iHisto][\"cds\"])\n compute_quantile.append(False)\n\n quantiles = [*{*quantiles}]\n sum_range_uniq = []\n for i in sum_range:\n if i not in sum_range_uniq:\n sum_range_uniq.append(i)\n stats_cds = HistoStatsCDS(sources=sources, names=histo_names, bincount_columns=histo_columns, bin_centers=bin_centers,\n quantiles=quantiles, compute_quantile=compute_quantile, rowwise=rowwise,\n edges_left=edges_left, edges_right=edges_right, sum_range=sum_range_uniq)\n if rowwise:\n columns = [TableColumn(field=\"description\")]\n for i in histo_names:\n columns.append(TableColumn(field=i, formatter=formatter))\n data_table = DataTable(source=stats_cds, columns=columns, **kwargs)\n else:\n columns = [TableColumn(field=\"name\"), TableColumn(field=\"mean\", formatter=formatter),\n TableColumn(field=\"std\", formatter=formatter), TableColumn(field=\"entries\", formatter=formatter)]\n for (i, iQuantile) in enumerate(quantiles):\n columns.append(TableColumn(field=\"quantile_\"+format(i), title=\"Quantile \"+format(iQuantile),\n formatter=formatter))\n for (i, iBox) in enumerate(sum_range_uniq):\n columns.append(TableColumn(field=\"sum_\"+format(i), title=\"Σ(\"+format(iBox[0])+\",\"+format(iBox[1])+\")\",\n formatter=formatter))\n columns.append(TableColumn(field=\"sum_normed_\"+format(i), title=\"Σ_normed(\"+format(iBox[0])+\",\"+format(iBox[1])+\")\",\n formatter=formatter))\n data_table = DataTable(source=stats_cds, columns=columns, **kwargs)\n return stats_cds, data_table\n\n\ndef bokehDrawArray(dataFrame, query, figureArray, 
histogramArray=[], parameterArray=[], jsFunctionArray=[], aliasArray=[], sourceArray=None, **kwargs):\n \"\"\"\n Wrapper bokeh draw array of figures\n\n :param dataFrame: - input data frame\n :param query: - query\n :param figureArray: - figure array\n :param histogramArray: - (optional) histogram array\n :param parameterArray: - (optional) parameter array for parameters controllable on client\n :param kwargs:\n :return:\n variable list:\n * pAll\n * handle\n * source\n * plotArray\n\n See example test:\n RootInteractive/InteractiveDrawing/bokeh/test_bokehDrawSA.py\n \"\"\"\n options = {\n 'line': -1,\n 'size': 2,\n 'tools': 'pan,box_zoom, wheel_zoom,box_select,lasso_select,reset,save',\n 'tooltips': [],\n 'histoTooltips': defaultHistoTooltips,\n 'histo2dTooltips': defaultHisto2DTooltips,\n 'y_axis_type': 'auto',\n 'x_axis_type': 'auto',\n 'plot_width': 600,\n 'plot_height': 400,\n 'errX': '',\n 'errY': '',\n 'commonX': 0,\n 'commonY': 0,\n 'ncols': -1,\n 'layout': '',\n 'widgetLayout': '',\n 'palette': Spectral6,\n \"markers\": bokehMarkers,\n \"colors\": 'Category10',\n \"rescaleColorMapper\": False,\n \"filter\": '',\n 'doDraw': False,\n \"legend_field\": None,\n \"legendTitle\": None,\n 'nPointRender': 10000,\n \"nbins\": 10,\n \"weights\": None,\n \"histo2d\": False,\n \"range\": None,\n \"flip_histogram_axes\": False,\n \"show_histogram_error\": False,\n \"arrayCompression\": None,\n \"removeExtraColumns\": True,\n \"cdsDict\": {},\n \"xAxisTitle\": None,\n \"yAxisTitle\": None,\n \"plotTitle\": None\n }\n options.update(kwargs)\n if query is not None:\n dfQuery = dataFrame.query(query).copy()\n if hasattr(dataFrame, 'metaData'):\n dfQuery.metaData = dataFrame.metaData\n logging.info(dfQuery.metaData)\n else:\n dfQuery = dataFrame.copy()\n # Check/resp. 
load derived variables\n\n if isinstance(figureArray[-1], dict):\n options.update(figureArray[-1])\n\n if sourceArray is not None:\n histogramArray = histogramArray + sourceArray\n\n dfQuery, histogramDict, downsamplerColumns, \\\n columnNameDict, parameterDict, customJsColumns = makeDerivedColumns(dfQuery, figureArray, histogramArray=histogramArray,\n parameterArray=parameterArray, aliasArray=aliasArray, options=options)\n\n paramDict = bokehMakeParameters(parameterArray, histogramArray, figureArray, variableList=list(columnNameDict))\n\n jsFunctionDict = {}\n for i in jsFunctionArray:\n customJsArgList = {}\n if isinstance(i[\"parameters\"], list):\n for j in i[\"parameters\"]:\n customJsArgList[j] = paramDict[j][\"value\"]\n if \"v_func\" in i:\n jsFunctionDict[i[\"name\"]] = CustomJSNAryFunction(parameters=customJsArgList, fields=i[\"fields\"], v_func=i[\"v_func\"])\n else:\n jsFunctionDict[i[\"name\"]] = CustomJSNAryFunction(parameters=customJsArgList, fields=i[\"fields\"], func=i[\"func\"])\n if isinstance(i[\"parameters\"], list):\n for j in i[\"parameters\"]:\n paramDict[j][\"subscribed_events\"].append([\"value\", CustomJS(args={\"mapper\":jsFunctionDict[i[\"name\"]], \"param\":j}, code=\"\"\"\n mapper.parameters[param] = this.value\n mapper.update_args()\n \"\"\")])\n\n aliasDict = {\"\":{}}\n aliasSet = set()\n for i in aliasArray:\n customJsArgList = {}\n transform = None\n if customJsColumns[i[\"name\"]]:\n aliasSet.add(i[\"name\"])\n if \"transform\" in i:\n if i[\"transform\"] in jsFunctionDict:\n if \"context\" in i:\n if i[\"context\"] not in aliasDict:\n aliasDict[i[\"context\"]] = {}\n aliasDict[i[\"context\"]][i[\"name\"]] = {\"fields\": i[\"variables\"], \"transform\": jsFunctionDict[i[\"transform\"]]}\n else:\n aliasDict[\"\"][i[\"name\"]] = {\"fields\": i[\"variables\"], \"transform\": jsFunctionDict[i[\"transform\"]]}\n else:\n if \"parameters\" in i:\n for j in i[\"parameters\"]:\n customJsArgList[j] = paramDict[j][\"value\"]\n if \"v_func\" in i:\n transform = CustomJSNAryFunction(parameters=customJsArgList, fields=i[\"variables\"], v_func=i[\"v_func\"])\n else:\n transform = CustomJSNAryFunction(parameters=customJsArgList, fields=i[\"variables\"], func=i[\"func\"])\n if \"context\" in i:\n if i[\"context\"] not in aliasDict:\n aliasDict[i[\"context\"]] = {}\n aliasDict[i[\"context\"]][i[\"name\"]] = {\"fields\": i[\"variables\"], \"transform\": transform}\n else:\n aliasDict[\"\"][i[\"name\"]] = {\"fields\": i[\"variables\"], \"transform\": transform}\n if \"parameters\" in i:\n for j in i[\"parameters\"]:\n paramDict[j][\"subscribed_events\"].append([\"value\", CustomJS(args={\"mapper\":transform, \"param\":j}, code=\"\"\"\n mapper.parameters[param] = this.value\n mapper.update_args()\n \"\"\")])\n\n\n plotArray = []\n colorAll = all_palettes[options['colors']]\n colorMapperDict = {}\n cdsHistoSummary = None\n\n for i in dfQuery.keys():\n columnNameDict[i] = i\n\n columnNameDict.update(aliasDict[\"\"])\n\n cdsFull = None\n if options['arrayCompression'] is not None:\n print(\"compressCDSPipe\")\n cdsCompress0, sizeMap= compressCDSPipe(dfQuery,options[\"arrayCompression\"],1)\n cdsCompress=CDSCompress(inputData=cdsCompress0, sizeMap=sizeMap)\n cdsFull=cdsCompress\n else:\n try:\n cdsFull = ColumnDataSource(dfQuery)\n except:\n logging.error(\"Invalid source:\", cdsFull)\n\n if aliasDict[\"\"]:\n cdsFull = CDSAlias(source=cdsFull, mapping=columnNameDict)\n\n if downsamplerColumns:\n source = DownsamplerCDS(source=cdsFull, nPoints=options['nPointRender'], 
selectedColumns=downsamplerColumns)\n else:\n source = None\n\n histogramDict, histoList = bokehMakeHistogramCDS(dfQuery, cdsFull, histogramArray, histogramDict, aliasDict=aliasDict)\n cdsDict = options[\"cdsDict\"]\n\n profileList = []\n for i in histogramDict:\n if i not in cdsDict:\n cdsDict[i] = histogramDict[i][\"cds\"]\n\n for i, variables in enumerate(figureArray):\n logging.info(\"%d\\t%s\", i, variables)\n if isinstance(variables, dict):\n continue\n if variables[0] == 'table':\n TOptions = {\n 'include': '',\n 'exclude': ''\n }\n if len(variables) > 1:\n TOptions.update(variables[1])\n plotArray.append(makeBokehDataTable(dfQuery, source, TOptions['include'], TOptions['exclude']))\n continue\n if variables[0] == 'tableHisto':\n TOptions = {'rowwise': False}\n if len(variables) > 1:\n TOptions.update(variables[1])\n cdsHistoSummary, tableHisto = makeBokehHistoTable(histogramDict, rowwise=TOptions[\"rowwise\"])\n plotArray.append(tableHisto)\n continue\n xAxisTitle = \"\"\n yAxisTitle = \"\"\n # zAxisTitle = \"\"\n plotTitle = \"\"\n\n for varY in variables[1]:\n if hasattr(dfQuery, \"meta\") and '.' not in varY:\n yAxisTitle += dfQuery.meta.metaData.get(varY + \".AxisTitle\", varY)\n else:\n dfQuery, varNameY, cds_name = getOrMakeColumn(dfQuery, varY, None, aliasSet)\n yAxisTitle += getHistogramAxisTitle(histogramDict, varNameY, cds_name, False)\n yAxisTitle += ','\n for varX in variables[0]:\n if hasattr(dfQuery, \"meta\") and '.' not in varX:\n xAxisTitle += dfQuery.meta.metaData.get(varX + \".AxisTitle\", varX)\n else:\n dfQuery, varNameX, cds_name = getOrMakeColumn(dfQuery, varX, None, aliasSet)\n xAxisTitle += getHistogramAxisTitle(histogramDict, varNameX, cds_name, False)\n xAxisTitle += ','\n xAxisTitle = xAxisTitle[:-1]\n yAxisTitle = yAxisTitle[:-1]\n\n optionLocal = copy.copy(options)\n if len(variables) > 2:\n logging.info(\"Option %s\", variables[2])\n optionLocal.update(variables[2])\n\n if optionLocal[\"xAxisTitle\"] is not None:\n xAxisTitle = optionLocal[\"xAxisTitle\"]\n if optionLocal[\"yAxisTitle\"] is not None:\n yAxisTitle = optionLocal[\"yAxisTitle\"]\n plotTitle += yAxisTitle + \" vs \" + xAxisTitle\n if optionLocal[\"plotTitle\"] is not None:\n plotTitle = optionLocal[\"plotTitle\"]\n\n if 'varZ' in optionLocal.keys():\n dfQuery, varNameY, cds_name = getOrMakeColumn(dfQuery, variables[1][0], None, aliasSet)\n _, varNameX, cds_name = getOrMakeColumn(dfQuery, variables[0][0], cds_name, aliasSet)\n _, varNameZ, cds_name = getOrMakeColumn(dfQuery, optionLocal['varZ'], cds_name, aliasSet)\n _, varNameColor, cds_name = getOrMakeColumn(dfQuery, optionLocal['colorZvar'], cds_name, aliasSet)\n options3D = {\"width\": \"99%\", \"height\": \"99%\"}\n cds_used = source\n if cds_name is not None:\n cds_used = cdsDict[cds_name]\n plotI = BokehVisJSGraph3D(width=options['plot_width'], height=options['plot_height'],\n data_source=cds_used, x=varNameX, y=varNameY, z=varNameZ, style=varNameColor,\n options3D=options3D)\n plotArray.append(plotI)\n continue\n else:\n figureI = figure(plot_width=options['plot_width'], plot_height=options['plot_height'], title=plotTitle,\n tools=options['tools'], x_axis_type=options['x_axis_type'],\n y_axis_type=options['y_axis_type'])\n\n figureI.xaxis.axis_label = xAxisTitle\n figureI.yaxis.axis_label = yAxisTitle\n\n # graphArray=drawGraphArray(df, variables)\n lengthX = len(variables[0])\n lengthY = len(variables[1])\n length = max(len(variables[0]), len(variables[1]))\n color_bar = None\n mapperC = None\n cmap_cds_name = None\n if 
'colorZvar' in optionLocal:\n #TODO: Support multiple color mappers, add more options, possibly use custom color mapper to improve performance\n #So far, parametrized colZ is only supported for the main CDS\n logging.info(\"%s\", optionLocal[\"colorZvar\"])\n colorZVar = optionLocal['colorZvar']\n if colorZVar in paramDict:\n colorZVar = paramDict[colorZVar]['value']\n _, varColor, cmap_cds_name = getOrMakeColumn(dfQuery, colorZVar, None, aliasSet)\n low = 0\n high = 1\n if cmap_cds_name is None:\n low = min(dfQuery[varColor])\n high=max(dfQuery[varColor])\n if \"cmapLow\" in optionLocal:\n low = optionLocal[\"cmapLow\"]\n if \"cmapHigh\" in optionLocal:\n high = optionLocal[\"cmapHigh\"] \n if optionLocal[\"rescaleColorMapper\"] or optionLocal[\"colorZvar\"] in paramDict:\n if optionLocal[\"colorZvar\"] in colorMapperDict:\n mapperC = colorMapperDict[optionLocal[\"colorZvar\"]]\n else:\n mapperC = {\"field\": varColor, \"transform\": LinearColorMapper(palette=optionLocal['palette'])}\n colorMapperDict[optionLocal[\"colorZvar\"]] = mapperC\n else:\n mapperC = linear_cmap(field_name=varColor, palette=optionLocal['palette'], low=low, high=high)\n cds_used = source\n if cmap_cds_name is not None:\n cds_used = cdsDict[cmap_cds_name]\n # This is really hacky, will probably be removed when ND histogram joins start working\n if cmap_cds_name in histogramDict and histogramDict[cmap_cds_name]['type'] == 'profile' and varColor.split('_')[0] == 'bin':\n histogramDict[cmap_cds_name]['cds'].js_on_change('change', CustomJS(code=\"\"\"\n const col = this.data[field]\n const isOK = this.data.isOK\n const low = col.map((x,i) => isOK[i] ? col[i] : Infinity).reduce((acc, cur)=>Math.min(acc,cur), Infinity);\n const high = col.map((x,i) => isOK[i] ? col[i] : -Infinity).reduce((acc, cur)=>Math.max(acc,cur), -Infinity);\n cmap.high = high;\n cmap.low = low;\n \"\"\", args={\"field\": mapperC[\"field\"], \"cmap\": mapperC[\"transform\"]})) \n axis_title = getHistogramAxisTitle(histogramDict, varColor, cmap_cds_name)\n color_bar = ColorBar(color_mapper=mapperC['transform'], width=8, location=(0, 0), title=axis_title)\n if optionLocal['colorZvar'] in paramDict:\n paramDict[optionLocal['colorZvar']][\"subscribed_events\"].append([\"value\", color_bar, \"title\"])\n\n hover_tool_renderers = {}\n\n figure_cds_name = None\n\n for i in range(0, length):\n cds_name = None\n if variables[1][i % lengthY] in histogramDict:\n iHisto = histogramDict[variables[1][i % lengthY]]\n if iHisto[\"type\"] == \"histogram\":\n dfQuery, varNameY = pandaGetOrMakeColumn(dfQuery, iHisto[\"variables\"][0])\n elif iHisto[\"type\"] == \"histo2d\":\n dfQuery, varNameX = pandaGetOrMakeColumn(dfQuery, iHisto[\"variables\"][0])\n dfQuery, varNameY = pandaGetOrMakeColumn(dfQuery, iHisto[\"variables\"][1])\n else:\n dfQuery, varNameX, cds_name = getOrMakeColumn(dfQuery, variables[0][i % lengthX], cds_name, aliasSet)\n dfQuery, varNameY, cds_name = getOrMakeColumn(dfQuery, variables[1][i % lengthY], cds_name, aliasSet)\n if mapperC is not None and cds_name == cmap_cds_name:\n color = mapperC\n else:\n color = colorAll[max(length, 4)][i]\n if 'color' in optionLocal:\n color=optionLocal['color']\n try:\n marker = optionLocal['markers'][i]\n except:\n marker = optionLocal['markers']\n markerSize = optionLocal['size']\n if markerSize in paramDict:\n markerSize = paramDict[markerSize]['value']\n if len(variables) > 2:\n logging.info(\"Option %s\", variables[2])\n optionLocal.update(variables[2])\n varX = variables[0][i % lengthX]\n varY = variables[1][i 
% lengthY]\n cds_used = source\n if cds_name is not None:\n cds_used = cdsDict[cds_name]\n\n if varY in histogramDict:\n histoHandle = histogramDict[varY]\n if histoHandle[\"type\"] == \"histogram\":\n colorHisto = colorAll[max(length, 4)][i]\n addHistogramGlyph(figureI, histoHandle, marker, colorHisto, markerSize, optionLocal)\n elif histoHandle[\"type\"] == \"histo2d\":\n addHisto2dGlyph(figureI, varNameX, varNameY, histoHandle, colorMapperDict, color, marker, dfQuery,\n optionLocal)\n else:\n # zAxisTitle +=varColor + \",\"\n # view = CDSView(source=source, filters=[GroupFilter(column_name=optionLocal['filter'], group=True)])\n drawnGlyph = None\n colorMapperCallback = \"\"\"\n glyph.fill_color={...glyph.fill_color, field:this.value}\n glyph.line_color={...glyph.line_color, field:this.value}\n \"\"\"\n if optionLocal[\"legend_field\"] is None:\n x_label = getHistogramAxisTitle(histogramDict, varNameX, cds_name)\n y_label = getHistogramAxisTitle(histogramDict, varNameY, cds_name)\n drawnGlyph = figureI.scatter(x=varNameX, y=varNameY, fill_alpha=1, source=cds_used, size=markerSize,\n color=color, marker=marker, legend_label=y_label + \" vs \" + x_label)\n else:\n drawnGlyph = figureI.scatter(x=varNameX, y=varNameY, fill_alpha=1, source=cds_used, size=markerSize,\n color=color, marker=marker, legend_field=optionLocal[\"legend_field\"])\n if \"colorZvar\" in optionLocal and optionLocal[\"colorZvar\"] in paramDict:\n if len(color[\"transform\"].domain) == 0:\n color[\"transform\"].domain = [(drawnGlyph, color[\"field\"])]\n # HACK: This changes the color mapper's domain, which only consists of one field. \n paramDict[optionLocal['colorZvar']][\"subscribed_events\"].append([\"value\", CustomJS(args={\"transform\": color[\"transform\"]}, code=\"\"\"\n transform.domain[0] = [transform.domain[0][0], this.value]\n transform.change.emit()\n \"\"\")])\n paramDict[optionLocal['colorZvar']][\"subscribed_events\"].append([\"value\", CustomJS(args={\"glyph\": drawnGlyph.glyph}, code=colorMapperCallback)])\n if optionLocal['size'] in paramDict:\n paramDict[optionLocal['size']][\"subscribed_events\"].append([\"value\", drawnGlyph.glyph, \"size\"])\n if cds_name is None:\n if \"\" not in hover_tool_renderers:\n hover_tool_renderers[\"\"] = []\n hover_tool_renderers[\"\"].append(drawnGlyph)\n elif cds_name in histogramDict:\n if histogramDict[cds_name][\"type\"] == \"profile\":\n if cds_name not in hover_tool_renderers:\n hover_tool_renderers[cds_name] = []\n hover_tool_renderers[cds_name].append(drawnGlyph)\n if ('errX' in optionLocal.keys()) and (optionLocal['errX'] != '') and (cds_name is None):\n dfQuery, varErrX = pandaGetOrMakeColumn(dfQuery, optionLocal['errX'])\n errWidthX = bokehTransform(varErrX, CustomJSTransform(v_func=\"return xs.map((x)=>2*x)\"))\n errorX = VBar(top=varNameY, bottom=varNameY, width=errWidthX, x=varNameX, line_color=color)\n if \"colorZvar\" in optionLocal and optionLocal[\"colorZvar\"] in paramDict:\n paramDict[optionLocal['colorZvar']][\"subscribed_events\"].append([\"value\", CustomJS(args={\"glyph\": errorX}, code=colorMapperCallback)])\n figureI.add_glyph(source, errorX)\n if ('errY' in optionLocal.keys()) and (optionLocal['errY'] != '') and (cds_name is None):\n dfQuery, varErrY = pandaGetOrMakeColumn(dfQuery, optionLocal['errY'])\n errWidthY = bokehTransform(varErrY, CustomJSTransform(v_func=\"return xs.map((x)=>2*x)\"))\n errorY = HBar(left=varNameX, right=varNameX, height=errWidthY, y=varNameY, line_color=color)\n if \"colorZvar\" in optionLocal and 
optionLocal[\"colorZvar\"] in paramDict:\n paramDict[optionLocal['colorZvar']][\"subscribed_events\"].append([\"value\", CustomJS(args={\"glyph\": errorY}, code=colorMapperCallback)])\n figureI.add_glyph(source, errorY)\n # errors = Band(base=varNameX, lower=varNameY+\"_lower\", upper=varNameY+\"_upper\",source=source)\n # figureI.add_layout(errors)\n if figure_cds_name is None:\n figure_cds_name = cds_name\n elif figure_cds_name != cds_name:\n figure_cds_name = \"\"\n\n if color_bar != None:\n figureI.add_layout(color_bar, 'right')\n for iCds, iRenderers in hover_tool_renderers.items():\n if iCds == \"\":\n tooltips = optionLocal[\"tooltips\"]\n elif iCds in histogramDict and histogramDict[iCds][\"type\"] == \"profile\":\n profile_description = histogramDict[iCds]\n tooltips = defaultNDProfileTooltips(profile_description[\"variables\"], profile_description[\"axis\"],\n profile_description[\"quantiles\"], profile_description[\"sum_range\"])\n figureI.add_tools(HoverTool(tooltips=tooltips, renderers=iRenderers))\n if figureI.legend:\n figureI.legend.click_policy = \"hide\"\n if optionLocal[\"legendTitle\"] is not None:\n logging.warn(\"legendTitle is deprecated, please use the 'title' field in 'legend_options'\")\n figureI.legend.title = optionLocal[\"legendTitle\"]\n elif figure_cds_name != \"\":\n figureI.legend.title = figure_cds_name\n if 'legend_options' in optionLocal:\n legend_options = optionLocal['legend_options'].copy()\n legend_options_parameters = {}\n for i, iOption in legend_options.items():\n if iOption in parameterDict:\n legend_options_parameters[i] = paramDict[iOption]\n for i, iOption in legend_options_parameters.items():\n legend_options[i] = iOption['value']\n figureI.legend.update(**legend_options)\n for i, iOption in legend_options_parameters.items():\n iOption[\"subscribed_events\"].append([\"value\", figureI.legend[0], i]) \n # zAxisTitle=zAxisTitle[:-1]\n # if(len(zAxisTitle)>0):\n # plotTitle += \" Color:\" + zAxisTitle\n # figureI.title = plotTitle\n plotArray.append(figureI)\n if isinstance(options['layout'], list) or isinstance(options['layout'], dict):\n pAll = processBokehLayoutArray(options['layout'], plotArray)\n layoutList = [pAll]\n if options['doDraw']:\n show(pAll)\n return pAll, source, layoutList, dfQuery, colorMapperDict, cdsFull, histoList, cdsHistoSummary, profileList, paramDict, aliasDict\n\n\ndef addHisto2dGlyph(fig, x, y, histoHandle, colorMapperDict, color, marker, dfQuery, options):\n visualization_type = \"heatmap\"\n if \"visualization_type\" in options:\n visualization_type = options[\"visualization_type\"]\n cdsHisto = histoHandle[\"cds\"]\n\n tooltips = None\n if \"tooltips\" in histoHandle:\n tooltips = histoHandle[\"tooltips\"]\n elif \"tooltips\" in options:\n tooltips = options[\"histo2dTooltips\"]\n\n if visualization_type == \"heatmap\":\n # Flipping histogram axes probably doesn't make sense in this case.\n mapperC = {\"field\": \"bin_count\", \"transform\": LinearColorMapper(palette=options['palette'])}\n color_bar = ColorBar(color_mapper=mapperC['transform'], width=8, location=(0, 0),\n title=\"Count\")\n histoGlyph = Quad(left=\"bin_bottom_0\", right=\"bin_top_0\", bottom=\"bin_bottom_1\", top=\"bin_top_1\",\n fill_color=mapperC)\n histoGlyphRenderer = fig.add_glyph(cdsHisto, histoGlyph)\n fig.add_layout(color_bar, 'right')\n elif visualization_type == \"colZ\":\n mapperC = {\"field\": \"bin_count\", \"transform\": LinearColorMapper(palette=options['palette'])}\n color_bar = ColorBar(color_mapper=mapperC['transform'], width=8, 
location=(0, 0),\n                             title=y)\n        if options[\"legend_field\"] is None:\n            histoGlyphRenderer = fig.scatter(x=\"bin_center_0\", y=\"bin_count\", fill_alpha=1, source=cdsHisto, size=options['size'],\n                                             color=mapperC, marker=marker, legend_label=\"Histogram of \" + x)\n        else:\n            histoGlyphRenderer = fig.scatter(x=\"bin_center_0\", y=\"bin_count\", fill_alpha=1, source=cdsHisto, size=options['size'],\n                                             color=mapperC, marker=marker, legend_field=options[\"legend_field\"])\n        if \"show_histogram_error\" in options:\n            errorbar = VBar(x=\"bin_center_0\", width=0, top=\"errorbar_high\", bottom=\"errorbar_low\", line_color=mapperC)\n            fig.add_glyph(cdsHisto, errorbar)\n        fig.add_layout(color_bar, 'right')\n    if tooltips is not None:\n        fig.add_tools(HoverTool(renderers=[histoGlyphRenderer], tooltips=tooltips))\n\n\ndef addHistogramGlyph(fig, histoHandle, marker, colorHisto, size, options):\n    cdsHisto = histoHandle[\"cds\"]\n    if 'color' in options:\n        colorHisto = options['color']\n    tooltips = None\n    if \"tooltips\" in histoHandle:\n        tooltips = histoHandle[\"tooltips\"]\n    elif \"tooltips\" in options:\n        tooltips = options[\"histoTooltips\"]\n    visualization_type = \"points\"\n    histoGlyphRenderer = None\n    if \"visualization_type\" in options:\n        visualization_type = options[\"visualization_type\"]\n    if visualization_type == \"bars\":\n        if options['flip_histogram_axes']:\n            histoGlyph = Quad(left=0, right=\"bin_count\", bottom=\"bin_left\", top=\"bin_right\", fill_color=colorHisto)\n        else:\n            histoGlyph = Quad(left=\"bin_left\", right=\"bin_right\", bottom=0, top=\"bin_count\", fill_color=colorHisto)\n        histoGlyphRenderer = fig.add_glyph(cdsHisto, histoGlyph)\n    elif visualization_type == \"points\":\n        if options['flip_histogram_axes']:\n            histoGlyphRenderer = fig.scatter(y=\"bin_center\", x=\"bin_count\", color=colorHisto, marker=marker, source=cdsHisto, size=size,\n                                             legend_label=histoHandle[\"variables\"][0])\n            if \"show_histogram_error\" in options:\n                errorbar = HBar(y=\"bin_center\", height=0, left=\"errorbar_low\", right=\"errorbar_high\", line_color=colorHisto)\n                fig.add_glyph(cdsHisto, errorbar)\n        else:\n            histoGlyphRenderer = fig.scatter(x=\"bin_center\", y=\"bin_count\", color=colorHisto, marker=marker, source=cdsHisto, size=size,\n                                             legend_label=histoHandle[\"variables\"][0])\n            if \"show_histogram_error\" in options:\n                errorbar = VBar(x=\"bin_center\", width=0, top=\"errorbar_high\", bottom=\"errorbar_low\", line_color=colorHisto)\n                fig.add_glyph(cdsHisto, errorbar)\n    if tooltips is not None:\n        fig.add_tools(HoverTool(renderers=[histoGlyphRenderer], tooltips=tooltips))\n\ndef makeBokehSliderWidget(df: pd.DataFrame, isRange: bool, params: list, paramDict: dict, **kwargs):\n    options = {\n        'type': 'auto',\n        'bins': 30,\n        'sigma': 4,\n        'limits': (0.05, 0.05),\n        'title': '',\n    }\n    options.update(kwargs)\n    name = params[0]\n    title = params[0]\n    if len(options['title']) > 0:\n        title = options['title']\n    start = 0\n    end = 0\n    step = 0\n    value=None\n    if options['callback'] == 'parameter':\n        if options['type'] == 'user':\n            # unpack user-supplied (start, end, step, low, high) in one tuple assignment\n            start, end, step, value = params[1], params[2], params[3], (params[4], params[5])\n        else:\n            param = paramDict[params[0]]\n            start = param['range'][0]\n            end = param['range'][1]\n            bins = options['bins'] \n            if 'bins' in param:\n                bins = param['bins']\n            if 'step' in param:\n                step = param['step']\n            else:\n                step = (end - start) / bins\n            value = paramDict[params[0]][\"value\"]\n    else:\n        if options['type'] == 'user':\n            start, end, step, value = params[1], params[2], params[3], (params[4], params[5])\n        elif 
(options['type'] == 'auto') | (options['type'] == 'minmax'):\n start = df[name].min()\n end = df[name].max()\n step = (end - start) / options['bins']\n elif (options['type'] == 'unique'):\n start = df[name].min()\n end = df[name].max()\n nbins=df[name].unique().size-1\n step = (end - start) / float(nbins)\n elif options['type'] == 'sigma':\n mean = df[name].mean()\n sigma = df[name].std()\n start = mean - options['sigma'] * sigma\n end = mean + options['sigma'] * sigma\n step = (end - start) / options['bins']\n elif options['type'] == 'sigmaMed':\n mean = df[name].median()\n sigma = df[name].std()\n start = mean - options['sigma'] * sigma\n end = mean + options['sigma'] * sigma\n step = (end - start) / options['bins']\n elif options['type'] == 'sigmaTM':\n mean = df[name].trimmed_mean(options['limits'])\n sigma = df[name].trimmed_std(options['limits'])\n start = mean - options['sigma'] * sigma\n end = mean + options['sigma'] * sigma\n step = (end - start) / options['bins']\n if isRange:\n if (start==end):\n start-=1\n end+=1\n if value is None:\n value = (start, end)\n slider = RangeSlider(title=title, start=start, end=end, step=step, value=value)\n else:\n if value is None:\n value = (start + end) * 0.5\n slider = Slider(title=title, start=start, end=end, step=step, value=value)\n return slider\n\n\ndef makeBokehSelectWidget(df: pd.DataFrame, params: list, paramDict: dict, default=None, **kwargs):\n options = {'size': 10}\n options.update(kwargs)\n # optionsPlot = []\n if len(params) == 1:\n if options['callback'] == 'parameter':\n optionsPlot = paramDict[params[0]][\"options\"]\n else:\n optionsPlot = np.sort(df[params[0]].unique()).tolist()\n else:\n optionsPlot = params[1:]\n for i, val in enumerate(optionsPlot):\n optionsPlot[i] = str((val))\n default_value = 0\n if isinstance(default, int):\n if 0 <= default < len(optionsPlot):\n default_value = default\n else:\n raise IndexError(\"Default value out of range for select widget.\")\n elif default is None:\n if options['callback'] == 'parameter':\n default_value = optionsPlot.index(paramDict[params[0]][\"value\"])\n else:\n default_value = optionsPlot.index(paramDict[params[0]][\"value\"])\n return Select(title=params[0], value=optionsPlot[default_value], options=optionsPlot)\n\n\ndef makeBokehMultiSelectWidget(df: pd.DataFrame, params: list, paramDict: dict, **kwargs):\n # print(\"makeBokehMultiSelectWidget\",params,kwargs)\n options = {'default': 0, 'size': 4}\n options.update(kwargs)\n # optionsPlot = []\n if len(params) == 1:\n try:\n optionsPlot = np.sort(df[params[0]].unique()).tolist()\n except:\n optionsPlot = sorted(df[params[0]].unique().tolist())\n else:\n optionsPlot = params[1:]\n for i, val in enumerate(optionsPlot):\n optionsPlot[i] = str((val))\n # print(optionsPlot)\n return MultiSelect(title=params[0], value=optionsPlot, options=optionsPlot, size=options['size'])\n\n\ndef makeBokehCheckboxWidget(df: pd.DataFrame, params: list, paramDict: dict, **kwargs):\n options = {'default': 0, 'size': 10}\n options.update(kwargs)\n # optionsPlot = []\n if len(params) == 1:\n optionsPlot = np.sort(df[params[0]].unique()).tolist()\n else:\n optionsPlot = params[1:]\n for i, val in enumerate(optionsPlot):\n optionsPlot[i] = str(val)\n return CheckboxGroup(labels=optionsPlot, active=[])\n\n\ndef makeBokehWidgets(df, widgetParams, cdsOrig, cdsSel, histogramList=[], cmapDict=None, cdsHistoSummary=None, profileList=None, paramDict={}, aliasDict=None, nPointRender=10000):\n widgetArray = []\n widgetDict = {}\n for widget in widgetParams:\n 
type = widget[0]\n params = widget[1]\n optionLocal = {}\n localWidget = None\n if len(widget) == 3:\n optionLocal = widget[2]\n if \"callback\" not in optionLocal:\n if params[0] in paramDict:\n optionLocal[\"callback\"] = \"parameter\"\n else:\n optionLocal[\"callback\"] = \"selection\"\n if type == 'range':\n localWidget = makeBokehSliderWidget(df, True, params, paramDict, **optionLocal)\n if type == 'slider':\n localWidget = makeBokehSliderWidget(df, False, params, paramDict, **optionLocal)\n if type == 'select':\n localWidget = makeBokehSelectWidget(df, params, paramDict, **optionLocal)\n if type == 'multiSelect':\n localWidget = makeBokehMultiSelectWidget(df, params, paramDict, **optionLocal)\n # if type=='checkbox':\n # localWidget=makeBokehCheckboxWidget(df,params,**options)\n if localWidget:\n widgetArray.append(localWidget)\n if optionLocal[\"callback\"] == \"selection\":\n widgetDict[params[0]] = localWidget\n callbackSel = makeJScallbackOptimized(widgetDict, cdsOrig, cdsSel, histogramList=histogramList,\n cmapDict=cmapDict, nPointRender=nPointRender,\n cdsHistoSummary=cdsHistoSummary, profileList=profileList, aliasDict=aliasDict)\n #callback = makeJScallbackOptimized(widgetDict, cdsOrig, cdsSel, histogramList=histogramList, cmapDict=cmapDict, nPointRender=nPointRender)\n for iDesc, iWidget in zip(widgetParams, widgetArray):\n optionLocal = {}\n params = iDesc[1]\n if len(iDesc) == 3:\n optionLocal = iDesc[2]\n if \"callback\" not in optionLocal:\n if params[0] in paramDict:\n optionLocal[\"callback\"] = \"parameter\"\n else:\n optionLocal[\"callback\"] = \"selection\"\n if optionLocal[\"callback\"] == \"selection\":\n callback = callbackSel\n elif optionLocal[\"callback\"] == \"parameter\":\n paramControlled = paramDict[iDesc[1][0]]\n for iEvent in paramControlled[\"subscribed_events\"]:\n if len(iEvent) == 2:\n iWidget.js_on_change(*iEvent)\n else:\n iWidget.js_link(*iEvent)\n continue\n if isinstance(iWidget, CheckboxGroup):\n iWidget.js_on_click(callback)\n elif isinstance(iWidget, Slider) or isinstance(iWidget, RangeSlider):\n iWidget.js_on_change(\"value\", callback)\n else:\n iWidget.js_on_change(\"value\", callback)\n iWidget.js_on_event(\"value\", callback)\n return widgetArray\n\n\ndef bokehMakeHistogramCDS(dfQuery, cdsFull, histogramArray=[], histogramDict=None, parameterDict={}, aliasDict={}, **kwargs):\n options = {\"range\": None,\n \"nbins\": 10,\n \"weights\": None,\n \"quantiles\": [],\n \"sum_range\": [],\n \"histograms\": {}\n }\n histoDict = {}\n histoList = []\n for iHisto in histogramArray:\n histoName = iHisto[\"name\"]\n if histogramDict is not None and not histogramDict[histoName]:\n continue\n # XXX: This is not a histogram, this is a join\n if \"left\" in iHisto:\n if iHisto[\"left\"] is not None:\n left = histoDict[iHisto[\"left\"]][\"cds\"]\n else:\n left = cdsFull\n if iHisto[\"right\"] is not None:\n right = histoDict[iHisto[\"right\"]][\"cds\"]\n else:\n right = cdsFull\n on_left = []\n if \"left_on\" in iHisto:\n on_left = iHisto[\"left_on\"]\n on_right = []\n if \"right_on\" in iHisto:\n on_right = iHisto[\"right_on\"]\n how = \"inner\"\n cdsHisto = CDSJoin(left=left, right=right, on_left=on_left, on_right=on_right, how=how)\n if iHisto[\"name\"] in aliasDict:\n mapping = aliasDict[iHisto[\"name\"]]\n cdsHisto = CDSAlias(source=cdsHisto, mapping=mapping)\n histoDict[histoName] = iHisto.copy()\n histoDict[histoName].update({\"cds\": cdsHisto, \"type\": \"join\"})\n continue\n # XXX: Just rename this function already\n if \"data\" in iHisto:\n 
source = None\n if 'arrayCompression' in iHisto:\n print(\"compressCDSPipe\")\n cdsCompress0, sizeMap= compressCDSPipe(iHisto[\"data\"].copy(), iHisto[\"arrayCompression\"],1)\n cdsCompress=CDSCompress(inputData=cdsCompress0, sizeMap=sizeMap)\n source=cdsCompress\n else:\n source = ColumnDataSource(dfQuery)\n if iHisto[\"name\"] in aliasDict:\n mapping = aliasDict[iHisto[\"name\"]]\n source = CDSAlias(source=source, mapping=mapping) \n histoDict[histoName] = iHisto.copy()\n histoDict[histoName].update({\"cds\": source, \"type\": \"source\"})\n continue \n optionLocal = copy.copy(options)\n optionLocal.update(iHisto)\n weights = None\n cdsUsed = None\n if \"source\" in optionLocal:\n cdsUsed = optionLocal[\"source\"]\n sampleVars = iHisto[\"variables\"]\n if optionLocal[\"weights\"] is not None:\n _, weights, cdsUsed = getOrMakeColumn(dfQuery, optionLocal[\"weights\"], cdsUsed, aliasDict[\"\"])\n if len(sampleVars) == 1:\n _, varNameX, cdsUsed = getOrMakeColumn(dfQuery, sampleVars[0], cdsUsed, aliasDict[\"\"])\n source = cdsFull\n if \"source\" in iHisto:\n source = iHisto[\"source\"]\n cdsHisto = HistogramCDS(source=source, nbins=optionLocal[\"nbins\"], histograms=optionLocal[\"histograms\"],\n range=optionLocal[\"range\"], sample=varNameX, weights=weights)\n histoList.append(cdsHisto)\n if iHisto[\"name\"] in aliasDict:\n mapping = aliasDict[iHisto[\"name\"]]\n mapping.update({\"bin_center\": \"bin_center\", \"bin_count\": \"bin_count\", \"bin_bottom\": \"bin_bottom\", \"bin_top\": \"bin_top\"})\n for i in optionLocal[\"histograms\"].keys():\n mapping.update({i:i})\n cdsHisto = CDSAlias(source=cdsHisto, mapping=mapping)\n histoDict[histoName] = iHisto.copy()\n histoDict[histoName].update({\"cds\": cdsHisto, \"type\": \"histogram\"})\n else:\n sampleVarNames = []\n for i in sampleVars:\n _, varName, cdsUsed = getOrMakeColumn(dfQuery, i, cdsUsed, aliasDict[\"\"])\n sampleVarNames.append(varName)\n source = cdsFull\n if \"source\" in iHisto:\n source = iHisto[\"source\"]\n cdsHisto = HistoNdCDS(source=source, nbins=optionLocal[\"nbins\"],\n range=optionLocal[\"range\"], sample_variables=sampleVarNames,\n weights=weights, histograms=optionLocal[\"histograms\"])\n histoList.append(cdsHisto)\n if iHisto[\"name\"] in aliasDict:\n mapping = aliasDict[iHisto[\"name\"]]\n mapping.update({\"bin_count\": \"bin_count\"})\n for i in range(len(sampleVarNames)):\n mapping.update({f\"bin_center_{i}\": f\"bin_center_{i}\", f\"bin_bottom_{i}\": f\"bin_bottom_{i}\", f\"bin_top_{i}\": f\"bin_top_{i}\"})\n for i in optionLocal[\"histograms\"].keys():\n mapping.update({i:i})\n cdsHisto = CDSAlias(source=cdsHisto, mapping=mapping)\n if len(sampleVars) == 2:\n histoDict[histoName] = {\"cds\": cdsHisto, \"type\": \"histo2d\", \"name\": histoName,\n \"variables\": sampleVars}\n else:\n histoDict[histoName] = {\"cds\": cdsHisto, \"type\": \"histoNd\", \"name\": histoName,\n \"variables\": sampleVars}\n if \"axis\" in iHisto:\n axisIndices = iHisto[\"axis\"]\n profilesDict = {}\n for i in axisIndices:\n cdsProfile = HistoNdProfile(source=cdsHisto, axis_idx=i, quantiles=optionLocal[\"quantiles\"],\n sum_range=optionLocal[\"sum_range\"])\n profilesDict[i] = cdsProfile\n histoDict[histoName+\"_\"+str(i)] = {\"cds\": cdsProfile, \"type\": \"profile\", \"name\": histoName+\"_\"+str(i), \"variables\": sampleVars,\n \"quantiles\": optionLocal[\"quantiles\"], \"sum_range\": optionLocal[\"sum_range\"], \"axis\": i} \n histoDict[histoName][\"profiles\"] = profilesDict\n\n return histoDict, histoList\n\n\ndef 
makeDerivedColumns(dfQuery, figureArray=None, histogramArray=None, parameterArray=None, widgetArray=None, aliasArray=None, options={}):\n histogramDict = {}\n columnNameDict = {}\n paramDict = {}\n downsamplerColumns = {}\n\n aliasDict = {}\n\n if histogramArray is not None:\n for i, histo in enumerate(histogramArray):\n histogramDict[histo[\"name\"]] = True\n\n if aliasArray is not None:\n for i, func in enumerate(aliasArray):\n aliasDict[func[\"name\"]] = True\n\n if parameterArray is not None:\n for i, param in enumerate(parameterArray):\n paramDict[param[\"name\"]] = param\n\n if figureArray is not None:\n for i, variables in enumerate(figureArray):\n if isinstance(variables, dict):\n continue\n if variables[0] != \"table\" and variables[0] != \"tableHisto\":\n nvars = len(variables)\n if isinstance(variables[-1], dict):\n optionLocal = options.copy()\n optionLocal.update(variables[-1])\n nvars = nvars - 1\n else:\n optionLocal = options\n if \"source\" in optionLocal:\n continue\n variablesLocal = [None]*len(BOKEH_DRAW_ARRAY_VAR_NAMES)\n for axis_index, axis_name in enumerate(BOKEH_DRAW_ARRAY_VAR_NAMES):\n if axis_index < nvars:\n variablesLocal[axis_index] = variables[axis_index]\n elif axis_name in optionLocal:\n variablesLocal[axis_index] = optionLocal[axis_name]\n if variablesLocal[axis_index] is not None and not isinstance(variablesLocal[axis_index], list):\n variablesLocal[axis_index] = [variablesLocal[axis_index]]\n lengthX = len(variables[0])\n lengthY = len(variables[1])\n length = max(j is not None and len(j) for j in variablesLocal)\n\n for j in range(0, length):\n if variables[1][j % lengthY] not in histogramDict:\n for iVariable in variablesLocal:\n if iVariable is not None and ('.' not in iVariable[j % len(iVariable)]):\n if iVariable[j % len(iVariable)] in paramDict:\n parameter = paramDict[iVariable[j % len(iVariable)]]\n if 'options' in parameter:\n for i in parameter['options']:\n dfQuery, varName = pandaGetOrMakeColumn(dfQuery, i)\n columnNameDict[varName] = True\n downsamplerColumns[varName] = True\n elif iVariable[j % len(iVariable)] in aliasDict:\n aliasDict[iVariable[j % len(iVariable)]] = True\n downsamplerColumns[iVariable[j % len(iVariable)]] = True\n else:\n dfQuery, varName = pandaGetOrMakeColumn(dfQuery, iVariable[j % len(iVariable)])\n columnNameDict[varName] = True\n downsamplerColumns[varName] = True \n \n # These are kept because of error bars which will be moved to the client later\n if '.' not in variables[0][j % lengthX] and variables[0][j % lengthX] not in aliasDict:\n dfQuery, varNameX = pandaGetOrMakeColumn(dfQuery, variables[0][j % lengthX])\n if '.' not in variables[1][j % lengthY] and variables[1][j % lengthY] not in aliasDict:\n dfQuery, varNameY = pandaGetOrMakeColumn(dfQuery, variables[1][j % lengthY])\n # TODO: Make error bars client side to get rid of this mess. 
At least ND histogram does support them.\n if ('errY' in optionLocal) and (optionLocal['errY'] != ''):\n dfQuery, varNameErrY = pandaGetOrMakeColumn(dfQuery, optionLocal['errY'])\n columnNameDict[varNameErrY] = True\n downsamplerColumns[varNameErrY] = True\n if ('errX' in optionLocal) and (optionLocal['errX'] != ''):\n dfQuery, varNameErrX = pandaGetOrMakeColumn(dfQuery, optionLocal['errX'])\n columnNameDict[varNameErrX] = True\n downsamplerColumns[varNameErrX] = True\n if 'tooltips' in optionLocal:\n tooltipColumns = getTooltipColumns(optionLocal['tooltips'])\n columnNameDict.update(tooltipColumns)\n downsamplerColumns.update(tooltipColumns)\n else:\n histogramDict[variables[1][j % lengthY]] = True\n\n if histogramArray is not None:\n for i, histo in enumerate(histogramArray):\n if histogramDict[histo[\"name\"]] and \"source\" not in histo:\n if \"variables\" in histo:\n for j, variable in enumerate(histo[\"variables\"]):\n if variable in aliasDict:\n aliasDict[variable] = True\n else:\n dfQuery, varName = pandaGetOrMakeColumn(dfQuery, variable)\n columnNameDict[varName] = True\n if \"weights\" in histo:\n if histo[\"weights\"] in aliasDict:\n aliasDict[histo[\"weights\"]] = True\n else:\n dfQuery, varName = pandaGetOrMakeColumn(dfQuery, histo[\"weights\"])\n columnNameDict[varName] = True\n if \"histograms\" in histo:\n for iColumn in histo[\"histograms\"].values():\n if iColumn is not None:\n if \"weights\" in iColumn:\n if iColumn[\"weights\"] in aliasDict:\n aliasDict[iColumn[\"weights\"]] = True\n else:\n dfQuery, varName = pandaGetOrMakeColumn(dfQuery, iColumn[\"weights\"])\n columnNameDict[varName] = True\n\n if widgetArray is not None:\n for iWidget in widgetArray:\n if len(iWidget) < 3 or 'callback' not in iWidget[2]:\n if iWidget[1][0] not in paramDict:\n dfQuery, varNameX = pandaGetOrMakeColumn(dfQuery, iWidget[1][0])\n columnNameDict[varNameX] = True\n elif iWidget[2]['callback'] == 'selection':\n dfQuery, varNameX = pandaGetOrMakeColumn(dfQuery, iWidget[1][0])\n columnNameDict[varNameX] = True\n\n if aliasArray is not None:\n for func in aliasArray:\n if \"context\" in func and func[\"name\"] in downsamplerColumns:\n downsamplerColumns.pop(func[\"name\"])\n\n if \"removeExtraColumns\" in options and options[\"removeExtraColumns\"]:\n dfQuery = dfQuery[columnNameDict]\n\n return dfQuery, histogramDict, list(downsamplerColumns.keys()), columnNameDict, paramDict, aliasDict\n\ndef bokehMakeParameters(parameterArray, histogramArray, figureArray, variableList, options={}):\n parameterDict = {}\n if parameterArray is not None:\n for param in parameterArray:\n parameterDict[param[\"name\"]] = param.copy()\n parameterDict[param[\"name\"]][\"subscribed_events\"] = []\n if histogramArray is not None:\n for iHisto in histogramArray: \n pass\n if figureArray is not None:\n for i, variables in enumerate(figureArray):\n if isinstance(variables, dict):\n continue\n if len(variables) > 1 and variables[0] != \"table\" and variables[0] != \"tableHisto\":\n if len(variables) > 2:\n optionLocal = options.copy()\n optionLocal.update(variables[-1])\n else:\n optionLocal = options \n if 'colorZvar' in optionLocal:\n varColor = optionLocal['colorZvar']\n if varColor in parameterDict:\n paramColor = parameterDict[varColor]\n # Possibly also allow custom color mappers?\n if \"type\" not in paramColor:\n paramColor[\"type\"] = \"varName\"\n if \"options\" not in paramColor:\n paramColor[\"options\"] = variableList\n # XXX: Add autofill for cases of histograms and main CDS\n if 'size' in optionLocal:\n 
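# Autofill defaults for a size parameter: a string value is treated as a column name (varName), anything else as a scalar - mirroring the colorZvar handling above.\n                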
varSize = optionLocal['size']\n                if varSize in parameterDict:\n                    paramSize = parameterDict[varSize]\n                    if \"type\" not in paramSize:\n                        t = type(paramSize['value'])\n                        if t == str:\n                            paramSize[\"type\"] = \"varName\"\n                        else:\n                            paramSize[\"type\"] = \"scalar\"\n                    if paramSize[\"type\"] == \"varName\":\n                        if \"options\" not in paramSize:\n                            paramSize[\"options\"] = variableList\n                    elif paramSize[\"type\"] == \"scalar\":\n                        if \"range\" not in paramSize:\n                            raise ValueError(\"Missing range for parameter: \", paramSize[\"name\"])\n    return parameterDict\n\ndef defaultNDProfileTooltips(varNames, axis_idx, quantiles, sumRanges):\n    tooltips = []\n    for i, iAxis in enumerate(varNames):\n        if i != axis_idx:\n            tooltips.append((iAxis, \"[@{bin_bottom_\" + str(i) + \"}, @{bin_top_\" + str(i) + \"}]\"))\n    tooltips.append((\"Mean \" + varNames[axis_idx], \"@mean\"))\n    tooltips.append((\"Std. \" + varNames[axis_idx], \"@std\"))\n    for i, iQuantile in enumerate(quantiles):\n        tooltips.append((f\"Quantile {iQuantile} {varNames[axis_idx]}\", \"@quantile_\" + str(i)))\n    for i, iRange in enumerate(sumRanges):\n        tooltips.append((f\"Sum_normed {varNames[axis_idx]} in [{iRange[0]}, {iRange[1]}]\", \"@sum_normed_\" + str(i)))\n    return tooltips\n\ndef getOrMakeColumn(dfQuery, column, cdsName, ignoreDict={}):\n    if '.' in column:\n        c = column.split('.')\n        if cdsName is None or cdsName == c[0]:\n            return [dfQuery, c[1], c[0]]\n        else:\n            raise ValueError(\"Inconsistent CDS\")\n    else:\n        if column in ignoreDict:\n            return [dfQuery, column, cdsName]\n        dfQuery, column = pandaGetOrMakeColumn(dfQuery, column)\n        return [dfQuery, column, None]\n\ndef getTooltipColumns(tooltips):\n    if isinstance(tooltips, str):\n        return {}\n    result = {}\n    tooltip_regex = re.compile(r'@(?:\\w+|\\{[^\\}]*\\})')\n    for iTooltip in tooltips:\n        for iField in tooltip_regex.findall(iTooltip[1]):\n            if iField[1] == '{':\n                result[iField[2:-1]] = True\n            else:\n                result[iField[1:]] = True\n    return result\n\ndef getHistogramAxisTitle(histoDict, varName, cdsName, removeCdsName=True):\n    if cdsName is None:\n        return varName\n    if cdsName in histoDict:\n        if \"variables\" not in histoDict[cdsName]:\n            return varName\n        if '_' in varName:\n            if varName == \"bin_count\":\n                # Maybe do something else\n                return \"entries\"\n            x = varName.split(\"_\")\n            if x[0] == \"bin\":\n                if len(x) == 2:\n                    return histoDict[cdsName][\"variables\"][0]\n                return histoDict[cdsName][\"variables\"][int(x[-1])]\n            if x[0] == \"quantile\":\n                quantile = histoDict[cdsName][\"quantiles\"][int(x[-1])]\n                if '_' in cdsName:\n                    histoName, projectionIdx = cdsName.split(\"_\")\n                    return \"quantile \" + str(quantile) + \" \" + histoDict[histoName][\"variables\"][int(projectionIdx)]\n                return \"quantile \" + str(quantile)\n            if x[0] == \"sum\":\n                range = histoDict[cdsName][\"sum_range\"][int(x[-1])]\n                if len(x) == 2:\n                    if '_' in cdsName:\n                        histoName, projectionIdx = cdsName.split(\"_\")\n                        return \"sum \" + histoDict[histoName][\"variables\"][int(projectionIdx)] + \" in [\" + str(range[0]) + \", \" + str(range[1]) + \"]\"\n                    return \"sum in [\" + str(range[0]) + \", \" + str(range[1]) + \"]\"\n                else:\n                    if '_' in cdsName:\n                        histoName, projectionIdx = cdsName.split(\"_\")\n                        return \"p \" + histoDict[histoName][\"variables\"][int(projectionIdx)] + \" in [\" + str(range[0]) + \", \" + str(range[1]) + \"]\"\n                    return \"p in [\"+ str(range[0]) + \", \" + str(range[1]) + \"]\"\n        else:\n            if '_' in cdsName:\n                histoName, projectionIdx = cdsName.split(\"_\")\n                return varName + \" \" + histoDict[histoName][\"variables\"][int(projectionIdx)]\n    if 
not removeCdsName:\n        return cdsName+\".\"+varName\n    return varName","sub_path":"RootInteractive/InteractiveDrawing/bokeh/bokehTools.py","file_name":"bokehTools.py","file_ext":"py","file_size_in_byte":72770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"44184009","text":"#coding=utf-8\n\nimport unittest\n\n\"\"\"\n\nRainbow Sort\nGiven an array of balls, where the color of the balls can only be Red, Green or Blue, sort the balls such that all the \nRed balls are grouped on the left side, all the Green balls are grouped in the middle and all the Blue balls are grouped \non the right side. (Red is denoted by -1, Green is denoted by 0, and Blue is denoted by 1).\n\nExamples\n\n{0} is sorted to {0}\n{1, 0} is sorted to {0, 1}\n{1, 0, 1, -1, 0} is sorted to {-1, 0, 0, 1, 1}\nAssumptions\n\nThe input array is not null.\nCorner Cases\n\nWhat if the input array is of length zero? In this case, we should not do anything as well.\n\n\"\"\"\n\n\n\nclass Solution(object):\n    def rainbowSort(self, array):\n        \"\"\"\n        array: int[]\n        return: int[]\n        \"\"\"\n        # write your solution here\n        if not array:\n            return []\n        arr = array\n        RED = -1\n        GREEN = 0\n        BLUE = 1\n        i, j, k = 0, len(array) - 1, 0\n        while k <= j:\n            if arr[k] == RED:\n                arr[i], arr[k] = arr[k], arr[i]\n                i += 1\n            elif arr[k] == GREEN:\n                k += 1\n            else:\n                arr[j], arr[k] = arr[k], arr[j]\n                j -= 1\n        return arr\n\n\nclass SolutionTester(unittest.TestCase):\n    def setUp(self):\n        self.sol = Solution()\n\n    def test_case1(self):\n        nums = [0]\n        answer = [0]\n        result = self.sol.rainbowSort(nums)\n        self.assertEqual(answer, result)\n\n    def test_case2(self):\n        nums = [1, 0]\n        answer = [0, 1]\n        result = self.sol.rainbowSort(nums)\n        self.assertEqual(answer, result)\n\n\n    def test_case3(self):\n        nums = [1, 0, 1, -1, 0]\n        answer = [-1, 0, 0, 1, 1]\n        result = self.sol.rainbowSort(nums)\n        self.assertEqual(answer, result)\n\n    def test_case4(self):\n        nums = [1, 1, -1, 1]\n        answer = [-1, 1, 1, 1]\n        result = self.sol.rainbowSort(nums)\n        self.assertEqual(answer, result)\n\ndef main():\n    suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n    unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n\n#-*- coding:utf-8 -*-\n","sub_path":"lo/rainbow_sort.py","file_name":"rainbow_sort.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"630749923","text":"import pygame\n\n\nclass Checkpoint:\n    x = 0\n    y = 0\n\n    w = 0\n    h = 0\n\n    number = 0\n    captured = 0\n\n    def __init__(self, c1, c2, grid_size):\n        self.captured = False\n\n        x1, y1 = c1\n        x2, y2 = c2\n\n        # get the x, y, w, h\n        if x1 < x2:\n            self.x = x1\n            self.w = x2 - x1\n        else:\n            self.x = x2\n            self.w = x1 - x2\n\n        if y1 < y2:\n            self.y = y1\n            self.h = y2 - y1\n        else:\n            self.y = y2\n            self.h = y1 - y2\n\n        # make sure the x and y are filling the grid\n        self.x = self.x - (self.x % grid_size)\n        self.y = self.y - (self.y % grid_size)\n\n        self.w = (int(self.w/grid_size)+1)*grid_size\n        self.h = (int(self.h/grid_size)+1)*grid_size\n\n    def check_captured(self, player):\n        if not self.captured:\n\n            if player.x + player.w > self.x:\n                if player.x < self.x + self.w:\n                    if player.y + player.h > self.y:\n                        if player.y < self.y + self.h:\n\n                            self.captured = True\n                            return True\n\n    def draw(self, screen, screen_difference_x, screen_difference_y):\n        if self.captured:\n            pygame.draw.rect(screen, (0, 255, 0), (self.x - screen_difference_x, self.y - 
screen_difference_y, self.w, self.h))\n\n else:\n pygame.draw.rect(screen, (255, 0, 0), (self.x - screen_difference_x, self.y - screen_difference_y, self.w, self.h))\n\n\ndef create_checkpoint_from_string(string):\n \"\"\" this will take a string and return an instance of a checkpoint class \"\"\"\n coordinate1 = -1\n coordinate2 = -1\n gridsize = -1\n\n for i in range(len(string)):\n if string[i] == \"coordinate\":\n if coordinate1 == -1:\n coordinate1 = (int(string[i+1]), int(string[i+2]))\n\n else:\n coordinate2 = (int(string[i+1]), int(string[i+2]))\n\n elif string[i] == \"gridsize\":\n gridsize = int(string[i+1])\n\n if coordinate1 != -1 and coordinate2 != -1 and gridsize != -1:\n return Checkpoint(coordinate1, coordinate2, gridsize)\n\n return False\n","sub_path":"classes/game/checkpoint_class.py","file_name":"checkpoint_class.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"139464553","text":"import ml_collections\r\n\r\ndef get_b16_config(classifier='seg', activation='softmax'):\r\n \"\"\"Returns the ViT-B/16 configuration.\"\"\"\r\n config = ml_collections.ConfigDict()\r\n config.patches = ml_collections.ConfigDict({'size': (16, 16)})\r\n config.hidden_size = 768\r\n config.transformer = ml_collections.ConfigDict()\r\n config.transformer.mlp_dim = 3072\r\n config.transformer.num_heads = 12\r\n config.transformer.num_layers = 12\r\n config.transformer.attention_dropout_rate = 0.0\r\n config.transformer.dropout_rate = 0.1\r\n\r\n if classifier == 'seg':\r\n config.classifier = 'seg'\r\n config.representation_size = None\r\n config.resnet_pretrained_path = None\r\n config.pretrained_path = '../model/vit_checkpoint/imagenet21k/ViT-B_16.npz'\r\n config.patch_size = 16\r\n\r\n config.decoder_channels = (256, 128, 64, 16)\r\n config.n_classes = 2\r\n\r\n if activation == 'softmax':\r\n config.activation = 'softmax'\r\n return config\r\n\r\n\r\ndef get_testing(classifier='token'):\r\n \"\"\"Returns a minimal configuration for testing.\"\"\"\r\n config = ml_collections.ConfigDict()\r\n config.patches = ml_collections.ConfigDict({'size': (16, 16)})\r\n config.hidden_size = 1\r\n config.transformer = ml_collections.ConfigDict()\r\n config.transformer.mlp_dim = 1\r\n config.transformer.num_heads = 1\r\n config.transformer.num_layers = 1\r\n config.transformer.attention_dropout_rate = 0.0\r\n config.transformer.dropout_rate = 0.1\r\n\r\n if classifier == 'token':\r\n config.classifier = 'token'\r\n config.representation_size = None\r\n return config\r\n\r\ndef get_r50_b16_config(classifier='seg', activation='softmax'):\r\n \"\"\"Returns the Resnet50 + ViT-B/16 configuration.\"\"\"\r\n config = get_b16_config()\r\n config.patches.grid = (16, 16)\r\n config.resnet = ml_collections.ConfigDict()\r\n config.resnet.num_layers = (3, 4, 9)\r\n config.resnet.width_factor = 1\r\n\r\n if classifier == 'seg':\r\n config.classifier = 'seg'\r\n config.pretrained_path = '../model/vit_checkpoint/imagenet21k/R50+ViT-B_16.npz'\r\n config.decoder_channels = (256, 128, 64, 16)\r\n config.skip_channels = [512, 256, 64, 16]\r\n config.n_classes = 2\r\n config.n_skip = 3\r\n\r\n if activation == 'softmax':\r\n config.activation = 'softmax'\r\n return config\r\n","sub_path":"networks/vit_seg_configs.py","file_name":"vit_seg_configs.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"288630444","text":"from setuptools import 
setup\nfrom setuptools import find_packages\n\nfrom shutil import copyfile, rmtree\nimport os\nimport glob\n\ndef build_package():\n try:\n rmtree('pyaf')\n except:\n pass\n os.mkdir('pyaf')\n os.mkdir('pyaf/TS')\n for file in glob.glob('TS/*.py'): \n copyfile(file, 'pyaf/' + file)\n copyfile('ForecastEngine.py' ,\n 'pyaf/ForecastEngine.py')\n copyfile('HierarchicalForecastEngine.py',\n 'pyaf/HierarchicalForecastEngine.py')\n copyfile('__init__.py',\n 'pyaf/__init__.py')\n os.mkdir('pyaf/CodeGen')\n for file in glob.glob('CodeGen/*.py'): \n copyfile(file, 'pyaf/' + file)\n os.mkdir('pyaf/Bench')\n for file in glob.glob('Bench/*.py'): \n copyfile(file, 'pyaf/' + file)\n \n\nbuild_package();\n\nsetup(name='pyaf',\n version='1.0',\n description='Python Automatic Forecasting',\n author='Antoine CARME',\n author_email='antoine.carme@laposte.net',\n url='https://github.com/antoinecarme/pyaf',\n license='BSD 3-clause',\n packages=['pyaf' , 'pyaf.TS' , 'pyaf.CodeGen' , 'pyaf.Bench'],\n install_requires=[\n 'scipy',\n 'pandas',\n 'sklearn',\n 'matplotlib',\n 'pydot',\n 'dill',\n 'pathos',\n 'sqlalchemy'\n ])\n\nrmtree('pyaf')\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"308685919","text":"import backbone\nimport utils\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.nn.functional as F\nfrom math import exp\n\nclass Decoder(nn.Module):\n def __init__(self, zsize):\n super(Decoder,self).__init__()\n self.dfc3 = nn.Linear(zsize, 4096)\n self.bn3 = nn.BatchNorm1d(4096)\n self.dfc2 = nn.Linear(4096, 4096)\n self.bn2 = nn.BatchNorm1d(4096)\n self.dfc1 = nn.Linear(4096,256 * 6 * 6)\n self.bn1 = nn.BatchNorm1d(256*6*6)\n self.upsample1=nn.Upsample(scale_factor=2)\n self.dconv5 = nn.ConvTranspose2d(256, 256, 3, padding = 0)\n self.dconv4 = nn.ConvTranspose2d(256, 384, 3, padding = 1)\n self.dconv3 = nn.ConvTranspose2d(384, 192, 3, padding = 1)\n self.dconv2 = nn.ConvTranspose2d(192, 64, 5, padding = 2)\n self.dconv1 = nn.ConvTranspose2d(64, 3, 12, stride = 4, padding = 4)\n\n def forward(self,x):#,i1,i2,i3):\n \n x = self.dfc3(x)\n #x = F.relu(x)\n x = F.relu(self.bn3(x))\n \n x = self.dfc2(x)\n x = F.relu(self.bn2(x))\n #x = F.relu(x)\n x = self.dfc1(x)\n x = F.relu(self.bn1(x))\n #x = F.relu(x)\n #print(x.size())\n x = x.view(-1,256,6,6)\n #print (x.size())\n x=self.upsample1(x)\n #print x.size()\n x = self.dconv5(x)\n #print x.size()\n x = F.relu(x)\n #print x.size()\n x = F.relu(self.dconv4(x))\n #print x.size()\n x = F.relu(self.dconv3(x))\n #print x.size()\t\t\n x=self.upsample1(x)\n #print x.size()\t\t\n x = self.dconv2(x)\n #print x.size()\t\t\n x = F.relu(x)\n x=self.upsample1(x)\n #print x.size()\n x = self.dconv1(x)\n #print x.size()\n x = torch.sigmoid(x)\n return x\n\n\nclass BaselineTrain(nn.Module):\n def __init__(self, model_func, num_class, loss_type = 'softmax'):\n super(BaselineTrain, self).__init__()\n self.feature = model_func()\n\n if loss_type == 'softmax':\n self.classifier = nn.Linear(self.feature.final_feat_dim, num_class)\n self.classifier.bias.data.fill_(0)\n elif loss_type == 'dist': #Baseline ++\n self.classifier = backbone.distLinear(self.feature.final_feat_dim, num_class)\n \n self.loss_type = loss_type #'softmax' #'dist'\n self.num_class = num_class\n self.loss_fn = nn.CrossEntropyLoss()\n self.top1 = utils.AverageMeter()\n self.decoder = 
Decoder(self.feature.final_feat_dim)\n        self.autoencoder_criterion = nn.MSELoss()\n        \n        ### my code ###\n        self.record_list = [[\"Epoch\", \"Batch\", \"Loss\", \"Top1 Val\", \"Top1 Avg\"]]\n\n    def forward(self,x):\n        x = Variable(x.cuda())\n        out = self.feature.forward(x)\n        scores = self.classifier.forward(out)\n        return scores\n    \n    def set_loss_decoder(self, x):\n        # NOTE: expects episodic input; relies on self.n_way / self.n_support / self.n_query being set by the caller\n        x = x.contiguous().view( self.n_way * (self.n_support + self.n_query), *x.size()[2:]) \n        z_all = self.feature.forward(x)\n        x_hat = self.decoder.forward(z_all)\n        loss = self.autoencoder_criterion(x_hat, x)\n        return loss\n\n    def forward_loss(self, x, y):\n        y = Variable(y.cuda())\n\n        scores = self.forward(x)\n\n        _, predicted = torch.max(scores.data, 1)\n        correct = predicted.eq(y.data).cpu().sum()\n        self.top1.update(correct.item()*100 / (y.size(0)+0.0), y.size(0)) \n\n        loss = self.loss_fn(scores, y )\n        x = Variable(x.cuda())\n        # add the autoencoder reconstruction loss, weighted by 0.5\n        loss += 0.5*self.set_loss_decoder(x)\n        return loss\n    \n    def train_loop(self, epoch, start_epoch, stop_epoch, train_loader, optimizer):\n        print_freq = 10\n        avg_loss=0\n        for i, (x,y) in enumerate(train_loader):\n            optimizer.zero_grad()\n            loss = self.forward_loss(x, y)\n            loss.backward()\n            optimizer.step()\n\n            avg_loss = avg_loss+loss.item()\n            if i % print_freq==0:\n                #print(optimizer.state_dict()['param_groups'][0]['lr'])\n                print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Top1 Val {:f} | Top1 Avg {:f}'.format(epoch, i, len(train_loader), avg_loss/float(i+1), self.top1.val, self.top1.avg))\n            \n            ### my code ###\n            self.record_list.append([epoch, i, avg_loss/float(i+1), self.top1.val, self.top1.avg])\n    \n    def test_loop(self, val_loader):\n        return -1 #no validation, just save model during iteration\n\n\n    ### my code ###\n    #model.train_loop_dann(epoch, base_loader, optimizer, optimizer_domain, model_domain )\n    def train_loop_dann(self, epoch, start_epoch, stop_epoch, train_loader, optimizer, optimizer_domain, model_domain, dann_link):\n        print_freq = 10\n        avg_loss=0\n        avg_loss_domain=0\n        gam = 10\n        progress = epoch/(stop_epoch-start_epoch-1)\n        lamb = (2 / (1+exp(-gam*progress))) -1\n        self.record_list = [[\"Epoch\", \"Batch\", \"Loss\", \"Domain Loss\", \"Top1 Val\", \"Top1 Avg\"]]\n        for i, ((x1, y1), (x2, y2)) in enumerate(zip(train_loader[0], train_loader[1])):\n            x1 = Variable(x1.cuda())\n            y1 = Variable(y1.cuda())\n            x2 = Variable(x2.cuda())\n            y2 = Variable(y2.cuda())\n            \n            optimizer.zero_grad()\n            optimizer_domain.zero_grad()\n            \n            # step 1: train the domain classifier\n            mixed_data, mixed_label, domain_label = model_domain.mix_data(x1, x2, y1, y2)\n            feature = self.feature.forward(mixed_data)\n            # We don't need to train the feature extractor in step 1,\n            # so we detach the features to avoid backpropagation into it.\n            domain_logits = model_domain.forward(feature.detach())\n            loss = model_domain.loss_fn(domain_logits, domain_label)\n            avg_loss_domain = avg_loss_domain+loss.item()\n            loss.backward()\n            optimizer_domain.step()\n            \n            # step 2: train the feature extractor and label classifier\n            class_logits = self.classifier.forward(feature)\n            _, predicted = torch.max(class_logits.data, 1)\n            correct = predicted.eq(mixed_label.data).cpu().sum()\n            self.top1.update(correct.item()*100 / (mixed_label.size(0)+0.0), mixed_label.size(0))\n            domain_logits = model_domain.forward(feature)\n            # loss = cross entropy of classification - lamb * domain binary cross entropy.\n            # The subtraction plays the same role as the generator loss against the discriminator in a GAN.\n            loss = self.loss_fn(class_logits, mixed_label) - lamb * model_domain.loss_fn(domain_logits, 
domain_label)\n avg_loss = avg_loss+loss.item()\n loss.backward()\n optimizer.step()\n \n if i % print_freq==0:\n #print(optimizer.state_dict()['param_groups'][0]['lr'])\n batch_num_mul = min(len(train_loader[0]), len(train_loader[1]))\n batch_num_mul = batch_num_mul//10 *10\n print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Domain Loss {:f} | Top1 Val {:f} | Top1 Avg {:f}'.format(epoch, i, batch_num_mul, avg_loss/float(i+1), avg_loss_domain/float(i+1), self.top1.val, self.top1.avg))\n \n self.record_list.append([epoch, i, avg_loss/float(i+1), avg_loss_domain/float(i+1), self.top1.val, self.top1.avg])\n \n def test_loop_dann(self, val_loader):\n return -1 #no validation, just save model during iteration","sub_path":"methods/baselinetrain.py","file_name":"baselinetrain.py","file_ext":"py","file_size_in_byte":7596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"68553145","text":"# Third-Party Imports\nfrom django.core.exceptions import ValidationError\nfrom rest_framework import serializers\n\n# App Imports\nfrom core import models\nfrom core.constants import CHECKIN, CHECKOUT\n\n\nclass AssetSerializer(serializers.ModelSerializer):\n checkin_status = serializers.SerializerMethodField()\n allocation_history = serializers.SerializerMethodField()\n assigned_to = serializers.SerializerMethodField()\n asset_category = serializers.ReadOnlyField()\n asset_sub_category = serializers.ReadOnlyField()\n asset_make = serializers.ReadOnlyField()\n make_label = serializers.ReadOnlyField(source=\"asset_make\")\n asset_type = serializers.ReadOnlyField()\n asset_location = serializers.SlugRelatedField(\n many=False,\n slug_field=\"name\",\n required=False,\n queryset=models.AndelaCentre.objects.all(),\n )\n\n model_number = serializers.SlugRelatedField(\n queryset=models.AssetModelNumber.objects.all(), slug_field=\"name\"\n )\n\n class Meta:\n model = models.Asset\n fields = (\n \"id\",\n \"uuid\",\n \"asset_category\",\n \"asset_sub_category\",\n \"asset_make\",\n \"make_label\",\n \"asset_code\",\n \"serial_number\",\n \"model_number\",\n \"checkin_status\",\n \"created_at\",\n \"last_modified\",\n \"current_status\",\n \"asset_type\",\n \"allocation_history\",\n \"specs\",\n \"purchase_date\",\n \"notes\",\n \"assigned_to\",\n \"asset_location\",\n \"verified\",\n )\n depth = 1\n read_only_fields = (\n \"uuid\",\n \"created_at\",\n \"last_modified\",\n \"assigned_to\",\n \"current_status\",\n \"notes\",\n \"asset_category\",\n \"asset_sub_category\",\n \"asset_make\",\n )\n\n def get_checkin_status(self, obj):\n try:\n asset_log = (\n models.AssetLog.objects.filter(asset=obj)\n .order_by(\"-created_at\")\n .first()\n )\n if asset_log.log_type == CHECKIN:\n return \"checked_in\"\n elif asset_log.log_type == CHECKOUT:\n return \"checked_out\"\n except AttributeError:\n return None\n\n def get_assigned_to(self, obj):\n if not obj.assigned_to:\n return None\n if obj.assigned_to.department:\n from api.serializers import DepartmentSerializer\n\n serialized_data = DepartmentSerializer(obj.assigned_to.department)\n elif obj.assigned_to.workspace:\n from api.serializers import OfficeWorkspaceSerializer\n\n serialized_data = OfficeWorkspaceSerializer(obj.assigned_to.workspace)\n elif obj.assigned_to.user:\n from api.serializers import UserSerializer\n\n serialized_data = UserSerializer(obj.assigned_to.user)\n else:\n return None\n return serialized_data.data\n\n def get_allocation_history(self, obj):\n allocations = 
models.AllocationHistory.objects.filter(asset=obj.id)\n return [\n {\n \"id\": allocation.id,\n \"current_owner\": allocation.current_owner.email\n if allocation.current_owner\n else None,\n \"previous_owner\": allocation.previous_owner.email\n if allocation.previous_owner\n else None,\n \"created_at\": allocation.created_at,\n }\n for allocation in allocations\n ]\n\n def to_internal_value(self, data):\n internals = super(AssetSerializer, self).to_internal_value(data)\n specs_serializer = AssetSpecsSerializer(data=data)\n specs_serializer.is_valid()\n if len(specs_serializer.data):\n try:\n specs, _ = models.AssetSpecs.objects.get_or_create(\n **specs_serializer.data\n )\n except ValidationError as err:\n raise serializers.ValidationError(err.error_dict)\n internals[\"specs\"] = specs\n return internals\n\n\nclass AssetAssigneeSerializer(serializers.ModelSerializer):\n assignee = serializers.SerializerMethodField()\n\n class Meta:\n model = models.AssetAssignee\n fields = (\"id\", \"assignee\")\n\n def get_assignee(self, obj):\n if obj.user:\n return obj.user.email\n\n elif obj.department:\n return obj.department.name\n\n elif obj.workspace:\n return obj.workspace.name\n\n\nclass AssetLogSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.AssetLog\n fields = (\"id\", \"asset\", \"log_type\", \"created_at\", \"last_modified\")\n\n def to_representation(self, instance):\n instance_data = super().to_representation(instance)\n asset = models.Asset.objects.get(id=instance.asset.id)\n serial_no = asset.serial_number\n asset_code = asset.asset_code\n instance_data[\"checked_by\"] = instance.checked_by.email\n instance_data[\"asset\"] = f\"{serial_no} - {asset_code}\"\n return instance_data\n\n def validate(self, fields):\n existing_log = models.AssetLog.objects.filter(asset=fields[\"asset\"])\n existing_log = existing_log.first()\n if existing_log and existing_log.log_type == fields[\"log_type\"]:\n raise serializers.ValidationError(\n f\"The asset log type is already {existing_log.log_type}\"\n )\n return fields\n\n\nclass AssetStatusSerializer(AssetSerializer):\n status_history = serializers.SerializerMethodField()\n\n class Meta:\n model = models.AssetStatus\n fields = (\n \"id\",\n \"asset\",\n \"current_status\",\n \"status_history\",\n \"previous_status\",\n \"created_at\",\n )\n\n def get_status_history(self, obj):\n asset_status = models.AssetStatus.objects.filter(asset=obj.asset)\n return [\n {\n \"id\": asset.id,\n \"asset\": asset.asset_id,\n \"current_status\": asset.current_status,\n \"previous_status\": asset.previous_status,\n \"created_at\": asset.created_at,\n }\n for asset in asset_status\n if obj.created_at > asset.created_at\n ]\n\n def to_representation(self, instance):\n instance_data = super().to_representation(instance)\n serial_no = instance.asset.serial_number\n asset_code = instance.asset.asset_code\n instance_data[\"asset\"] = f\"{asset_code} - {serial_no}\"\n return instance_data\n\n\nclass AllocationsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.AllocationHistory\n fields = (\"asset\", \"current_owner\", \"previous_owner\", \"created_at\")\n read_only_fields = (\"previous_owner\",)\n\n def to_representation(self, instance):\n instance_data = super().to_representation(instance)\n serial_no = instance.asset.serial_number\n asset_code = instance.asset.asset_code\n\n if instance.previous_owner:\n instance_data[\"previous_owner\"] = instance.previous_owner.email\n if instance.current_owner:\n instance_data[\"current_owner\"] 
= instance.current_owner.email\n instance_data[\"asset\"] = f\"{serial_no} - {asset_code}\"\n return instance_data\n\n\nclass AssetCategorySerializer(serializers.ModelSerializer):\n category_name = serializers.ReadOnlyField(source=\"name\")\n\n class Meta:\n model = models.AssetCategory\n fields = (\"id\", \"name\", \"created_at\", \"last_modified\", \"category_name\")\n\n def to_internal_value(self, data):\n _data = data.copy()\n if not _data.get(\"name\"):\n _data[\"name\"] = _data.get(\"category_name\")\n internal_value = super().to_internal_value(_data)\n return internal_value\n\n\nclass AssetSubCategorySerializer(serializers.ModelSerializer):\n sub_category_name = serializers.ReadOnlyField(source=\"name\")\n\n class Meta:\n model = models.AssetSubCategory\n fields = (\n \"id\",\n \"name\",\n \"asset_category\",\n \"created_at\",\n \"last_modified\",\n \"sub_category_name\",\n )\n\n def to_internal_value(self, data):\n _data = data.copy()\n if not _data.get(\"name\"):\n _data[\"name\"] = _data.get(\"sub_category_name\")\n internal_value = super().to_internal_value(_data)\n return internal_value\n\n def to_representation(self, instance):\n instance_data = super().to_representation(instance)\n instance_data[\"asset_category\"] = instance.asset_category.name\n return instance_data\n\n\nclass AssetTypeSerializer(serializers.ModelSerializer):\n asset_type = serializers.ReadOnlyField(source=\"name\")\n\n class Meta:\n model = models.AssetType\n fields = (\n \"id\",\n \"name\",\n \"asset_sub_category\",\n \"has_specs\",\n \"created_at\",\n \"last_modified\",\n \"asset_type\",\n )\n\n def to_internal_value(self, data):\n _data = data.copy()\n if not data.get(\"name\"):\n _data[\"name\"] = _data.get(\"asset_type\")\n internal_value = super().to_internal_value(_data)\n return internal_value\n\n def to_representation(self, instance):\n instance_data = super().to_representation(instance)\n instance_data[\"asset_sub_category\"] = instance.asset_sub_category.name\n return instance_data\n\n\nclass AssetModelNumberSerializer(serializers.ModelSerializer):\n make_label = serializers.SerializerMethodField()\n model_number = serializers.ReadOnlyField(source=\"name\")\n\n class Meta:\n model = models.AssetModelNumber\n fields = (\n \"id\",\n \"name\",\n \"asset_make\",\n \"created_at\",\n \"last_modified\",\n \"model_number\",\n \"make_label\",\n )\n\n def to_representation(self, instance):\n instance_data = super().to_representation(instance)\n instance_data[\"asset_make\"] = instance.asset_make.name\n return instance_data\n\n def to_internal_value(self, data):\n _data = data.copy()\n if not _data.get(\"asset_make\"):\n _data[\"asset_make\"] = _data.get(\"make_label\")\n if not _data.get(\"name\"):\n _data[\"name\"] = _data.get(\"model_number\")\n asset_make = _data.get(\"asset_make\")\n if not asset_make:\n raise serializers.ValidationError(\n {\"asset_make\": [self.error_messages[\"required\"]]}\n )\n try:\n asset_make_instance = models.AssetMake.objects.get(id=asset_make)\n except Exception:\n raise serializers.ValidationError(\n {\"asset_make\": [f'Invalid pk \"{asset_make}\" - object does not exist.']}\n )\n internal_value = super().to_internal_value(_data)\n internal_value.update({\"asset_make\": asset_make_instance})\n return internal_value\n\n def get_make_label(self, obj):\n return obj.asset_make.name\n\n\nclass AssetConditionSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.AssetCondition\n fields = (\"id\", \"asset\", \"notes\", \"created_at\")\n\n def 
to_representation(self, instance):\n instance_data = super().to_representation(instance)\n serial_no = instance.asset.serial_number\n asset_code = instance.asset.asset_code\n instance_data[\"asset\"] = f\"{serial_no} - {asset_code}\"\n return instance_data\n\n\nclass AssetMakeSerializer(serializers.ModelSerializer):\n asset_type = serializers.SerializerMethodField()\n make_label = serializers.SerializerMethodField()\n\n class Meta:\n model = models.AssetMake\n fields = (\n \"id\",\n \"name\",\n \"asset_type\",\n \"created_at\",\n \"last_modified_at\",\n \"make_label\",\n )\n\n def get_asset_type(self, obj):\n return obj.asset_type.name\n\n def get_make_label(self, obj):\n return obj.name\n\n def to_internal_value(self, data):\n _data = data.copy()\n if not _data.get(\"name\"):\n _data[\"name\"] = _data.get(\"make_label\")\n asset_type = _data[\"asset_type\"]\n if not asset_type:\n raise serializers.ValidationError(\n {\"asset_type\": [self.error_messages[\"required\"]]}\n )\n try:\n asset_type_instance = models.AssetType.objects.get(id=asset_type)\n except Exception:\n raise serializers.ValidationError(\n {\"asset_type\": [f'Invalid pk \"{asset_type}\" - object does not exist.']}\n )\n internal_value = super().to_internal_value(_data)\n internal_value.update({\"asset_type\": asset_type_instance})\n return internal_value\n\n\nclass AssetIncidentReportSerializer(serializers.ModelSerializer):\n submitted_by = serializers.SerializerMethodField()\n\n class Meta:\n model = models.AssetIncidentReport\n fields = (\n \"id\",\n \"asset\",\n \"incident_type\",\n \"incident_location\",\n \"incident_description\",\n \"injuries_sustained\",\n \"loss_of_property\",\n \"witnesses\",\n \"submitted_by\",\n \"police_abstract_obtained\",\n )\n\n def get_submitted_by(self, instance):\n if instance.submitted_by:\n return instance.submitted_by.email\n return instance.submitted_by\n\n def to_representation(self, instance):\n instance_data = super().to_representation(instance)\n serial_no = instance.asset.serial_number\n asset_code = instance.asset.asset_code\n instance_data[\"asset\"] = f\"{serial_no} - {asset_code}\"\n return instance_data\n\n\nclass AssetHealthSerializer(serializers.ModelSerializer):\n asset_type = serializers.ReadOnlyField()\n model_number = serializers.ReadOnlyField(source=\"model_number__name\")\n count_by_status = serializers.ReadOnlyField(source=\"current_status\")\n\n class Meta:\n model = models.Asset\n fields = (\"asset_type\", \"model_number\", \"count_by_status\")\n\n\nclass AssetSpecsSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.AssetSpecs\n fields = (\n \"id\",\n \"year_of_manufacture\",\n \"processor_speed\",\n \"screen_size\",\n \"processor_type\",\n \"storage\",\n \"memory\",\n )\n extra_kwargs = {\n \"processor_speed\": {\"required\": False},\n \"processor_type\": {\"required\": False},\n \"screen_size\": {\"required\": False},\n \"memory\": {\"required\": False},\n \"storage\": {\"required\": False},\n \"year_of_manufacture\": {\"required\": False},\n }\n validators = []\n\n def validate(self, fields):\n not_unique = models.AssetSpecs.objects.filter(**fields).exists()\n if not_unique:\n raise serializers.ValidationError(\n \"Similar asset specification already exist\"\n )\n return fields\n\n\nclass StateTransitionSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.StateTransition\n fields = (\"asset_incident_report\", 
\"state\")\n","sub_path":"api/serializers/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":15415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"240747425","text":"__author__ = 'Animadversio_Binxu'\nfrom source.gameAICore import *\nfrom random import gauss, random\nimport pickle, json\ndef clip(v, LB, UB):\n return min(UB, max(LB, v))\n\nmatch_results = []\nparam_evol = []\nsigma = dict(gamma=0.1, beta=0.1, alpha=0.1, HQWeight=0.1, enemyWeight=0.1, occupThreat=10)\nbound = dict(gamma=(0, 4), beta=(0, 4), alpha=(0, 4), HQWeight=(0.5,10), enemyWeight=(0.5,10), occupThreat=(100,1000))\nparam0 = dict(gamma=1.5, beta=2.0, alpha=0.3, HQWeight=1, enemyWeight=2, occupThreat=500)\nparam1 = param0\nT0 = time()\nfor geni in range(50):\n param1 = param0\n param2 = {k: clip(v + gauss(0, sigma[k]), *bound[k]) for k, v in param0.items()}\n print(\"Gen%d(%.3fsec)\\nOriginal AI param %s\\nPK Challenger AI param %s \" % (geni, time() - T0, param1, param2,))\n # This is the basic loop for playing an action sequence step by step\n playerAI = {1: (\"Origin\", greedyRiskThreatMinMaxExactPolicy, param1),\n 2: (\"Mutant\", greedyRiskThreatMinMaxExactPolicy, param2)}\n game = gameSetup2()\n winner12, gameDict12, act_record12 = gamePlay(game, playerAI, display=True)\n match_results.append((playerAI, winner12, gameDict12, act_record12))\n\n playerAI = {2: (\"Origin\", greedyRiskThreatMinMaxExactPolicy, param1),\n 1: (\"Mutant\", greedyRiskThreatMinMaxExactPolicy, param2)}\n game = gameSetup2()\n winner21, gameDict21, act_record21 = gamePlay(game, playerAI, display=True)\n match_results.append((playerAI, winner21, gameDict21, act_record21))\n if winner21 == 2 and winner12 == 1:\n print(\"Gen%d(%.3fsec)\\nOriginal AI win consistently param %s \"%(geni, time() - T0, param1, ))\n param0 = param1\n elif winner21 == 1 and winner12 == 2:\n print(\"Gen%d(%.3fsec)\\nChallenger AI win consistently param %s \"%(geni, time() - T0, param2, ))\n param0 = param2\n else:\n param0 = {k1: (v1+v2)/2 for (k1,v1), (k2,v2) in zip(param1.items(), param2.items())}\n print(\"Gen%d(%.3fsec)\\nTwo AI get tie! 
Merge them param %s \" % (geni, time() - T0, param0, ))\n param_evol.append((geni, param1, param2, winner12, winner21, gameDict12, gameDict21))\n pickle.dump(param_evol, open(\"evol_param_trace2.pkl\", \"wb\"))\n json.dump(param_evol, open(\"evol_param_trace2.json\", mode=\"w\"))\n#%%\nimport pandas as pd\nparam0_tab = pd.DataFrame([param[1] for param in param_evol])\nparam1_tab = pd.DataFrame([param[2] for param in param_evol])\nwin_tab = pd.DataFrame([{\"win12\":param[3],\"win21\":param[4]} for param in param_evol])\nstat_tab = pd.DataFrame([param[5] for param in param_evol])\nsummaryTab = pd.concat([param0_tab,param1_tab,win_tab,stat_tab],axis=1)\n#%%\n{'gamma': 1.95, 'beta': 2.00, 'alpha': 0.06, 'HQWeight': 1.32, 'enemyWeight': 1.64, 'occupThreat': 478}\nplayerAI = {2: (\"Origin\", greedyRiskThreatMinMaxExactPolicy, param0),\n 1: \"human\"} #(\"Mutant\", greedyRiskThreatMinMaxExactPolicy, param2)}4\ngame = gameSetup2()\nwinner_H, gameDict_H, act_record_H = gamePlay(game, playerAI, display=True)\n# match_results.append((playerAI, winner12, gameDict12, act_record12))","sub_path":"source/EvolvAI.py","file_name":"EvolvAI.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"347833347","text":"from utils.constants import *\nfrom utils.strings import *\n\nclass BaseAgent(object):\n '''Base class containing all the parameters for reinforcement learning'''\n\n def __init__(self, config):\n scale = 10000\n\n self.max_step = 5000 * scale\n\n self.target_q_update_step = 1 * scale\n self.learning_rate = 0.00025\n self.learning_rate_minimum = 0.00025\n self.learning_rate_decay = 0.96\n self.learning_rate_decay_step = 5 * scale\n\n self.ep_end = 0.1\n self.ep_start = 1.\n self.ep_end_t = config[MEMORY_SIZE]\n\n self.train_frequency = 4\n self.learn_start = 5. 
* scale\n\n self.min_delta = -1\n self.max_delta = 1\n\n # _test_step = 5 * scale\n # _save_step = _test_step * 10","sub_path":"code/model/baseagent.py","file_name":"baseagent.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"562487109","text":"###################################################################################\n# Python script to compute the peak locations in the theoretical strain spectrum\n# NOTE: Before running this script, make sure that the following files are present\n# [YOURDIR]/dispVsFreq.py\n# [YOURDIR]/strainVsFreq.py\n# These are generated by the getSolutionInterp.edp FreeFem script.\n# See the bash script run-script.sh for more information.\n#\n# Run:\n# python3 RealFreqIce.py ICEBERG1/\n# python3 RealFreqIce.py ICEBERG1.5/\n#\n# Load the strain/displacement vs frequency plots.\n# Find Peaks in the frequency domain.\n###################################################################################\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport sys\nimport time\nfrom scipy.signal import find_peaks\nfont = {'weight' : 'normal',\n 'size' : 15}\nmatplotlib.rc('font', **font)\n\nfilePath = sys.argv[1]\npi = np.pi\nomega = 2*pi*np.linspace(0.01,0.125,21)\nT = 2*pi/omega\ndispVsFreq = np.loadtxt(filePath+\"/dispVsFreq.dat\")\nstrainVsFreq = np.loadtxt(filePath+\"/strainVsFreq.dat\")\nnpts = len(dispVsFreq)\nnev = 10\nomeganew = 2*pi*np.linspace(0.01,0.125,npts)\n\n\nplt.figure(figsize=[10,4])\nplt.semilogy(omeganew/(2*pi), dispVsFreq[:,0],label='$u_x$')\nplt.semilogy(omeganew/(2*pi), dispVsFreq[:,1],label='$u_y$')\npeaks0, _ = find_peaks(dispVsFreq[:,0], prominence=1e-3, distance=20)\nplt.semilogy(omeganew[peaks0]/(2*pi), dispVsFreq[peaks0,0],\"x\")\npeaks1, _ = find_peaks(dispVsFreq[:,1], prominence=1e-3, distance=20)\nplt.semilogy(omeganew[peaks1]/(2*pi), dispVsFreq[peaks1,1],\"x\")\nplt.legend()\nplt.xlabel(\"$\\\\frac{\\omega}{2\\pi}$\")\nplt.ylabel(\"$||u_x||_{\\infty}\\;\\;\\;\\\\mathrm{or}\\;\\;\\;||u_y||_{\\infty}$\")\n\nplt.figure(figsize=[10,4])\nplt.semilogy(omeganew/(2*pi), strainVsFreq[:,0],label='$\\epsilon_{xx}$')\nplt.semilogy(omeganew/(2*pi), strainVsFreq[:,1],label='$\\epsilon_{yy}$')\nplt.semilogy(omeganew/(2*pi), strainVsFreq[:,2],label='$\\epsilon_{xy}$')\npeaks0, _ = find_peaks(strainVsFreq[:,0], prominence=1e-6)\nplt.semilogy(omeganew[peaks0]/(2*pi), strainVsFreq[peaks0,0],\"x\")\npeaks1, _ = find_peaks(strainVsFreq[:,1], prominence=1e-6)\nplt.semilogy(omeganew[peaks1]/(2*pi), strainVsFreq[peaks1,1],\"x\")\npeaks2, _ = find_peaks(strainVsFreq[:,2], prominence=1e-6)\nplt.semilogy(omeganew[peaks2]/(2*pi), strainVsFreq[peaks2,2],\"x\")\nplt.legend()\nplt.xlabel(\"$\\\\frac{\\omega}{2\\pi}$\")\nplt.ylabel(\"$||\\epsilon_{xx}||_{\\infty}\\;\\;\\\\mathrm{or}\\;\\;||\\epsilon_{yy}||_{\\infty}\\;\\;\\\\mathrm{or}\\;\\;||\\epsilon_{xy}||_{\\infty}$\")\n\nplt.show()\n","sub_path":"python_modules/RealFreqIce.py","file_name":"RealFreqIce.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"15980511","text":"import argparse\n\nimport csv\nfrom os.path import join as PJ\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom datasets.Dataset import CIFAR10Dataset\nfrom networks import ResNet18, HaHaNet\n\nfrom utils import config, data_transform\nfrom datasets.evaluation import accuracy\n\n\ndef test(dataloader, model):\n 
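\"\"\"Evaluate the model on the given dataloader and return its accuracy.\"\"\"\n    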
model.eval()\n    results = {'predict': [], 'label': []}\n    for it, (labels, images) in enumerate(dataloader):\n\n        # Move images onto the GPU (labels are only used for bookkeeping on the CPU)\n        images = images.detach().cuda()\n        labels = labels.detach()\n\n        # Take class with largest score as predict\n        predicts = model(images)\n        predicts = torch.argmax(predicts, 1)\n\n        # Record results\n        results['predict'] += predicts.tolist()\n        results['label'] += labels.tolist()\n\n        if (it + 1) % max(1, len(dataloader) // 10) == 0:\n            print(f\"it: [{it+1:03d}/{len(dataloader):03d}]\", end='\\r')\n    acc = accuracy(results['label'], results['predict'])\n    return acc\n\n\nif __name__ == '__main__':\n    ########################################\n    # Environment and Experiment setting\n    ########################################\n    # Parse arguments\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--exp', '-e', type=str, default='exp1_ResNet18')\n    parser.add_argument('--model_weights', '-w', type=str, default='')\n    args = parser.parse_args()\n\n    # Load experiment config\n    config_path = f\"./configs/{args.exp}.yaml\"\n    config = config(config_path)\n    exp_name = config['exp_name']\n    print(f\"EXP: {exp_name}\")\n\n    save_root = PJ(f\"./results\", exp_name)\n\n    # Show some experiment info\n    model_weights = args.model_weights\n    print(f\"Model: {config['model']}, Weights: {model_weights}\")\n    print(f\"Pretrained: {config['pretrained']}, Freeze: {config['freeze']}\")\n\n    ########################################\n    # Data loader\n    ########################################\n    class2idx = config['class2idx']\n    idx2class = {class2idx[k]: k for k in class2idx.keys()}\n    # Dataset\n    transform = data_transform(config, train=False)\n    testset = CIFAR10Dataset(config['data_root'], config['test_file'], class2idx, transform)\n    # Dataloader\n    testloader = DataLoader(testset, config['test_size'], shuffle=False, num_workers=config['num_workers'])\n\n    ########################################\n    # Model\n    ########################################\n    num_class = len(class2idx)\n    input_size = config['new_size']\n    if config['model'] == 'ResNet18':\n        model = ResNet18(input_size, num_class, config['pretrained'], config['freeze'])\n    elif config['model'] == 'HaHaNet':\n        model = HaHaNet(input_size, num_class, config['pretrained'], config['freeze'])\n    else:\n        raise ValueError(f\"Model {config['model']} is not supported.\")\n\n    # Drop model into GPU\n    model.cuda()\n\n    ########################################\n    # Loading model\n    ########################################\n    model.load_state_dict(torch.load(PJ(save_root, model_weights)))\n    epoch = int(model_weights[:4])\n    print(f\"Loading model {model_weights} succeeded!\")\n\n    ########################################\n    # Start evaluate (testing set)\n    ########################################\n    model.eval()\n    torch.set_grad_enabled(False)\n    acc = test(testloader, model)\n    print(f\"Accuracy: {acc:0.4f}\\n\")\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"522419239","text":"#\n#Task 8: Wheel\n#Georgios Kampanos\n#2021259\n#\n\n\nfrom Canvas import *\n\nangle = 0\n\noval_x = 100;oval_y = 100 \t\t#starting position of oval\nline_x = 100;line_y = 100\t\t#starting position of line\nline_x1 = end_x(line_x,70,angle); line_y1 = end_y(line_y,70,angle)#end position of line\n\ncreate_oval(oval_x-70,oval_y-70,oval_x+70,oval_y+70)#oval with radius of 70\n\nwhile angle <= 360:\n\tcreate_line(line_x,line_y,line_x1,line_y1)\t#create a new line\n\tangle += 
20\t\t\t#update lines angle\n\tline_x1 = end_x(line_x,70,angle); line_y1 = end_y(line_y,70,angle)#update end of line\n\ncomplete()","sub_path":"Unit 3/prep_task8.py","file_name":"prep_task8.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"103275993","text":"from django.utils import timezone\nfrom django.contrib import admin\n\n\nclass CoreAdmin(admin.ModelAdmin):\n\n    \"\"\"\n    CoreAdmin: Model admin\n    ======================\n\n    Define behavior of administration UI for Core model.\n\n    Methods:\n\n    - I{save_model}\n    \"\"\"\n\n    def save_model(self, request, obj, form, change):\n\n        \"\"\"\n        On create/update of an instance through the administration UI, set I{author}\n        to the current user and the I{changed} date to now.\n        \"\"\"\n\n        # Add author only on creation action.\n        if not change:\n            obj.set_author(request.user)\n\n        obj.set_changed(timezone.now())\n\n        super(CoreAdmin, self).save_model(request, obj, form, change)\n","sub_path":"rcore/core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"406347634","text":"\"\"\"\n    This class checks for the\n    Authorization header, if it exists in the headers list,\n    and also checks that the format of the Authorization\n    value is\n    bearer thetokenhere\n    ie:\n    Authorization: Bearer yourtokenhere\n\"\"\"\nimport jwt\nfrom datetime import datetime\nfrom django.conf import settings\nfrom rest_framework import authentication\nfrom rest_framework.exceptions import AuthenticationFailed\nfrom .models import User\nfrom rest_framework_jwt.settings import api_settings\n\n\nclass JWTAuthentication(authentication.BaseAuthentication):\n\n    def authenticate(self, request):\n        \"\"\"\n        This method checks if the token is valid;\n        the return value is the user and their token\n        \"\"\"\n\n        auth = authentication.get_authorization_header(request).split()\n\n        # Ensure we have a token\n        # get_authorization_header returns headers as a bytestring,\n        # hence the need to check\n        # against b'bearer'\n\n        if not auth or auth[0].lower() != b'bearer':\n            return None\n        try:\n            token = auth[1]\n\n        except IndexError:\n            message = \"Invalid Authorization header. No token provided\"\n            raise AuthenticationFailed(message)\n        # Attempt decoding the token\n        try:\n            payload = jwt.decode(token, settings.SECRET_KEY)\n        except jwt.InvalidTokenError:\n            raise AuthenticationFailed('Invalid token.')\n\n        # Get the user owning the token by the decoded email prop\n        try:\n            user = User.objects.get(email=payload['email'])\n        except User.DoesNotExist:\n            raise AuthenticationFailed('No user found for token provided')\n\n        return (user, token)\n\n    def validate_token(self, token):\n        # we use the same key for encoding as well\n\n        try:\n            jwt.decode(token, settings.SECRET_KEY)\n        except (jwt.DecodeError, jwt.InvalidTokenError):\n            return False\n        except jwt.ExpiredSignature:\n            return False\n        return True\n","sub_path":"authors/apps/authentication/backends.py","file_name":"backends.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}
+{"seq_id":"599767499","text":"# -*- encoding: utf-8 -*-\nimport sys\n\nimport versioneer\nversioneer.VCS = 'git'\nversioneer.style = 'pep440'\nversioneer.versionfile_source = 'borgweb/_version.py'\nversioneer.versionfile_build = 'borgweb/_version.py'\nversioneer.tag_prefix = ''\nversioneer.parentdir_prefix = 'borgweb-' # dirname like 
'myproject-1.2.0'\n\nmin_python = (3, 2)\nif sys.version_info < min_python:\n print(\"BorgWeb requires Python %d.%d or later\" % min_python)\n sys.exit(1)\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nwith open('README.rst', 'r') as fd:\n long_description = fd.read()\n\ncmdclass = versioneer.get_cmdclass()\n\nsetup(\n name='borgweb',\n version=versioneer.get_version(),\n author='The Borg Collective (see AUTHORS file)',\n author_email='borgbackup@librelist.com',\n url='https://borgweb.github.io/',\n description='Browser-based user interface for BorgBackup',\n long_description=long_description,\n license='BSD',\n platforms=['Linux', 'MacOS X', 'FreeBSD', ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: BSD :: FreeBSD',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: System :: Archiving :: Backup',\n ],\n packages=['borgweb', 'borgweb._tests'],\n scripts=['scripts/borgweb'],\n cmdclass=cmdclass,\n install_requires=[\n 'flask',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"580458925","text":"# Copyright (c) 2018 SONATA-NFV and Paderborn University\n# ALL RIGHTS RESERVED.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Neither the name of the SONATA-NFV, Paderborn University\n# nor the names of its contributors may be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# This work has been performed in the framework of the SONATA project,\n# funded by the European Commission under Grant number 671517 through\n# the Horizon 2020 and 5G-PPP programmes. 
The authors would like to\n# acknowledge the contributions of their colleagues of the SONATA\n# partner consortium (www.sonata-nfv.eu).\nimport unittest\n\nfrom emuvim.api.openstack.resources.flow_classifier import FlowClassifier\n\n\nclass FlowClassifierTest(unittest.TestCase):\n def test_empty_flow_classifier_to_match_conversion(self):\n c = FlowClassifier(\"test\")\n self.assertEqual(\"dl_type=2048\", c.to_match())\n\n def test_tcp_ip_flow_classifier_to_match_conversion(self):\n c = FlowClassifier(\"test\")\n c.protocol = \"tcp\"\n c.source_ip_prefix = \"10.0.0.10/32\"\n c.destination_ip_prefix = \"10.0.0.12/32\"\n c.destination_port_range_min = 80\n c.destination_port_range_max = 80\n self.assertEqual(\"dl_type=2048,nw_proto=6,nw_src=10.0.0.10/32,nw_dst=10.0.0.12/32,tp_dst=80\", c.to_match())\n","sub_path":"src/emuvim/test/unittests/test_flow_classifier.py","file_name":"test_flow_classifier.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"563544671","text":"# Author: kk.Fang(fkfkbill@gmail.com)\n\"\"\"重构的分析模块\"\"\"\n\n__all__ = [\n \"calc_statistics\"\n]\n\nfrom utils import const\nfrom utils.perf_utils import timing\nfrom models.mongo import *\n\n# 统计数据model\n# 注意:如果统计数据有先后依赖,需要在这里体现。\nSTATS_MODELS = (\n StatsSchemaRate,\n StatsCMDBRate,\n StatsCMDBSQLText,\n StatsRiskSqlRule,\n StatsRiskObjectsRule,\n StatsNumDrillDown,\n StatsCMDBPhySize,\n StatsCMDBLoginUser,\n StatsCMDBSQLPlan,\n StatsLoginUser\n)\n\n\ndef check_for_requirement():\n \"\"\"\n 检查统计模块的依赖关系和运行顺序是否正常\n :return:\n \"\"\"\n print(\"statistics models requirement checking ...\")\n processed_models = []\n for m in STATS_MODELS:\n required_models_but_not_ready = [\n required_model\n for required_model in m.requires\n if required_model not in processed_models\n ]\n if required_models_but_not_ready:\n print(f\"Failing: {m} requires {required_models_but_not_ready} to run first!\")\n raise const.RequiredModelNotRunException\n processed_models.append(m)\n\n\n@timing()\ndef calc_statistics(*args, **kwargs):\n \"\"\"\n 计算统计数据\n :return:\n \"\"\"\n check_for_requirement()\n for m in STATS_MODELS:\n print(f\"* Making statistics data for {m.__doc__} ...\")\n an_iterator = m.generate(*args, **kwargs)\n if an_iterator is None:\n print(\"Returned None, should be an iterator. 
Skipped.\")\n continue\n docs = list(an_iterator)\n if not docs:\n print(\"No statistics object to be saved.\")\n continue\n m.objects.insert(docs)\n\n\n@timing()\ndef analyse(task_record_id, cmdb_id, schema_name):\n \"\"\"分析\"\"\"\n raise NotImplementedError\n","sub_path":"utils/analyse_utils.py","file_name":"analyse_utils.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"574773968","text":"import numpy as np\nfrom OTM4RL import OTM4RL\nfrom matplotlib import pyplot as plt\nfrom matplotlib.collections import LineCollection\nimport matplotlib.colors as pltc\nfrom random import sample\n\nclass otmEnvDiscrete:\n\n def __init__(self, env_info, configfile):\n\n self.time_step = env_info[\"time_step\"]\n self.plot_precision = env_info[\"plot_precision\"]\n\n assert (type(self.plot_precision) == int and self.plot_precision >= 1), \"plot_precision must be an integer greater than or equal to 1\"\n\n self.otm4rl = OTM4RL(configfile)\n self.num_states = env_info[\"num_states\"]\n self.num_actions = env_info[\"num_actions\"]\n self.controllers = self.otm4rl.get_controller_infos()\n self.num_intersections = len(self.controllers)\n self.action_space = range(self.num_actions ** self.num_intersections)\n self.state_space = range(self.num_states ** (self.num_intersections * 2))\n self.max_queues = self.otm4rl.get_max_queues()\n self.buffer = env_info[\"buffer\"]\n self.queue_buffer = dict(list(zip(self.otm4rl.get_link_ids(), [{\"waiting\": [], \"transit\": []} for i in self.otm4rl.get_link_ids()])))\n self.signal_buffer = dict(list(zip(self.controllers.keys(), [[] for i in self.controllers.keys()])))\n # self.seed()\n\n # def seed(self, seed=None):\n # self.np_random, seed = seeding.np_random(seed)\n # return [seed]\n\n def encode_state(self, state):\n encoded_state = 0\n state_vec = []\n road_connection_info = self.otm4rl.get_road_connection_info()\n i = 0\n for c_id, controller in self.controllers.items():\n stages = controller[\"stages\"]\n for stage in stages:\n in_link_ids = []\n agg_queue = 0\n max_queue = 0\n phase_ids = stage[\"phases\"]\n for phase_id in phase_ids:\n road_connections = self.otm4rl.get_signals()[c_id][\"phases\"][phase_id][\"road_conns\"]\n for road_connection in road_connections:\n in_link_ids.append(road_connection_info[road_connection][\"in_link\"])\n in_link_ids = list(set(in_link_ids))\n for link_id in in_link_ids:\n agg_queue += state[link_id][\"waiting\"]\n max_queue += self.max_queues[link_id]\n encoded_stage_state = int(agg_queue * self.num_states / max_queue) if agg_queue != max_queue else self.num_states - 1\n state_vec.append(encoded_stage_state)\n encoded_state += encoded_stage_state * (self.num_states ** i)\n i += 1\n state_vec.reverse()\n return encoded_state, np.array(state_vec)\n\n def decode_action(self, action):\n a = action\n signal_command = dict(list(zip(self.controllers.keys(), np.zeros(self.num_intersections).astype(int))))\n i = self.num_intersections - 1\n while a != 0:\n controller_id = list(self.controllers.keys())[i]\n signal_command[controller_id] = a % self.num_actions\n a = a // self.num_actions\n i -= 1\n\n return signal_command\n\n def set_state(self, state):\n self.otm4rl.set_queues(state)\n self.state = self.encode_state(state)\n\n def reset(self):\n state = self.max_queues.copy()\n in_links = set([rc_info[\"in_link\"] for rc_info in self.otm4rl.get_road_connection_info().values()])\n out_links = set([rc_info[\"out_link\"] for rc_info in 
self.otm4rl.get_road_connection_info().values()])\n out_links = list(out_links - in_links)\n for link_id in state.keys():\n if link_id in out_links:\n state[link_id] = {\"waiting\": int(0), \"transit\": int(0)}\n else:\n p = np.random.random()\n transit_queue = p*state[link_id]\n q = np.random.random()\n waiting_queue = q*(state[link_id] - transit_queue)\n state[link_id] = {\"waiting\": round(waiting_queue), \"transit\": round(transit_queue)}\n self.otm4rl.initialize()\n self.set_state(state)\n self.add_queue_buffer()\n\n return self.state\n\n def step(self, action):\n assert action in self.action_space, \"%r (%s) invalid\" % (action, type(action))\n\n self.otm4rl.set_control(self.decode_action(action))\n self.add_signal_buffer()\n\n self.otm4rl.advance(self.time_step)\n\n next_state = self.otm4rl.get_queues()\n self.add_queue_buffer()\n\n self.state, state_vec = self.encode_state(next_state)\n reward = -state_vec.sum()\n\n return self.state, reward\n\n def add_queue_buffer(self):\n\n if self.buffer == True:\n queues = self.otm4rl.get_queues()\n for link_id in queues.keys():\n self.queue_buffer[link_id][\"waiting\"].append(queues[link_id][\"waiting\"])\n self.queue_buffer[link_id][\"transit\"].append(queues[link_id][\"transit\"])\n else:\n pass\n\n def add_signal_buffer(self):\n\n if self.buffer == True:\n signals = self.otm4rl.get_control()\n for c_id in signals:\n self.signal_buffer[c_id].append(signals[c_id])\n else:\n pass\n\n def plot_queues(self, link_id, queue_type, from_time = 0, to_time = 10):\n\n road_connection_info = self.otm4rl.get_road_connection_info()\n\n link_rc = []\n link_controller = None\n link_stages = []\n for rc, rc_info in road_connection_info.items():\n if link_id == rc_info[\"in_link\"]:\n link_rc.append(rc)\n\n for c_id in self.controllers.keys():\n for stage in range(len(self.controllers[c_id][\"stages\"])):\n phase_ids = self.controllers[c_id][\"stages\"][stage][\"phases\"]\n for phase_id in phase_ids:\n road_connections = self.otm4rl.get_signals()[c_id][\"phases\"][phase_id][\"road_conns\"]\n if set.intersection(set(link_rc),set(road_connections)) != set():\n link_stages.append(stage)\n if len(link_stages) != 0:\n link_controller = c_id\n break\n\n if link_controller == None:\n print(\"This link is leaving the network or it is a demand link, so it is not impacted by traffic lights\")\n return\n\n fig, ax = plt.subplots()\n queues = self.queue_buffer[link_id][queue_type]\n step = self.time_step/self.plot_precision\n ax.plot([i*step for i in range(len(queues))], queues)\n\n stages = np.array(self.signal_buffer[link_controller])\n stage_times = np.array(range(len(stages)))*self.time_step\n aux = np.array([stages[i] if (i == 0 or stages[i-1] != stages[i]) else -1 for i in range(len(stages))])\n changing_stages = np.array([aux[i] if (i == 0 or aux[i] in link_stages or (aux[i-1] in link_stages and aux[i] not in link_stages)) else -1 for i in range(len(aux))])\n stages = np.extract(changing_stages >= 0, stages)\n stage_times = np.extract(changing_stages >=0, stage_times)\n colors = [\"g\" if stages[i] in link_stages else \"r\" for i in range(len(stages))]\n for i in range(len(colors)):\n ax.axvline(x=stage_times[i], color = colors[i])\n y = (ax.get_ylim()[1] - ax.get_ylim()[0])*0.96 + ax.get_ylim()[0]\n ax.text(stage_times[i] + 0.05*self.time_step, y, stages[i] if stages[i] in link_stages else \"\")\n\n plt.title(\"Link \" + str(link_id) + \" - Queue dynamics (\" + queue_type + \" queue)\")\n plt.show()\n\n def build_network_lines(self, state):\n\n nodes = {}\n for 
node_id in self.otm4rl.get_node_ids():\n node_info = self.otm4rl.get_node_with_id(node_id)\n nodes[node_id] = {'x': node_info.getX(), 'y': node_info.getY()}\n\n lines = []\n norms = []\n minX = float('Inf')\n maxX = -float('Inf')\n minY = float('Inf')\n maxY = -float('Inf')\n\n for link_id in self.otm4rl.get_link_ids():\n link_info = self.otm4rl.get_link_with_id(link_id)\n\n start_point = nodes[link_info.getStart_node_id()]\n end_point = nodes[link_info.getEnd_node_id()]\n\n x0 = start_point['x']\n y0 = start_point['y']\n x1 = end_point['x']\n y1 = end_point['y']\n\n if x1-x0 > 0:\n y0 -= 150\n y1 -= 150\n\n if x1-x0 < 0:\n y0 += 150\n y1 += 150\n\n if y1-y0 > 0:\n x0 += 100\n x1 += 100\n\n if y1-y0 < 0:\n x0 -= 100\n x1 -= 100\n\n p0 = (x0, y0)\n p1 = (x1, y1)\n\n lines.append([p0, p1])\n norms.append(state[link_id][\"waiting\"]/self.max_queues[link_id])\n\n minX = min([minX, p0[0], p1[0]])\n maxX = max([maxX, p0[0], p1[0]])\n minY = min([minY, p0[1], p1[1]])\n maxY = max([maxY, p0[1], p1[1]])\n\n return lines, norms, minX, maxX, minY, maxY\n\n def get_signal_positions(self, lines, control):\n\n link_coords = dict(zip(self.otm4rl.get_link_ids(), lines))\n road_connection_info = self.otm4rl.get_road_connection_info()\n signal_positions = dict()\n for c_id, stage in control.items():\n phase_ids = self.controllers[c_id][\"stages\"][stage][\"phases\"]\n for phase_id in phase_ids:\n road_connections = self.otm4rl.get_signals()[c_id][\"phases\"][phase_id][\"road_conns\"]\n for road_connection in road_connections:\n in_link_id = road_connection_info[road_connection][\"in_link\"]\n out_link_id = road_connection_info[road_connection][\"out_link\"]\n signal_positions[road_connection] = {\"in_link\": link_coords[in_link_id], \"out_link\": link_coords[out_link_id]}\n return signal_positions\n\n def plot_environment(self, state, control):\n fig, ax = plt.subplots()\n\n lines, norms, minX, maxX, minY, maxY = self.build_network_lines(state)\n\n cmap = plt.get_cmap('Wistia')\n all_colors = [cmap(z) for z in norms]\n lc = LineCollection(lines, colors = all_colors)\n lc.set_linewidths(15)\n ax.add_collection(lc)\n\n dY = maxY - minY\n dX = maxX - minX\n\n if (dY > dX):\n ax.set_ylim((minY, maxY))\n c = (maxX + minX) / 2\n ax.set_xlim((c - dY / 2, c + dY / 2))\n else:\n ax.set_xlim((minX, maxX))\n c = (maxY + minY) / 2\n ax.set_ylim((c - dX / 2, c + dX / 2))\n\n signal_positions = self.get_signal_positions(lines, control)\n\n for rc in signal_positions.values():\n p0 = rc[\"in_link\"][0]\n p1 = rc[\"in_link\"][1]\n ax.annotate(s='', xy=p1, xytext=p0, arrowprops=dict(arrowstyle='-'))\n p0 = rc[\"out_link\"][0]\n p1 = rc[\"out_link\"][1]\n ax.annotate(s='', xy=p1, xytext=p0, arrowprops=dict(arrowstyle='->'))\n\n plt.show()\n # plot traffic lights\n # show time\n\n # def render(self, mode='human'):\n # #plot the queue profile over time\n # #render the network\n # pass\n #\n # def close(self):\n # #stop rendering\n # pass\n","sub_path":"src/otm/otm_env.py","file_name":"otm_env.py","file_ext":"py","file_size_in_byte":11362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"267515277","text":"\nfrom utils import get_session\nfrom models import ElibraryProduct, SciProduct\n\nimport re\n\n#init DB session\nsession = get_session()\n\n# getting all products from db\nelibrary_products = session.query(ElibraryProduct).all()\nsci_products = session.query(SciProduct).all()\n\nfor el_product in elibrary_products:\n\n for sci_product in sci_products:\n el_title_words = 
re.sub(r'\\s', ' ', re.sub(r'\\W', ' ', el_product.title.lower())).split(' ')\n sc_title_words = re.sub(r'\\s', ' ', re.sub(r'\\W', ' ', sci_product.title.lower())).split(' ')\n \n counter = 0\n for w in el_title_words:\n if w in sc_title_words:\n counter += 1\n\n if counter / len(el_title_words) > 0.7:\n print('match!', el_product.id, sci_product.id)\n print(el_product.title, sci_product.title)\n print()\nsession.close()\n\n\n\n","sub_path":"src/match_products.py","file_name":"match_products.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"57981629","text":"import discord\nfrom discord.ext import commands\nimport requests\nimport json\n\n\nlol_api = 'RGAPI-d9beda42-82b8-4c75-8a6d-bf4da9928e08'\n\n\nclass LeagueofLegends():\n \"\"\"Commands relating to League of Legends\"\"\"\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(pass_context=True, aliases=['list', 'registered'],\n help=\"list saved LoL igns for server\")\n async def list_ign(self, ctx):\n with open('igns/igns-{}'.format(ctx.message.server.id), 'r+') as f:\n lines = f.readlines()\n await self.bot.say(\"Registered summoners:\")\n for line in lines:\n await self.bot.say(line)\n\n\n @commands.command(pass_context=True, aliases=['register', 'add'],\n help=\"add a LoL ign to server list\")\n async def add_ign(self, ctx, *ign):\n ign = ' '.join(ign)\n\n resp = requests.get(\n 'https://na1.api.riotgames.com/lol/summoner/v3/summoners/by-name/{}?api_key={}'.format(ign, lol_api))\n resp = json.loads(resp.text)\n print(resp)\n if 'status' not in resp:\n ign = resp['name']\n elif resp['status']['status_code'] == 404:\n await self.bot.say(\"{} is not a registered summoner\".format(ign))\n return\n else:\n await self.bot.say(\"Summoner Error {}\".format(str(resp['status']['status_code'])))\n return\n\n with open('igns/igns-{}'.format(ctx.message.server.id), 'a+') as f:\n f.write(ign+'\\n')\n await self.bot.say(\"{} added\".format(ign))\n\n @commands.command(pass_context=True, aliases=['delete', 'remove'],\n help=\"remove a LoL ign from server list\")\n async def remove_ign(self, ctx, *ign):\n ign = ' '.join(ign)\n\n resp = requests.get(\n 'https://na1.api.riotgames.com/lol/summoner/v3/summoners/by-name/{}?api_key={}'.format(ign, lol_api))\n resp = json.loads(resp.text)\n print(resp)\n if 'status' not in resp:\n ign = resp['name']\n elif resp['status']['status_code'] == 404:\n await self.bot.say(\"{} is not a summoner\".format(ign))\n return\n else:\n await self.bot.say(\"Summoner Error {}\".format(str(resp['status']['status_code'])))\n return\n\n with open('igns/igns-{}'.format(ctx.message.server.id), 'r+') as f:\n lines = f.readlines()\n if ign+'\\n' in lines:\n lines.remove(ign+'\\n')\n else:\n await self.bot.say(\"{} is not on my summoner list\".format(ign))\n return\n with open('igns/igns-{}'.format(ctx.message.server.id), 'w') as f:\n f.writelines(lines)\n await self.bot.say(\"{} removed\".format(ign))\n\n\n @commands.command(pass_context=True, aliases=['ingame'],\n help=\"check if all saved igns are in-game\")\n async def check_ingame(self, ctx, *ign):\n if ign:\n ign = ' '.join(ign)\n await self.bot.say(gamestatus(ign))\n else:\n s = set()\n with open('igns/igns-{}'.format(ctx.message.server.id), 'r+') as f:\n for line in f:\n s.add(line.replace('\\n', '').lower())\n for ign in s:\n await self.bot.say(gamestatus(ign))\n\n\ndef gamestatus(summ):\n resp = requests.get(\n 
'https://na1.api.riotgames.com/lol/summoner/v3/summoners/by-name/{}?api_key={}'.format(summ, lol_api))\n    resp = json.loads(resp.text)\n    print(resp)\n    if 'status' in resp and resp['status']['status_code'] == 404:\n        return \"{} is not a registered summoner\".format(summ)\n    elif 'status' in resp:\n        return \"Summoner Error {}\".format(str(resp['status']['status_code']))\n    sid = resp['id']\n    name = resp['name']\n    resp = requests.get(\n        'https://na1.api.riotgames.com/lol/spectator/v3/active-games/by-summoner/{}?api_key={}'.format(sid, lol_api))\n    resp = json.loads(resp.text)\n    print(resp)\n    if 'status' not in resp:\n        mins = int(resp['gameLength'] / 60)\n        secs = resp['gameLength'] % 60\n        return \"{} has been in a(n) {} game for {}:{}\".format(name, resp['gameMode'], mins, secs)\n    elif resp['status']['status_code'] == 404:\n        return \"{} is not in game\".format(name)\n    else:\n        return \"Game Error {}\".format(str(resp['status']['status_code']))\n\n\ndef setup(bot):\n    bot.add_cog(LeagueofLegends(bot))","sub_path":"league.py","file_name":"league.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} {"seq_id":"205199272","text":"import numpy as np\nimport cv2\nfrom numpy.core.numeric import cross\n\ndef draw_lines(im, lines):\n    for l in lines:\n        r,t = l[0]\n        p1,p2 = hough_to_projective(r,t)\n        cv2.line(im,p1[0:2],p2[0:2],(255,0,0),2)\n    cv2.imshow(\"lines\", im)\n    cv2.waitKey(0)\n\ndef hough_to_projective(ro,theta):\n    # sine and cosine\n    s,c = [np.sin(theta),np.cos(theta)]\n    # Central point of the line\n    xb,yb = [c*ro, s*ro]\n    # we need two far-away endpoints to build the line\n    p1 = [int(xb + 10000*(-s)),int(yb + 10000*c)]\n    p2 = [int(xb - 10000*(-s)),int(yb - 10000*c)]\n    return [p1[0],p1[1],1], [p2[0],p2[1],1]\n\n# from the equations of the two lines we determine whether they intersect, and at which point\n# we use projective geometry\ndef intersection_point(l1_eq,l2_eq):\n    # the cross product of two vectors gives the vector perpendicular to both; here the result will be\n    # the intersection point\n    p = np.cross(l1_eq,l2_eq) # p = [0,0,0] ----> the lines do not intersect\n    p = p if p.all() == 0 else p/p[2] # divide since we are in a three-dimensional (projective) space; this converts to homogeneous coordinates\n    return np.array([p[0], p[1]])\n\n\ndef get_lines(im):\n    # list to hold the selected lines\n    ultimate_lines = []\n    # thresholds for the lines\n    r_thres = 80\n    # Canny to get the edges\n    edges = cv2.Canny(cv2.cvtColor(im,cv2.COLOR_RGB2GRAY), 150, 200, 3)\n    # T,edges = cv2.threshold(cv2.cvtColor(im,cv2.COLOR_BGR2GRAY),200, 255, cv2.THRESH_BINARY)\n    # then we will be applying hough transform to the edged image in order to obtain the lines\n    lines = cv2.HoughLines(edges,1,np.pi/180, 200) # we only take the lines that have over 200 points\n    # we take the first line as a reference\n    ref_line = lines[0]\n    # getting the line in an euclidean EXPLICIT representation\n    p1,p2 = hough_to_projective(ref_line[0][0], ref_line[0][1])\n    ref_eq = np.cross(p1,p2)\n    ultimate_lines.append(ref_line)\n    for l in lines[1:]:\n        r,t = l[0]\n        t_deg = t*180/np.pi \n        # getting the line in an euclidean EXPLICIT representation\n        p1,p2 = hough_to_projective(r, t)\n        l_eq = np.cross(p1,p2)\n        # testing whether the lines intersect\n        p = intersection_point(ref_eq, l_eq)\n        # getting the ro parameters of all lines selected until now\n        all_ro = np.array(ultimate_lines)[:,0,0]\n        # if r verifies the threshold for all the other ro's\n        if ((abs(all_ro - r) > r_thres)).all() == True and (p.all() != 0) :# and (t_deg < 80 and t_deg > 95):# reference_line[0][0] = ro\n            ultimate_lines.append(l)\n            break\n    return np.array(ultimate_lines)\n\ndef get_vanishing_point(lines):\n    # getting the vanishing point as the mean point of the intersecting points of the lines\n    intersections = []\n    for l1 in lines:\n        # getting the equation of the first line\n        p1,p2 = hough_to_projective(l1[0][0], l1[0][1])\n        l1_eq = np.cross(p1,p2)\n        for l2 in lines:\n            if np.allclose(l1,l2) == False:\n                # getting the equation of the second line\n                p3,p4 = hough_to_projective(l2[0][0], l2[0][1])\n                l2_eq = np.cross(p3,p4)\n                # calculate the intersecting point of the lines\n                p = intersection_point(l1_eq,l2_eq)\n                # if the point is not in intersections\n                if np.array(intersections == p).any() == False:\n                    intersections.append(p)\n    return np.mean(intersections,axis=0)\n\ndef test_lines(imset):\n    detected = 0\n    for im in imset:\n        test_im = cv2.imread(im)\n        # resizing the image for testing to half of its size\n        test_im = cv2.resize(test_im,(int(test_im.shape[0]/2), int(test_im.shape[1]/2)))\n        # Now we have to apply hough transform to obtain the lines of the image\n        lines = get_lines(test_im)\n        detected = detected+1 if len(lines) > 1 else detected\n\n    print(\"accuracy: \", detected/len(imset))","sub_path":"src/geometric_utils.py","file_name":"geometric_utils.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} {"seq_id":"116219383","text":"\"\"\" Naive Bayes\n\n * Building a supervised document classifier with naive Bayes\n\n\"\"\"\n\nimport sys\nimport math\nimport json\nimport codecs\nfrom janome.tokenizer import Tokenizer\n\n\nclass NaiveBayes():\n    \"\"\"\n\n    Implementation of a supervised document classifier using naive Bayes\n\n    Attributes\n    ----------\n    vocabularies : set\n        dictionary of known words\n    word_count : dict\n        word frequencies per category\n        {'entertainment': {'TV': 4, 'music': 10, ...} }\n    category_count : dict\n        number of documents in each category\n        {'entertainment': 80, 'sports': 80, ...}\n\n    \"\"\"\n\n    def __init__(self):\n        self.vocabularies = set()\n        self.word_count = {}\n        self.category_count = {}\n\n    def to_words(self, article):\n        \"\"\"\n\n        Tokenize the body of an article\n\n        Parameters\n        ----------\n        article : str\n            body of the article\n\n        Returns\n        -------\n        filtered_tokens : tuple\n            tuple holding the tokenized article (nouns/verbs/adjectives only)\n\n        \"\"\"\n        t = Tokenizer()\n        filtered_tokens = []\n        tokens = t.tokenize(article)\n        for token in tokens:\n            ps = token.part_of_speech.split(\",\")[0]\n            if ps not in ['名詞', '動詞', '形容詞']:  # noun, verb, adjective (Janome POS tags)\n                continue\n            w = token.base_form\n            if w == \"*\" or w == \"\":\n                w = token.surface\n            if w == \"\" or w == \"\\n\":\n                continue\n            filtered_tokens.append(w)\n        return tuple(filtered_tokens)\n\n    def word_count_up(self, word, category):\n        \"\"\"\n\n        Count the occurrences of a word\n\n        Parameters\n        ----------\n        word : str\n            word\n        category : str\n            category\n\n        \"\"\"\n        self.word_count.setdefault(category, {})\n        self.word_count[category].setdefault(word, 0)\n        self.word_count[category][word] += 1\n        self.vocabularies.add(word)\n\n    def category_count_up(self, category):\n        \"\"\"\n\n        Count the number of documents in a category\n\n        Parameters\n        ----------\n        category : str\n            category\n\n        \"\"\"\n        self.category_count.setdefault(category, 0)\n        self.category_count[category] += 1\n\n    def train(self, doc, category):\n        \"\"\"\n\n        Train the classifier (build the dictionaries)\n\n        Parameters\n        ----------\n        doc : str\n            article\n        category : str\n            category\n\n        \"\"\"\n        words = self.to_words(doc)\n        for word in words:\n            self.word_count_up(word, category)\n        self.category_count_up(category)\n\n    def prior_prob(self, category):\n        \"\"\"\n\n        Compute the prior probability\n\n        Parameters\n        ----------\n        category : str\n            category\n\n        \"\"\"\n        num_categories = sum(self.category_count.values())\n        num_docs = self.category_count[category]\n        return num_docs / num_categories\n\n    def num_of_appearance(self, word, category):\n        \"\"\"\n\n        Return how often a given word appears in a given category\n\n        Parameters\n        ----------\n        word : str\n            word\n        category : str\n            category\n\n        \"\"\"\n        if word in self.word_count[category]:\n            return self.word_count[category][word]\n        return 0\n\n    def word_prob(self, word, category):\n        \"\"\"\n\n        Compute P(word|category)\n\n        Parameters\n        ----------\n        word : str\n            word\n        category : str\n            category\n\n        Returns\n        -------\n        prob : float\n            P(word|category)\n\n        \"\"\"\n        # the +1 implements additive (Laplace) smoothing\n        numerator = self.num_of_appearance(\n            word, category) + 1\n        denominator = sum(\n            self.word_count[category].values()) + len(self.vocabularies)\n\n        prob = numerator / denominator\n        return prob\n\n    def score(self, words, category):\n        \"\"\"\n\n        Compute the score of a document\n\n        Parameters\n        ----------\n        words : list\n            list of the words appearing in the document\n        category : str\n            category\n\n        Returns\n        -------\n        score : float\n            score of the document for a given category\n            P(category|document)\n\n        \"\"\"\n        # take the log because word_prob can be an extremely small value\n        score = math.log(self.prior_prob(category))\n        for word in words:\n            score += math.log(self.word_prob(word, category))\n        return score\n\n    def classify(self, doc):\n        \"\"\"\n\n        Score the document against every category and predict its category\n\n        Parameters\n        ----------\n        doc : str\n            document\n\n        Returns\n        -------\n        best_guessed_category : str\n            predicted category (the one with the highest score)\n\n        \"\"\"\n        best_guessed_category = None\n        max_prob_before = -sys.maxsize\n        words = self.to_words(doc)\n\n        for category in self.category_count.keys():\n            prob = self.score(words, category)\n            if prob > max_prob_before:\n                max_prob_before = prob\n                best_guessed_category = category\n        return best_guessed_category\n\n    def save_model(self, save_path):\n        \"\"\"\n\n        Save the trained model\n\n        Parameters\n        ----------\n        save_path : str\n            where to save the model\n\n        \"\"\"\n        json_model = {\n            \"vocabularies\": list(self.vocabularies),\n            \"word_count\": self.word_count,\n            \"category_count\": self.category_count\n        }\n        with codecs.open(save_path, 'w', 'utf-8') as f:\n            json.dump(json_model, f, ensure_ascii=False)\n\n    def load_model(self, model_path):\n        \"\"\"\n\n        Load a previously trained model\n\n        Parameters\n        ----------\n        model_path : str\n            path to the model\n\n        \"\"\"\n        json_data = json.load(codecs.open(model_path, 'r', 'utf-8'))\n        self.vocabularies = json_data[\"vocabularies\"]\n        self.word_count = json_data[\"word_count\"]\n        self.category_count = json_data[\"category_count\"]\n","sub_path":"gunosy/classifier/lib/NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} {"seq_id":"325437231","text":"#Pat McDonald\n#Exercise 5: Programming and Scripting\n#Working with Fisher's Iris dataset. 
Source: https://archive.ics.uci.edu/ml/datasets/iris\n\n#Open the iris dataset\nwith open(\"data/iris.csv\") as f:\n #Loop through each line\n for line in f:\n #Split and print each line of string values\n #Code by Mohamed Noor;https://learnonline.gmit.ie/mod/forum/discuss.php?d=14986#p29763 \n line = line.replace(',', ' ')\n line = line.rstrip()\n print(line[:11], line[12:16].strip())","sub_path":"openfile.py","file_name":"openfile.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"49762922","text":"#!/usr/bin/python3\n'''\nscript (based on the file 1-pack_web_static.py) that distributes an archive\nto your web servers, using the function do_deploy\n'''\nfrom fabric.api import *\nfrom fabric.operations import run, put, sudo\nimport os.path\nenv.hosts = ['142.44.167.24', '144.217.246.211']\n\n\ndef do_deploy(archive_path):\n '''\n script that uploads the archive to /tmp/, uncompresses the folder,\n deletes the archive, deletes the symbolic link, and creates a new\n symbolic link\n '''\n if (os.path.isfile(archive_path) is False):\n return False\n try:\n put(archive_path, '/tmp/')\n name = archive_path.split('/')[-1]\n new_dir = ('/data/web_static/releases/' + name.split('.')[0])\n run('sudo mkdir -p {}'.format(new_dir))\n run('sudo tar -xzf /tmp/{} -C {}'.format(name, new_dir))\n run('sudo rm /tmp/{}'.format(name))\n run('sudo mv {}/web_static/* {}/'.format(new_dir, new_dir))\n run('sudo rm -rf {}/web_static'.format(new_dir))\n run('sudo rm -rf /data/web_static/current')\n run('sudo ln -s {} /data/web_static/current'.format(new_dir))\n return True\n except:\n return False\n","sub_path":"2-do_deploy_web_static.py","file_name":"2-do_deploy_web_static.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"458243569","text":"#Write a function that takes a list value as an argument and returns a string with all the items separated by a comma and a space, with and inserted before the last item.\n#For example, passing the previous spam list to the function would return 'apples, bananas, tofu, and cats'\n#Your function should be able to work with any list value passed to it. Be sure to test the case where an empty list [] is passed to your function.\n\ndef commaCode(someList):\n if someList != []:\n message = '' #Initialize string message\n for i in range(len(someList)):\n if i < len(someList)-1:\n message += someList[i]+', ' #Add a comma and a space for each list item that isn't the last\n else:\n message += 'and '+someList[i] #Add an 'and' and a space in front of the last list item\n \n print(message) #Print results\n else:\n print('Enter a non-empty list')\n\nspam = ['apples','bananas','tofu','cats'] #Example list\ncommaCode(spam) #Call function\n","sub_path":"ch4PracticeProjectCommaCode.py","file_name":"ch4PracticeProjectCommaCode.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"332117595","text":"the_world = {}\n\n\n#with stantement: we don't have to close the file handler. The with statement \n# creates a context manager and it will automatically close the file handler for \n# you when you are done with it. 
(better way)\ndef read_from_file():\n with open('countries.txt','r') as text_file:\n for each_line in text_file: \n each_line = each_line.rstrip('\\n')\n country,city = each_line.split('/')\n the_world[country] = city\n\n\ndef write_to_file(country_name, city_name):\n with open ('countries.txt','a') as text_file:\n text_file.write('\\n'+country_name + '/' + city_name)\n\nprint ('Ask the expert - Captial Cities of the World')\nread_from_file()\n\nwhile True:\n query_country = input('Type the name of the country: ').lower().capitalize()\n \n if query_country in the_world:#how to check if the key is in the dictionary\n result = the_world[query_country]\n print(f'The capital city of {query_country} is {result}!')\n\n else:\n new_city = input(f\"I do not know the capital of {query_country}, please tell me!\")\n the_world[query_country] = new_city\n write_to_file(query_country, new_city)\n\n\n","sub_path":"askTheExpert.py","file_name":"askTheExpert.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"629926735","text":"from collections import defaultdict\nfrom typing import Dict\n\nfrom common.polynom_factory import gen_nodes\nfrom grid.cell import Cell2D\nfrom operator_assembler.base_ddof_allocator import BaseAllocator2D\nfrom common.custom_types_for_typechecking import *\nimport numpy as np\n\n\nclass Nto1Allocator2D(BaseAllocator2D):\n def __init__(self, *args, **kwargs):\n super(Nto1Allocator2D, self).__init__(*args, **kwargs)\n self.priority = ['size_match', 'match', 'border', 'smaller', 'bigger']\n\n def _make_ddof_index(self):\n for num_layer, cell in self.grid_interface.iterate_cells_fstb():\n adj_cells_stitching_modes = self._stitching_modes(num_layer=num_layer, cell=cell)\n for edge, adj_edge_cells, mode in adj_cells_stitching_modes:\n self._connect_edges(host_edge=edge, peer=adj_edge_cells, host_cell=cell, stitching_mode=mode)\n self._allocate_interior_ddofs(host_cell=cell)\n\n # TODO: convenience function to construct endpoint index\n\n def _make_conjugate_vertex_index(self):\n sizes = {}\n vertex_dofs = defaultdict(list)\n\n for (ll_vertex, vertex), (local_dof, global_dof) in self._vertex_ddof_index.items():\n sizes[ll_vertex] = max(sizes.get(ll_vertex, 0), max([abs(i - j) for i, j in zip(ll_vertex, vertex)]))\n vertex_dofs[vertex].append((ll_vertex, global_dof))\n\n self._conjugate_vertex_index = vertex_dofs\n self._sizes = sizes\n self.new_dof_mapping = np.arange(self._ddof_cnt, dtype=np.int64)\n\n def _merge_ddof_in_index(self):\n cvi_replacement = defaultdict(list)\n id2v_replacement = {}\n\n for vertex, list_dofs_props in self._conjugate_vertex_index.items():\n ll_vertices = defaultdict(list)\n unique_dofs = {}\n for ll_vertex, global_dof in list_dofs_props:\n ll_vertices[self._sizes[ll_vertex]].append(ll_vertex)\n cvi_replacement[vertex].append((ll_vertex, global_dof))\n id2v_replacement[global_dof] = vertex\n unique_dofs[self._sizes[ll_vertex]] = (ll_vertex, global_dof)\n for size, ll_vertices in ll_vertices.items():\n for ll_vertex in ll_vertices:\n self.new_dof_mapping[self._vertex_ddof_index[(ll_vertex, vertex)][1]] = unique_dofs[size][1]\n self._vertex_ddof_index[(ll_vertex, vertex)] = (\n self._vertex_ddof_index[(ll_vertex, vertex)][0],\n unique_dofs[size][1]\n )\n self._conjugate_vertex_index = cvi_replacement\n self._id_to_vertex_index = id2v_replacement\n\n def make_complete_index(self):\n self._make_ddof_index()\n self._make_conjugate_vertex_index()\n 
self._merge_ddof_in_index()\n\n\n @staticmethod\n def _get_stitching_mode(host_edge: edge_2D_type, peer_edges: Dict[Tuple, Cell2D], host_props, peer_props):\n if len(peer_edges) == 0:\n return 'border'\n elif len(peer_edges) == 1 and list(peer_edges.keys())[0][1] == host_edge:\n if host_props == peer_props[0]:\n return 'match'\n else:\n return 'size_match'\n elif len(peer_edges) > 1:\n return 'smaller'\n else:\n return 'bigger'\n\n def _weakly_connect_edges(self, host_edge: edge_2D_type, peer: Dict[Tuple, Cell2D], host_cell: Cell2D, how: str):\n\n host_props = {(host_cell.ll_vertex, host_edge):\n self.grid_interface.get_cell_props(host_cell)}\n peer_props = {(p_cell.ll_vertex, p_edge[1]):\n self.grid_interface.get_cell_props(p_cell) for p_edge, p_cell in peer.items()}\n\n host_list = {(host_cell.ll_vertex, host_edge):\n self.get_flat_list_of_ddofs(host_edge, host_cell)}\n peer_list = {(p_cell.ll_vertex, p_edge[1]):\n self.get_flat_list_of_ddofs(p_edge[1], p_cell) for p_edge, p_cell in peer.items()}\n\n if how in ['smaller', 'size_match']:\n self._weak_edge_connections[(host_cell.ll_vertex, host_edge)] = peer_list\n self._weak_edge_connections_props[(host_cell.ll_vertex, host_edge)] = peer_props\n\n for p_edge, p_cell in peer.items():\n peer_edge = p_edge[1]\n self._weak_edge_connections[(p_cell.ll_vertex, peer_edge)] = host_list\n self._weak_edge_connections_props[(p_cell.ll_vertex, peer_edge)] = host_props\n else:\n raise Exception('Program flow cannot turn here')\n\n def _stitching_modes(self, cell: Cell2D, num_layer: int):\n adj_cells_stitching_modes = []\n for edge in cell.iterate_edges():\n adj_edge_cells = self.grid_interface.query_adj_cells_by_edge(cell, edge, num_layer=num_layer)\n host_order, host_dist = self.grid_interface.get_cell_props(cell)\n peer_props = [self.grid_interface.get_cell_props(p_cell) for p_cell in adj_edge_cells.values()]\n\n stitching_mode = self._get_stitching_mode(\n edge, adj_edge_cells,\n host_props=(host_order, host_dist),\n peer_props=peer_props)\n adj_cells_stitching_modes.append((edge, adj_edge_cells, stitching_mode))\n\n return sorted(adj_cells_stitching_modes, key=lambda x: self.priority.index(x[2]))\n\n def _connect_edges(self, host_edge: edge_2D_type,\n peer: Dict[Tuple, Cell2D],\n host_cell: Cell2D,\n stitching_mode: str):\n\n host_order, host_dist = self.grid_interface.get_cell_props(host_cell)\n peer_props = [self.grid_interface.get_cell_props(p_cell) for p_cell in peer.values()]\n\n normed_edge_host = host_cell.edge_normed_by_size(host_edge)\n host_cell_fe = self._get_fe_cell(size=host_cell.size, order=host_order, dist=host_dist)\n\n if stitching_mode in ['border', 'smaller', 'bigger', 'size_match']:\n self._allocate_local_ddofs_edge(edge=host_edge, cell=host_cell, cell_fe=host_cell_fe)\n if stitching_mode == 'size_match':\n for (adj_edge, peer_cell), p_props in zip(peer.items(), peer_props):\n peer_edge = adj_edge[1]\n peer_cell_fe = self._get_fe_cell(size=peer_cell.size, order=p_props[0], dist=p_props[1])\n self._allocate_local_ddofs_edge(edge=peer_edge, cell=peer_cell, cell_fe=peer_cell_fe)\n else:\n for (adj_edge, peer_cell), p_props in zip(peer.items(), peer_props):\n peer_edge = adj_edge[1]\n peer_cell_fe = self._get_fe_cell(size=peer_cell.size, order=p_props[0], dist=p_props[1])\n ld_host_list = host_cell_fe.get_edge_ddof_index()[normed_edge_host]\n\n to_merge_with_edge = self._edge_ddof_index.get((peer_cell.ll_vertex, peer_edge))\n if to_merge_with_edge is not None:\n self._edge_ddof_index[(host_cell.ll_vertex, host_edge)] = \\\n [(j, 
i[1]) for i, j in zip(to_merge_with_edge, ld_host_list)]\n                else:\n                    self._edge_ddof_index[(host_cell.ll_vertex, host_edge)] = \\\n                        [(i, self._ddof_cnt + num) for num, i in enumerate(ld_host_list)]\n                    self._ddof_cnt += len(ld_host_list)\n                for vertex_peer, vertex_host in zip(peer_edge, host_edge):\n                    local_ddof_host = host_cell_fe.get_vertex_ddof_index()[host_cell.vertex_normed_by_size(vertex_host)]\n                    local_ddof_peer = peer_cell_fe.get_vertex_ddof_index()[peer_cell.vertex_normed_by_size(vertex_host)]\n                    to_merge_with_vertex = self._vertex_ddof_index.get(\n                        (peer_cell.ll_vertex, vertex_peer),\n                        self._vertex_ddof_index.get((host_cell.ll_vertex, vertex_host))\n                    )\n                    if to_merge_with_vertex is not None:\n                        self._vertex_ddof_index[(host_cell.ll_vertex, vertex_host)] = \\\n                            (local_ddof_host, to_merge_with_vertex[1])\n                        self._vertex_ddof_index[(peer_cell.ll_vertex, vertex_peer)] = \\\n                            (local_ddof_peer, to_merge_with_vertex[1])\n                    else:\n                        self._vertex_ddof_index[(host_cell.ll_vertex, vertex_host)] = \\\n                            (local_ddof_host, self._ddof_cnt)\n                        self._vertex_ddof_index[(peer_cell.ll_vertex, vertex_peer)] = \\\n                            (local_ddof_peer, self._ddof_cnt)\n                        self._ddof_cnt += 1\n        if stitching_mode in ['smaller', 'size_match']:\n            self._weakly_connect_edges(host_edge=host_edge,\n                                       peer=peer,\n                                       host_cell=host_cell,\n                                       how=stitching_mode)\n","sub_path":"operator_assembler/n_to_1_ddof_allocator.py","file_name":"n_to_1_ddof_allocator.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} {"seq_id":"444964528","text":"#!/usr/bin/python\n\nfrom collections import defaultdict\n\ns = open(\"day13.input\").read()\nlines = s.split(\"\\n\")\n\nstateDict = {\"l\": \"s\", \"s\": \"r\", \"r\": \"l\"}\nclass Cart(object):\n    def __init__(self, x, y, dir, id):\n        self.x = x\n        self.y = y\n        self.dir = dir\n        self.id = id\n        self.state = \"l\" # l -> s -> r ->\n    def __repr__(self):\n        return \" \".join([str(p) for p in [self.id, self.x,self.y,self.dir,self.state]])\n    def __lt__(self, other):\n        if self.y == other.y:\n            return self.x < other.x\n        return self.y < other.y\n    # movement helpers: update this cart's position and heading\n    def r(self):\n        self.x += 1\n        self.dir = \">\"\n    def l(self):\n        self.x -= 1\n        self.dir = \"<\"\n    def u(self):\n        self.y -= 1\n        self.dir = \"^\"\n    def d(self):\n        self.y += 1\n        self.dir = \"v\"\n    def move(self, floor):\n        #print(self.id, self.x, self.y, self.dir, floor)\n        next = states[self.dir]\n        t = next[floor]\n        if floor == \"+\":\n            l, s, r = t\n            if self.state == \"l\":\n                t = l\n            elif self.state == \"s\":\n                t = s\n            elif self.state == \"r\":\n                t = r\n            self.state = stateDict[self.state]\n\n        if t == \"v\":\n            self.d()\n        elif t == \"^\":\n            self.u()\n        elif t == \">\":\n            self.r()\n        elif t == \"<\":\n            self.l()\n        else:\n            panic()\n\nd = {}\ncarts = {}\ncId = 0\nfor y, line in enumerate(lines):\n    for x, c in enumerate(line):\n        if c == \" \":\n            continue\n        if c in (\"v\", \"^\"):\n            carts[cId] = Cart(x,y,c,cId)\n            cId += 1\n            d[(x,y)] = \"|\"\n        elif c in (\"<\", \">\"):\n            carts[cId] = Cart(x,y,c,cId)\n            cId += 1\n            d[(x,y)] = \"-\"\n        else:\n            d[(x,y)] = c\nprint(carts)\n\ndef panic():\n    print(\"NOOOO\")\n\nstates = {}\n\n# l s r\nstates[\">\"] = {\"-\": \">\", \"\\\\\": \"v\", \"/\": \"^\", \"+\": (\"^\", \">\", \"v\")}\nstates[\"<\"] = {\"-\": \"<\", \"\\\\\": \"^\", \"/\": \"v\", \"+\": (\"v\", \"<\", \"^\")}\nstates[\"v\"] = {\"|\": \"v\", \"\\\\\": \">\", \"/\": \"<\", \"+\": (\">\", \"v\", \"<\")}\nstates[\"^\"] = {\"|\": \"^\", \"\\\\\": \"<\", \"/\": \">\", \"+\": (\"<\", \"^\", \">\")}\n\ni = 0\nprint( 
len(carts))\nwhile True:\n done = False\n if len(carts) == 1:\n print([(cart.x,cart.y) for cart in carts.values()])\n break\n for cart in sorted(carts.values()):\n floor = d[(cart.x,cart.y)]\n cart.move(floor)\n for other in carts.values():\n if cart.id == other.id:\n continue\n if cart.x == other.x and cart.y == other.y:\n print(\"Crash\", cart.x, cart.y)\n del carts[cart.id]\n del carts[other.id]\n break\n i += 1\n","sub_path":"day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"261649599","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = 'litleleprikon'\n\nimport sys\nfrom app_libs.console import Console\nfrom app_libs.interface import MainWindow\nfrom PyQt4.QtGui import QApplication\n\n\ndef main():\n if len(sys.argv) > 1:\n if sys.argv[1] == '-c':\n Console().run()\n else:\n app = QApplication(sys.argv)\n ex = MainWindow()\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt as ex:\n print(\"Keyboard interrupt\")","sub_path":"second/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"346780853","text":"print(\"ok\")\n\n#input_filename = \"A-test.txt\"\n#output_filename = \"A-test-out.txt\"\n\n#input_filename = \"A-small-attempt0.in\"\n#output_filename = \"A-small-attempt0.out\"\n\ninput_filename = \"A-large.in\"\noutput_filename = \"A-large.out\"\n\n\ndef solve(xs):\n friends = 0\n stands = 0\n for need, new_people in enumerate(xs):\n #print(\"Stands %d, need %d\" % (stands, need))\n new_friends = need - stands if stands < need else 0\n stands += new_people + new_friends\n friends += new_friends\n #print(\"New people %d, new friends %d\" % (new_people, new_friends))\n return friends\n\n\nwith open(input_filename, \"r\") as ifile:\n with open(output_filename, \"w\") as ofile:\n T = int(ifile.readline())\n for case in range(1, T+1):\n data = ifile.readline().split()[1]\n data = [int(s) for s in data]\n\n print(\"\\nCase %d\" % case)\n print(\"Task: %s\" % str(data))\n\n result = solve(data)\n\n ofile.write(\"Case #%d: %d\\n\" % (case, result))\n print(\"Solve: %d\" % result)\n\n\n","sub_path":"solutions_5639104758808576_1/Python/Verum/A-StandingOvation.py","file_name":"A-StandingOvation.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"34601397","text":"#! 
/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport warnings\nimport datetime\n\n\nsys.path.append(\"../\")\nfrom utils.conf import wd, wd_cur, ai, ai_cur, ai_en\nfrom utils.etl import ETL\n\nwarnings.filterwarnings('ignore')\nos.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'\n\nif len(sys.argv) == 1:\n    dotime = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\nelse:\n    dotime = sys.argv[1]\n\ntable_name = 't_invest_portfolio_store'\nfile_name = 't_invest_portfolio_store'\n\n\n# fetch the source-to-target mapping SQL\nsrc_sql = \"SELECT src_sql, data_source,columns_list,unique_key FROM iadvisor.etl_src_tgt_rule WHERE tgt_table = '%s' AND is_use = 1\" % table_name\nai_cur.execute(src_sql)\nrows = ai_cur.fetchall()\nsql = \"select * from (%s) where to_char(updatetime,'YYYY-MM-DD') >='%s'\" % (rows[0][0], dotime)\ncolumns = rows[0][2]\nunique_key = rows[0][3]\nprint(sql)\ntable_name2 = 'iadvisor.t_invest_portfolio_store'\netl = ETL(src_cur=wd_cur,\n          src_conn=wd,\n          tgt_cur=ai_cur,\n          tgt_conn=ai,\n          sql=sql,\n          table_name=table_name2,\n          columns=columns,\n          unique_key=unique_key)\n\n# dump the source data to a file\netl.dump_data(file_name)\n\n# write the data into the target table\netl.import_data(file_name)\n\n\nwd_cur.close()\nai_cur.close()\nwd.close()\nai.close()\n","sub_path":"iadvisor/t_invest_portfolio_store.py","file_name":"t_invest_portfolio_store.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} {"seq_id":"467008443","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport logging\nimport ldap\nimport ldap.sasl\nfrom threading import local\nfrom django.core.signals import request_finished\nfrom django.core.handlers.wsgi import WSGIHandler\nfrom django.dispatch import receiver\nfrom intranet import settings\n\nlogger = logging.getLogger(__name__)\n_thread_locals = local()\n\n\nclass LDAPFilter(object):\n    @staticmethod\n    def operator(operator, *conditions):\n        return \"(\" + operator + \"\".join((\"(\" + c + \")\" for c in conditions)) + \")\"\n\n    @staticmethod\n    def and_filter(*conditions):\n        return LDAPFilter.operator(\"&\", *conditions)\n\n    @staticmethod\n    def or_filter(*conditions):\n        return LDAPFilter.operator(\"|\", *conditions)\n\n    @staticmethod\n    def attribute_in_list(attribute, values):\n        \"\"\"Returns a filter for selecting all entries for which a\n        specified attribute is contained in a specified list of values.\n        \"\"\"\n        conditions = (attribute + \"=\" + v for v in values)\n        return LDAPFilter.or_filter(*conditions)\n\n    @staticmethod\n    def all_users():\n        \"\"\"Returns a filter for selecting all user objects in LDAP\n        \"\"\"\n\n        user_object_classes = settings.LDAP_OBJECT_CLASSES.values()\n        return LDAPFilter.attribute_in_list(\"objectclass\", user_object_classes)\n\n\nclass LDAPConnection(object):\n    \"\"\"Represents an LDAP connection with wrappers for the raw ldap\n    queries.\n\n    Attributes:\n        conn: The singleton LDAP connection.\n\n    \"\"\"\n    def __init__(self):\n        \"\"\"Initialize a singleton LDAPConnection object.\n\n        Connect to the LDAP server specified in settings and bind\n        using the GSSAPI protocol. 
The requisite KRB5CCNAME\n environmental variable should have already been set by the\n SetKerberosCache middleware.\n\n \"\"\"\n if (not hasattr(_thread_locals, \"ldap_conn\")) \\\n or (_thread_locals.ldap_conn is None):\n logger.info(\"Connecting to LDAP...\")\n _thread_locals.ldap_conn = ldap.ldapobject.ReconnectLDAPObject(settings.LDAP_SERVER, trace_stack_limit=None)\n\n try:\n auth_tokens = ldap.sasl.gssapi()\n _thread_locals.ldap_conn.sasl_interactive_bind_s('', auth_tokens)\n logger.info(\"Successfully connected to LDAP.\")\n except (ldap.LOCAL_ERROR, ldap.INVALID_CREDENTIALS):\n _thread_locals.ldap_conn.simple_bind_s(settings.AUTHUSER_DN, settings.AUTHUSER_PASSWORD)\n logger.error(\"SASL bind failed - using simple bind\")\n # logger.debug(_thread_locals.ldap_conn.whoami_s())\n\n @property\n def raw_connection(self):\n \"\"\"Return the raw connection from threadlocals\n \"\"\"\n\n return _thread_locals.ldap_conn\n\n def search(self, dn, filter, attributes):\n \"\"\"Search LDAP and return an LDAPResult.\n\n Search LDAP with the given dn and filter and return the given\n attributes in an LDAPResult object.\n\n Args:\n dn: The string representation of the distinguished name\n (DN) of the entry at which to start the search.\n filter: The string representation of the filter to apply to\n the search.\n attributes: A list of LDAP attributes (as strings)\n to retrieve.\n\n Returns:\n An LDAPResult object.\n\n Raises:\n Should raise stuff but it doesn't yet\n\n \"\"\"\n logger.debug(\"Searching ldap - dn: {}, filter: {}, \"\n \"attributes: {}\".format(dn, filter, attributes))\n\n # tip-toe around unicode bugs\n attributes = [str(attr) for attr in attributes]\n\n return _thread_locals.ldap_conn.search_s(dn, ldap.SCOPE_SUBTREE,\n filter, attributes)\n\n def user_attributes(self, dn, attributes):\n \"\"\"Fetch a list of attributes of the specified user.\n\n Fetch LDAP attributes of a tjhsstStudent or a tjhsstTeacher. The\n LDAPResult will contain an empty set of results if the user does\n not exist.\n\n Args:\n dn: The full DN of the user\n attributes: A list of the LDAP fields to fetch (strings)\n\n Returns:\n LDAPResult object (empty if no results)\n\n \"\"\"\n logger.debug(\"Fetching attributes '{}' of user \"\n \"{}\".format(str(attributes), dn))\n\n filter = LDAPFilter.all_users()\n\n try:\n r = self.search(dn, filter, attributes)\n except ldap.NO_SUCH_OBJECT as e:\n logger.error(\"No such user \" + dn)\n raise\n return LDAPResult(r)\n\n def class_attributes(self, dn, attributes):\n \"\"\"Fetch a list of attributes of the specified class.\n\n Fetch LDAP attributes of a tjhsstClass. 
The LDAPResult will\n contain an empty set of results if the class does not exist.\n\n Args:\n dn: The full DN of the class\n attributes: A list of the LDAP fields to fetch (strings)\n\n Returns:\n LDAPResult object (empty if no results)\n\n \"\"\"\n logger.debug(\"Fetching attributes '\" + str(attributes) +\n \"' of class \" + dn)\n filter = '(objectclass=tjhsstClass)'\n\n try:\n r = self.search(dn, filter, attributes)\n except ldap.NO_SUCH_OBJECT:\n logger.error(\"No such class \" + dn)\n raise\n return LDAPResult(r)\n\n\nclass LDAPResult(object):\n \"\"\"Represents the result of an LDAP query.\n\n LDAPResult stores the raw result of an LDAP query and can process\n the results in various ways.\n\n Attributes:\n result: the raw result of an LDAP query\n\n \"\"\"\n\n def __init__(self, result):\n self.result = result\n\n def first_result(self):\n \"\"\"Fetch the first LDAP object in the response.\"\"\"\n if len(self.result) > 0:\n return self.result[0][1]\n else:\n return []\n\n def results_array(self):\n \"\"\"Return the full array of results.\"\"\"\n return self.result\n\n\n@receiver(request_finished,\n dispatch_uid=\"close_ldap_connection\",\n sender=WSGIHandler)\ndef close_ldap_connection(sender, **kwargs):\n \"\"\"Closes the request's LDAP connection.\n\n Listens for the request_finished signal from Django and upon\n receit, unbinds from the directory, terminates the current\n association, and frees resources.\n\n It would be nice if we could leave the connections open in a pool,\n but it looks like rebinding on an open connection isn't possible\n with GSSAPI binds.\n\n \"\"\"\n if hasattr(_thread_locals, 'ldap_conn'):\n if _thread_locals.ldap_conn is not None:\n _thread_locals.ldap_conn.unbind_s()\n _thread_locals.ldap_conn = None\n logger.info(\"LDAP connection closed.\")\n","sub_path":"intranet/db/ldap_db.py","file_name":"ldap_db.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"576635822","text":"import socket\nimport time\nimport pickle\nfrom threading import Thread\n\nfrom protocol import Message\n\nsock = None\nid_peer_map = {}\n\nuser_table = {}\n# maps username and password\n# TODO : encryption\n\nusers_online = {}\n# users_online: { user1: thread id, ... }\n\npending_requests = {}\n# pending_requests: { user1: Message, ...}\n\nfriends_list = {}\n# friends_list : { user1: [ user2, user3, ... 
], ...}\n\nmessage_logs = {}\n# message_logs :\n# {\n# (user1, user2) : [\n# [ sender, payload, unread1, unread2 ], ...\n# ],\n# ...\n# }\n\n\n# Returns unique pair of two users\ndef get_tuple(first, second):\n return (first, second) if first < second else (second, first)\n\n# Timer thread\nclass Timer(Thread):\n def __init__(self, parent):\n Thread.__init__(self)\n self.parent = parent\n\n def run(self):\n while True:\n self.parent.ping()\n time.sleep(10)\n\n\nclass MessageHandler(object):\n MAX_CONNECTIONS = 10\n PORT = 20236\n IP_ADDRESS = '147.46.241.102'\n\n # IP_ADDRESS = ''\n\n def __init__(self):\n self.initialize()\n for i in range(MessageHandler.MAX_CONNECTIONS):\n thread = MessageHandler.Connection(self)\n thread.daemon = True\n thread.start()\n \n timer = Timer(self)\n timer.daemon = True\n timer.start()\n\n # Initialize socket\n def initialize(self):\n global sock\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n sock.bind((MessageHandler.IP_ADDRESS, MessageHandler.PORT))\n print(\"listening...\")\n sock.listen(10)\n\n # Thread connecting to clients\n class Connection(Thread):\n def __init__(self, message_handler):\n self.message_handler = message_handler\n Thread.__init__(self)\n\n def run(self):\n peer, addr = sock.accept()\n print(\"accepted:\", addr)\n id_peer_map[self.ident] = peer\n while True:\n try:\n byte_message = peer.recv(1024)\n message = pickle.loads(byte_message)\n response = self.message_handler.handle_message(message, self.ident)\n if response is not None:\n peer.send(pickle.dumps(response))\n except EOFError:\n # this happens when client app closed\n peer.close()\n print(\"closed:\", addr)\n online_ids = list(users_online.values())\n if self.ident in online_ids:\n username = list(users_online.keys())[online_ids.index(self.ident)]\n del users_online[username]\n peer, add = sock.accept()\n print(\"accepted:\", addr)\n id_peer_map[self.ident] = peer\n\n # Handling message from client\n def handle_message(self, message, ident):\n if message.type == 'message':\n self.send_message(message.sender_id, message.receiver_id, message.payload)\n elif message.type == 'signup':\n return self.register_user(message.sender_id, message.payload)\n elif message.type == 'login':\n response = self.login(message.sender_id, message.payload)\n if response.type == 'success':\n users_online[message.sender_id] = ident\n if message.sender_id in pending_requests:\n id_peer_map[ident].send(pickle.dumps(response))\n msg = pending_requests[message.sender_id]\n del pending_requests[message.sender_id]\n return msg\n return response\n elif message.type == 'get_friends':\n return self.get_friends(message.sender_id)\n elif message.type == 'get_statuses':\n return self.get_statuses(message.sender_id)\n elif message.type == 'add_friend':\n return self.add_friend(message.sender_id, message.receiver_id)\n elif message.type == 'reply_request_friend':\n self.request_friend(message.sender_id, message.receiver_id, message.payload)\n elif message.type == 'get_logs':\n self.mark_read(message.sender_id, message.receiver_id)\n logs = message_logs[get_tuple(message.sender_id, message.receiver_id)]\n return Message(\"message_logs\", 0, 0, logs)\n elif message.type == 'read_msg':\n self.mark_read(message.sender_id, message.receiver_id)\n elif message.type == 'signout':\n print(\"sign out:\", message.sender_id)\n del users_online[message.sender_id]\n\n # Send text message to receiver\n def send_message(self, sender, receiver, payload):\n # 
struct: [sender, text, user1_unread, user2_unread]\n        msg_struct = [sender, payload, 1, 1]\n        i = 2 if sender < receiver else 3\n        msg_struct[i] = 0\n        message_logs[get_tuple(sender, receiver)].append(msg_struct)\n\n        if receiver in users_online:\n            peer = id_peer_map[users_online[receiver]]\n            message = Message('message', sender, receiver, payload)\n            try:\n                peer.send(pickle.dumps(message))\n            except BrokenPipeError:\n                del users_online[receiver]\n\n\n    def register_user(self, username, password):\n        if username in user_table:\n            return Message('fail', 0, 0, \"Username already taken\")\n        else:\n            user_table[username] = password\n            friends_list[username] = []\n            print(\"registered a user:\", username)\n            return Message('success', 0, 0, \"Registered. Please login\")\n\n    def login(self, username, password):\n        if username in users_online:\n            return Message('fail', 0, 0, \"User is already connected\")\n        elif username in user_table and user_table[username] == password:\n            print(\"sign in:\", username)\n            return Message('success', 0, 0, \"Welcome\")\n        else:\n            return Message('fail', 0, 0, \"Incorrect information. Try again\")\n\n    def add_friend(self, sender, username):\n        if username not in user_table:\n            return Message('fail', 0, 0, \"User doesn't exist\")\n        if username in friends_list[sender]:\n            return Message('fail', 0, 0, \"User is your friend\")\n\n        message = Message('request_friend', sender, username, sender + \" wants to be your friend\")\n        if username in users_online:\n            peer = id_peer_map[users_online[username]]\n            try:\n                peer.send(pickle.dumps(message))\n            except BrokenPipeError:\n                del users_online[username]\n                pending_requests[username] = message\n        else:\n            pending_requests[username] = message\n        return Message('success', 0, 0, \"Requested\")\n\n    def request_friend(self, sender, receiver, payload):\n        if receiver in users_online:\n            peer = id_peer_map[users_online[receiver]]\n            message = Message('request_reply', sender, receiver, payload)\n            try:\n                peer.send(pickle.dumps(message))\n            except BrokenPipeError:\n                del users_online[receiver]\n        if payload == \"Accept\":\n            friends_list[sender].append(receiver)\n            friends_list[receiver].append(sender)\n            message_logs[get_tuple(sender, receiver)] = []\n    \n    def get_friends(self, sender):\n        return Message(\"friends_list\", 0, 0, friends_list[sender])\n    \n    def get_statuses(self, sender):\n        statuses = map(lambda x: x in users_online, friends_list[sender])\n        return Message(\"status_list\", 0, 0, list(statuses))\n\n    def mark_read(self, sender, receiver):\n        i = 2 if sender < receiver else 3\n        for log in message_logs[get_tuple(sender, receiver)]:\n            log[i] = 0\n    \n    def ping(self):\n        for user, ident in list(users_online.items()):\n            peer = id_peer_map[ident]\n            message = Message(\"ping\")\n            try:\n                peer.send(pickle.dumps(message))\n            except BrokenPipeError:\n                del users_online[user]\n\n\nmessage_handler = MessageHandler()\n\ntry:\n    while True:\n        time.sleep(1)\n\nexcept KeyboardInterrupt:\n    sock.close()\n    for peer in id_peer_map.values():\n        peer.close()\n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} {"seq_id":"467754981","text":"from django.shortcuts import get_object_or_404\nfrom .models import Transformer, Building, Connection, LoadLog\nfrom django.utils import timezone\nimport googlemaps\nfrom .gmapsDistMatrix.distance_matrix import distance_matrix\n\ndef toggle(tID):\n    oldT = get_object_or_404(Transformer, id = tID)\n    ChangedBuildings = []\n    if 
oldT.Status == True:\n # Shut the transformer down, set its load to 0 and save it to the database\n oldT.Status = False\n oldT.Load = 0\n oldT.save()\n # LoadLog(Transformer = oldT, Load = oldT.Load, Time = timezone.now()).save()\n for conn in oldT.connection_set.filter(Connected = True):\n # Disconnect the buildings which are connected to the transformer.\n conn.Connected = False\n conn.save()\n b = conn.Building\n feasibilities = {}\n for newT in Transformer.objects.filter(Status = True):\n dist = Connection.objects.get(Transformer = newT, Building = b).Distance\n feasibility = ((0.8 * newT.kVA) - newT.Load - b.ConnectedLoad) / dist\n feasibilities[newT.id] = feasibility\n if len(feasibilities) > 0:\n if max(feasibilities.values()) > 0:\n newTransID = max(feasibilities, key = feasibilities.get)\n switchTo = Transformer.objects.get(id = newTransID)\n Connection.objects.filter(Transformer = switchTo, Building = b).update(Connected = True)\n switchTo.Load += b.ConnectedLoad\n switchTo.save()\n ChangedBuildings.append(b.id)\n # LoadLog(Transformer = switchTo, Load = switchTo.Load, Time = timezone.now()).save()\n else:\n oldT.Status = True\n for b in oldT.building_set.all():\n oldT.Load += b.ConnectedLoad\n if (Connection.objects.filter(Building = b, Connected = True).exists()):\n c = Connection.objects.get(Building = b, Connected = True)\n c.Connected = False\n c.Transformer.Load -= b.ConnectedLoad\n c.Transformer.save()\n # LoadLog(Transformer = c.Transformer, Load = c.Transformer.Load, Time = timezone.now()).save()\n c.save()\n ChangedBuildings.append(b.id)\n Connection.objects.filter(Transformer = oldT, Building = b).update(Connected = True)\n oldT.save()\n # LoadLog(Transformer = oldT, Load = oldT.Load, Time = timezone.now()).save()\n\n return ChangedBuildings\n\ndef add_connections():\n gmaps = googlemaps.Client(key='AIzaSyD-IxdRdp56dYFVy-06EMG9VD1RCSwUWdk')\n # Backup key - AIzaSyDfWidKLPWeb8ajWtI9W9ATSCaFJFPfV9w\n\n # Add all virtual connections\n for t in Transformer.objects.all():\n for b in Building.objects.all():\n if (Connection.objects.filter(Transformer = t, Building = b).exists() == False):\n origin = (t.Latitude, t.Longitude)\n destination = (b.Latitude, b.Longitude)\n mat = distance_matrix(gmaps, origin, destination)\n distance = mat['rows'][0]['elements'][0]['distance']['value']\n if (distance == 0):\n distance = 1\n newC = Connection(Transformer = t, Building = b, Distance = distance, Connected = False)\n newC.save()\n print(newC)\n\n # Update Connected to True for the buildings and transformers which are actually connected\n for b in Building.objects.all():\n t = Transformer.objects.get(id = b.Transformer_id)\n\n if Connection.objects.filter(Transformer = t, Building = b, Connected = False).exists():\n if Connection.objects.filter(Building = b, Connected = True).exists()==False:\n c = Connection.objects.get(Transformer = t, Building = b, Connected = False)\n c.Connected = True\n c.save()\n t.Load += b.ConnectedLoad\n t.Status = True\n t.save()\n # LoadLog(Transformer = t, Load = t.Load, Time = timezone.now()).save()\n\n","sub_path":"LDA/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"417740917","text":"'''Criar uma tupla preenchida com os números de zero a vinte por extenso\n O programa deverá ler um número entre 0 e 20 pelo teclado e mostra-lo por extenso.'''\n\n\nnumeros = ('zero','one', 'two', 'three', 'four', 'five', 'six', 'seven', 
'eight','nine', 'ten','eleven', 'twelve', 'thirteen'\n           ,'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty')\n\n\nwhile True:\n    num = int(input('Enter a number between 0 and 20: '))\n\n    if num < 0 or num > 20:\n        print('Invalid number! Try again.')\n\n    else:\n        print(f'You typed the number: {numeros[num]}')\n\n    opcao = ' '\n    while opcao not in 'YN':\n        opcao = str(input('Do you want to continue?[Y/N]')).upper().strip()[0]\n\n    if opcao == 'N':\n        break\n\nprint('{:=^30}'.format('PROGRAM FINISHED'))\n\n\n\n\n","sub_path":"exercicios/exe072Numeros_por_extenso.py","file_name":"exe072Numeros_por_extenso.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} {"seq_id":"168253313","text":"\nfrom markdown.extensions import Extension\nimport xml.etree.ElementTree as etree\nfrom markdown.inlinepatterns import LinkInlineProcessor, IMAGE_LINK_RE\nfrom markdown.extensions.attr_list import AttrListTreeprocessor\nimport re\n\n# CAPTION_RE = r'\\!\\[(?=[^\\]])'\nCAPTION_RE = IMAGE_LINK_RE\n\nclass ImageInlineProcessor(LinkInlineProcessor):\n\n    def handleMatch(self, m, data):\n        text, index, handled = self.getText(data, m.end(0))\n        if not handled:\n            return None, None, None\n\n        src, title, index, handled = self.getLink(data, index)\n        if not handled:\n            return None, None, None\n\n        fig = etree.Element('figure')\n        fig.set('class', 'figure text-center d-block')\n        img = etree.SubElement(fig, 'img')\n        cap = etree.SubElement(fig, 'figcaption')\n        cap.set('class', 'figure-caption')\n\n        img.set('src', src)\n        img.set('class', 'figure-img img-fluid')\n\n        if title is not None:\n            img.set(\"title\", title)\n\n        cap.text = text\n\n        # if attr_list is enabled, put '{: xxx}' inside <figcaption>
at end\n # so attr_list will see it\n if 'attr_list' in self.md.treeprocessors:\n # find attr_list curly braces\n curly = re.match(AttrListTreeprocessor.BASE_RE, data[index:])\n if curly:\n # img[-1].tail = '\\n'\n # img[-1].tail += curly.group()\n # remove original '{: xxx}'\n index += curly.endpos\n\n return fig, m.start(0), index\n\n\nclass CaptionsExtension(Extension):\n def extendMarkdown(self, md, md_globals):\n md.inlinePatterns.register(ImageInlineProcessor(CAPTION_RE, md), 'caption', 151)\n\n\ndef makeExtension(**kwargs):\n return CaptionsExtension(**kwargs)\n","sub_path":"larc/markdown/image_fig.py","file_name":"image_fig.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"325770341","text":"import sqlite3\nimport gzip\nimport argparse\n\ndef create_edge_database():\n c.execute('''DROP TABLE IF EXISTS edge_data''')\n c.execute('CREATE TABLE IF NOT EXISTS edge_data(keyword TEXT, neighbor TEXT)')\n c.execute('''PRAGMA synchronous = EXTRA''')\n c.execute('''PRAGMA journal_mode = WAL''')\n\ndef insert_edge_data(keyword, neighbor):\n c.executemany(\"INSERT INTO edge_data(keyword, neighbor) VALUES (?, ?)\", [(keyword, neighbor)])\n\ndef read_edge_data():\n c.execute('SELECT * FROM edge_data')\n data = c.fetchall()\n for row in data:\n print(row)\n\nparser = argparse.ArgumentParser(description='build a database for the graph.')\nparser.add_argument('graph_file', help = 'select the graph file')\nparser.add_argument('graph_database', help='select the database of the graph')\nargs = parser.parse_args()\n\nfilename = args.graph_file\ndatabase_name = args.graph_database\n\nconn = sqlite3.connect(database_name)\nc = conn.cursor()\n\ncreate_edge_database()\n\nprint('building database......')\nwith gzip.open(filename, 'rt') as file:\n string_line = (line.split() for line in file)\n edge_list = ((item[1], item[2]) for item in string_line)\n count = 1\n for edges in edge_list:\n print('inserted records : %i ' % count)\n insert_edge_data(edges[0], edges[1])\n insert_edge_data(edges[1], edges[0])\n count += 1\n conn.commit()\n\nc.execute(\"CREATE INDEX keyword_index ON edge_data (keyword)\")\nprint('build complete.')\n\nc.close()\nconn.close()","sub_path":"db_build.py","file_name":"db_build.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"} +{"seq_id":"563084214","text":"# -*- coding: utf-8 -*-\n\n# import python3-style 'print' function\nfrom __future__ import print_function\n# unicode literals: allowed to use \"unicode_string\" instead of u\"unicode_string\"\nfrom __future__ import unicode_literals\n\nfrom PyQt4.QtCore import *\n\n# create a new layer\nlayer = QgsVectorLayer(\"Point?crs=epsg:4326\", \"Kernkraftwerke\", \"memory\")\nprovider = layer.dataProvider()\nlayer.startEditing()\n\n# add attributes\nprovider.addAttributes([QgsField(\"Name\", QVariant.String),\n QgsField(\"Bruttoleistung\", QVariant.Int)])\n\n# create features\nf1 = QgsFeature()\nf1.setGeometry(QgsGeometry.fromPoint(QgsPoint(8.227778, 47.552222)))\nf1.setAttributes([u\"Beznau\", 760])\n\nf2 = QgsFeature()\nf2.setGeometry(QgsGeometry.fromPoint(QgsPoint(7.968889, 47.365833)))\nf2.setAttributes([u\"Gösgen\", 1035])\n\nf3 = QgsFeature()\nf3.setGeometry(QgsGeometry.fromPoint(QgsPoint(8.18375, 47.60135)))\nf3.setAttributes([u\"Leibstadt\", 1245])\n\nf4 = QgsFeature()\nf4.setGeometry(QgsGeometry.fromPoint(QgsPoint(7.270278, 
46.970833)))\nf4.setAttributes([u\"Mühleberg\", 390])\n\n# add features\nprovider.addFeatures([f1, f2, f3, f4])\n# commit changes\nlayer.commitChanges()\n# resize layer to fit added features\nlayer.updateExtents()\n\n# add new layer to the map\nQgsMapLayerRegistry.instance().addMapLayer(layer)\n\n# write new layer to the shape-file\nQgsVectorFileWriter.writeAsVectorFormat(layer,\n \"/Users/maria/Google Drive/FHNW/Informatik/QGIS_Projekt/new_layer/kernkraftwerke.shp\",\n \"UTF8\", provider.crs())","sub_path":"informatik3/qgis/uebung12_2.py","file_name":"uebung12_2.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"7"}